| Column | Type | Value range |
| --- | --- | --- |
| repo_name | string | length 7 to 71 |
| file_path | string | length 5 to 118 |
| context | list | |
| import_statement | string | length 45 to 12.5k |
| token_num | int64 | 641 to 99.4k |
| cropped_code | string | length 44 to 17k |
| all_code | string | length 43 to 754k |
| next_line | string | length 2 to 330 |
| gold_snippet_index | int64 | 0 to 68 |
| created_at | string | fixed length 25 |
| level | string (categorical) | 9 classes |
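Each row pairs a repository file with its surrounding context snippets and the code around the line to be predicted. As a rough orientation, a minimal sketch of how a row with this schema could be inspected, assuming the data loads with the `datasets` library; the dataset path and split are placeholders, and the field meanings are inferred from the column names:

```python
from datasets import load_dataset

# Placeholder dataset path and split; substitute the real ones when loading.
ds = load_dataset("your-org/your-dataset", split="train")

row = ds[0]
print(row["repo_name"], row["file_path"])   # e.g. a repo name and the file being completed
print(len(row["context"]))                  # list of {"identifier", "path", "snippet"} entries
print(row["token_num"], row["level"])       # token count and the example's level class
print(row["next_line"])                     # presumably the ground-truth line following cropped_code
```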
ZS-YANG/FemtoDet-v3
mmdet/datasets/transforms/transforms.py
[ { "identifier": "TRANSFORMS", "path": "mmdet/registry.py", "snippet": "TRANSFORMS = Registry(\n 'transform',\n parent=MMENGINE_TRANSFORMS,\n locations=['mmdet.datasets.transforms'])" }, { "identifier": "autocast_box_type", "path": "mmdet/structures/bbox/box_type.py", "snippet": "def autocast_box_type(dst_box_type='hbox') -> Callable:\n \"\"\"A decorator which automatically casts results['gt_bboxes'] to the\n destination box type.\n\n It commenly used in mmdet.datasets.transforms to make the transforms up-\n compatible with the np.ndarray type of results['gt_bboxes'].\n\n The speed of processing of np.ndarray and BaseBoxes data are the same:\n\n - np.ndarray: 0.0509 img/s\n - BaseBoxes: 0.0551 img/s\n\n Args:\n dst_box_type (str): Destination box type.\n \"\"\"\n _, box_type_cls = get_box_type(dst_box_type)\n\n def decorator(func: Callable) -> Callable:\n\n def wrapper(self, results: dict, *args, **kwargs) -> dict:\n if ('gt_bboxes' not in results\n or isinstance(results['gt_bboxes'], BaseBoxes)):\n return func(self, results)\n elif isinstance(results['gt_bboxes'], np.ndarray):\n results['gt_bboxes'] = box_type_cls(\n results['gt_bboxes'], clone=False)\n if 'mix_results' in results:\n for res in results['mix_results']:\n if isinstance(res['gt_bboxes'], np.ndarray):\n res['gt_bboxes'] = box_type_cls(\n res['gt_bboxes'], clone=False)\n\n _results = func(self, results, *args, **kwargs)\n\n # In some cases, the function will process gt_bboxes in-place\n # Simultaneously convert inputting and outputting gt_bboxes\n # back to np.ndarray\n if isinstance(_results, dict) and 'gt_bboxes' in _results:\n if isinstance(_results['gt_bboxes'], BaseBoxes):\n _results['gt_bboxes'] = _results['gt_bboxes'].numpy()\n if isinstance(results['gt_bboxes'], BaseBoxes):\n results['gt_bboxes'] = results['gt_bboxes'].numpy()\n return _results\n else:\n raise TypeError(\n \"auto_box_type requires results['gt_bboxes'] to \"\n 'be BaseBoxes or np.ndarray, but got '\n f\"{type(results['gt_bboxes'])}\")\n\n return wrapper\n\n return decorator" }, { "identifier": "HorizontalBoxes", "path": "mmdet/structures/bbox/horizontal_boxes.py", "snippet": "class HorizontalBoxes(BaseBoxes):\n \"\"\"The horizontal box class used in MMDetection by default.\n\n The ``box_dim`` of ``HorizontalBoxes`` is 4, which means the length of\n the last dimension of the data should be 4. Two modes of box data are\n supported in ``HorizontalBoxes``:\n\n - 'xyxy': Each row of data indicates (x1, y1, x2, y2), which are the\n coordinates of the left-top and right-bottom points.\n - 'cxcywh': Each row of data indicates (x, y, w, h), where (x, y) are the\n coordinates of the box centers and (w, h) are the width and height.\n\n ``HorizontalBoxes`` only restores 'xyxy' mode of data. If the the data is\n in 'cxcywh' mode, users need to input ``in_mode='cxcywh'`` and The code\n will convert the 'cxcywh' data to 'xyxy' automatically.\n\n Args:\n data (Tensor or np.ndarray or Sequence): The box data with shape of\n (..., 4).\n dtype (torch.dtype, Optional): data type of boxes. Defaults to None.\n device (str or torch.device, Optional): device of boxes.\n Default to None.\n clone (bool): Whether clone ``boxes`` or not. Defaults to True.\n mode (str, Optional): the mode of boxes. If it is 'cxcywh', the\n `data` will be converted to 'xyxy' mode. 
Defaults to None.\n \"\"\"\n\n box_dim: int = 4\n\n def __init__(self,\n data: Union[Tensor, np.ndarray],\n dtype: torch.dtype = None,\n device: DeviceType = None,\n clone: bool = True,\n in_mode: Optional[str] = None) -> None:\n super().__init__(data=data, dtype=dtype, device=device, clone=clone)\n if isinstance(in_mode, str):\n if in_mode not in ('xyxy', 'cxcywh'):\n raise ValueError(f'Get invalid mode {in_mode}.')\n if in_mode == 'cxcywh':\n self.tensor = self.cxcywh_to_xyxy(self.tensor)\n\n @staticmethod\n def cxcywh_to_xyxy(boxes: Tensor) -> Tensor:\n \"\"\"Convert box coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).\n\n Args:\n boxes (Tensor): cxcywh boxes tensor with shape of (..., 4).\n\n Returns:\n Tensor: xyxy boxes tensor with shape of (..., 4).\n \"\"\"\n ctr, wh = boxes.split((2, 2), dim=-1)\n return torch.cat([(ctr - wh / 2), (ctr + wh / 2)], dim=-1)\n\n @staticmethod\n def xyxy_to_cxcywh(boxes: Tensor) -> Tensor:\n \"\"\"Convert box coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).\n\n Args:\n boxes (Tensor): xyxy boxes tensor with shape of (..., 4).\n\n Returns:\n Tensor: cxcywh boxes tensor with shape of (..., 4).\n \"\"\"\n xy1, xy2 = boxes.split((2, 2), dim=-1)\n return torch.cat([(xy2 + xy1) / 2, (xy2 - xy1)], dim=-1)\n\n @property\n def cxcywh(self) -> Tensor:\n \"\"\"Return a tensor representing the cxcywh boxes.\"\"\"\n return self.xyxy_to_cxcywh(self.tensor)\n\n @property\n def centers(self) -> Tensor:\n \"\"\"Return a tensor representing the centers of boxes.\"\"\"\n boxes = self.tensor\n return (boxes[..., :2] + boxes[..., 2:]) / 2\n\n @property\n def areas(self) -> Tensor:\n \"\"\"Return a tensor representing the areas of boxes.\"\"\"\n boxes = self.tensor\n return (boxes[..., 2] - boxes[..., 0]) * (\n boxes[..., 3] - boxes[..., 1])\n\n @property\n def widths(self) -> Tensor:\n \"\"\"Return a tensor representing the widths of boxes.\"\"\"\n boxes = self.tensor\n return boxes[..., 2] - boxes[..., 0]\n\n @property\n def heights(self) -> Tensor:\n \"\"\"Return a tensor representing the heights of boxes.\"\"\"\n boxes = self.tensor\n return boxes[..., 3] - boxes[..., 1]\n\n def flip_(self,\n img_shape: Tuple[int, int],\n direction: str = 'horizontal') -> None:\n \"\"\"Flip boxes horizontally or vertically in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n direction (str): Flip direction, options are \"horizontal\",\n \"vertical\" and \"diagonal\". Defaults to \"horizontal\"\n \"\"\"\n assert direction in ['horizontal', 'vertical', 'diagonal']\n flipped = self.tensor\n boxes = flipped.clone()\n if direction == 'horizontal':\n flipped[..., 0] = img_shape[1] - boxes[..., 2]\n flipped[..., 2] = img_shape[1] - boxes[..., 0]\n elif direction == 'vertical':\n flipped[..., 1] = img_shape[0] - boxes[..., 3]\n flipped[..., 3] = img_shape[0] - boxes[..., 1]\n else:\n flipped[..., 0] = img_shape[1] - boxes[..., 2]\n flipped[..., 1] = img_shape[0] - boxes[..., 3]\n flipped[..., 2] = img_shape[1] - boxes[..., 0]\n flipped[..., 3] = img_shape[0] - boxes[..., 1]\n\n def translate_(self, distances: Tuple[float, float]) -> None:\n \"\"\"Translate boxes in-place.\n\n Args:\n distances (Tuple[float, float]): translate distances. 
The first\n is horizontal distance and the second is vertical distance.\n \"\"\"\n boxes = self.tensor\n assert len(distances) == 2\n self.tensor = boxes + boxes.new_tensor(distances).repeat(2)\n\n def clip_(self, img_shape: Tuple[int, int]) -> None:\n \"\"\"Clip boxes according to the image shape in-place.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n \"\"\"\n boxes = self.tensor\n boxes[..., 0::2] = boxes[..., 0::2].clamp(0, img_shape[1])\n boxes[..., 1::2] = boxes[..., 1::2].clamp(0, img_shape[0])\n\n def rotate_(self, center: Tuple[float, float], angle: float) -> None:\n \"\"\"Rotate all boxes in-place.\n\n Args:\n center (Tuple[float, float]): Rotation origin.\n angle (float): Rotation angle represented in degrees. Positive\n values mean clockwise rotation.\n \"\"\"\n boxes = self.tensor\n rotation_matrix = boxes.new_tensor(\n cv2.getRotationMatrix2D(center, -angle, 1))\n\n corners = self.hbox2corner(boxes)\n corners = torch.cat(\n [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1)\n corners_T = torch.transpose(corners, -1, -2)\n corners_T = torch.matmul(rotation_matrix, corners_T)\n corners = torch.transpose(corners_T, -1, -2)\n self.tensor = self.corner2hbox(corners)\n\n def project_(self, homography_matrix: Union[Tensor, np.ndarray]) -> None:\n \"\"\"Geometric transformat boxes in-place.\n\n Args:\n homography_matrix (Tensor or np.ndarray]):\n Shape (3, 3) for geometric transformation.\n \"\"\"\n boxes = self.tensor\n if isinstance(homography_matrix, np.ndarray):\n homography_matrix = boxes.new_tensor(homography_matrix)\n corners = self.hbox2corner(boxes)\n corners = torch.cat(\n [corners, corners.new_ones(*corners.shape[:-1], 1)], dim=-1)\n corners_T = torch.transpose(corners, -1, -2)\n corners_T = torch.matmul(homography_matrix, corners_T)\n corners = torch.transpose(corners_T, -1, -2)\n # Convert to homogeneous coordinates by normalization\n corners = corners[..., :2] / corners[..., 2:3]\n self.tensor = self.corner2hbox(corners)\n\n @staticmethod\n def hbox2corner(boxes: Tensor) -> Tensor:\n \"\"\"Convert box coordinates from (x1, y1, x2, y2) to corners ((x1, y1),\n (x2, y1), (x1, y2), (x2, y2)).\n\n Args:\n boxes (Tensor): Horizontal box tensor with shape of (..., 4).\n\n Returns:\n Tensor: Corner tensor with shape of (..., 4, 2).\n \"\"\"\n x1, y1, x2, y2 = torch.split(boxes, 1, dim=-1)\n corners = torch.cat([x1, y1, x2, y1, x1, y2, x2, y2], dim=-1)\n return corners.reshape(*corners.shape[:-1], 4, 2)\n\n @staticmethod\n def corner2hbox(corners: Tensor) -> Tensor:\n \"\"\"Convert box coordinates from corners ((x1, y1), (x2, y1), (x1, y2),\n (x2, y2)) to (x1, y1, x2, y2).\n\n Args:\n corners (Tensor): Corner tensor with shape of (..., 4, 2).\n\n Returns:\n Tensor: Horizontal box tensor with shape of (..., 4).\n \"\"\"\n if corners.numel() == 0:\n return corners.new_zeros((0, 4))\n min_xy = corners.min(dim=-2)[0]\n max_xy = corners.max(dim=-2)[0]\n return torch.cat([min_xy, max_xy], dim=-1)\n\n def rescale_(self, scale_factor: Tuple[float, float]) -> None:\n \"\"\"Rescale boxes w.r.t. rescale_factor in-place.\n\n Note:\n Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n w.r.t ``scale_facotr``. 
The difference is that ``resize_`` only\n changes the width and the height of boxes, but ``rescale_`` also\n rescales the box centers simultaneously.\n\n Args:\n scale_factor (Tuple[float, float]): factors for scaling boxes.\n The length should be 2.\n \"\"\"\n boxes = self.tensor\n assert len(scale_factor) == 2\n scale_factor = boxes.new_tensor(scale_factor).repeat(2)\n self.tensor = boxes * scale_factor\n\n def resize_(self, scale_factor: Tuple[float, float]) -> None:\n \"\"\"Resize the box width and height w.r.t scale_factor in-place.\n\n Note:\n Both ``rescale_`` and ``resize_`` will enlarge or shrink boxes\n w.r.t ``scale_facotr``. The difference is that ``resize_`` only\n changes the width and the height of boxes, but ``rescale_`` also\n rescales the box centers simultaneously.\n\n Args:\n scale_factor (Tuple[float, float]): factors for scaling box\n shapes. The length should be 2.\n \"\"\"\n boxes = self.tensor\n assert len(scale_factor) == 2\n ctrs = (boxes[..., 2:] + boxes[..., :2]) / 2\n wh = boxes[..., 2:] - boxes[..., :2]\n scale_factor = boxes.new_tensor(scale_factor)\n wh = wh * scale_factor\n xy1 = ctrs - 0.5 * wh\n xy2 = ctrs + 0.5 * wh\n self.tensor = torch.cat([xy1, xy2], dim=-1)\n\n def is_inside(self,\n img_shape: Tuple[int, int],\n all_inside: bool = False,\n allowed_border: int = 0) -> BoolTensor:\n \"\"\"Find boxes inside the image.\n\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n all_inside (bool): Whether the boxes are all inside the image or\n part inside the image. Defaults to False.\n allowed_border (int): Boxes that extend beyond the image shape\n boundary by more than ``allowed_border`` are considered\n \"outside\" Defaults to 0.\n Returns:\n BoolTensor: A BoolTensor indicating whether the box is inside\n the image. Assuming the original boxes have shape (m, n, 4),\n the output has shape (m, n).\n \"\"\"\n img_h, img_w = img_shape\n boxes = self.tensor\n if all_inside:\n return (boxes[:, 0] >= -allowed_border) & \\\n (boxes[:, 1] >= -allowed_border) & \\\n (boxes[:, 2] < img_w + allowed_border) & \\\n (boxes[:, 3] < img_h + allowed_border)\n else:\n return (boxes[..., 0] < img_w + allowed_border) & \\\n (boxes[..., 1] < img_h + allowed_border) & \\\n (boxes[..., 2] > -allowed_border) & \\\n (boxes[..., 3] > -allowed_border)\n\n def find_inside_points(self,\n points: Tensor,\n is_aligned: bool = False) -> BoolTensor:\n \"\"\"Find inside box points. Boxes dimension must be 2.\n\n Args:\n points (Tensor): Points coordinates. Has shape of (m, 2).\n is_aligned (bool): Whether ``points`` has been aligned with boxes\n or not. If True, the length of boxes and ``points`` should be\n the same. Defaults to False.\n\n Returns:\n BoolTensor: A BoolTensor indicating whether a point is inside\n boxes. Assuming the boxes has shape of (n, 4), if ``is_aligned``\n is False. The index has shape of (m, n). 
If ``is_aligned`` is\n True, m should be equal to n and the index has shape of (m, ).\n \"\"\"\n boxes = self.tensor\n assert boxes.dim() == 2, 'boxes dimension must be 2.'\n\n if not is_aligned:\n boxes = boxes[None, :, :]\n points = points[:, None, :]\n else:\n assert boxes.size(0) == points.size(0)\n\n x_min, y_min, x_max, y_max = boxes.unbind(dim=-1)\n return (points[..., 0] >= x_min) & (points[..., 0] <= x_max) & \\\n (points[..., 1] >= y_min) & (points[..., 1] <= y_max)\n\n def create_masks(self, img_shape: Tuple[int, int]) -> BitmapMasks:\n \"\"\"\n Args:\n img_shape (Tuple[int, int]): A tuple of image height and width.\n\n Returns:\n :obj:`BitmapMasks`: Converted masks\n \"\"\"\n img_h, img_w = img_shape\n boxes = self.tensor\n\n xmin, ymin = boxes[:, 0:1], boxes[:, 1:2]\n xmax, ymax = boxes[:, 2:3], boxes[:, 3:4]\n gt_masks = np.zeros((len(boxes), img_h, img_w), dtype=np.uint8)\n for i in range(len(boxes)):\n gt_masks[i,\n int(ymin[i]):int(ymax[i]),\n int(xmin[i]):int(xmax[i])] = 1\n return BitmapMasks(gt_masks, img_h, img_w)\n\n @staticmethod\n def overlaps(boxes1: BaseBoxes,\n boxes2: BaseBoxes,\n mode: str = 'iou',\n is_aligned: bool = False,\n eps: float = 1e-6) -> Tensor:\n \"\"\"Calculate overlap between two set of boxes with their types\n converted to ``HorizontalBoxes``.\n\n Args:\n boxes1 (:obj:`BaseBoxes`): BaseBoxes with shape of (m, box_dim)\n or empty.\n boxes2 (:obj:`BaseBoxes`): BaseBoxes with shape of (n, box_dim)\n or empty.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection\n over foreground). Defaults to \"iou\".\n is_aligned (bool): If True, then m and n must be equal. Defaults\n to False.\n eps (float): A value added to the denominator for numerical\n stability. Defaults to 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n \"\"\"\n boxes1 = boxes1.convert_to('hbox')\n boxes2 = boxes2.convert_to('hbox')\n return bbox_overlaps(\n boxes1.tensor,\n boxes2.tensor,\n mode=mode,\n is_aligned=is_aligned,\n eps=eps)\n\n @staticmethod\n def from_instance_masks(masks: MaskType) -> 'HorizontalBoxes':\n \"\"\"Create horizontal boxes from instance masks.\n\n Args:\n masks (:obj:`BitmapMasks` or :obj:`PolygonMasks`): BitmapMasks or\n PolygonMasks instance with length of n.\n\n Returns:\n :obj:`HorizontalBoxes`: Converted boxes with shape of (n, 4).\n \"\"\"\n num_masks = len(masks)\n boxes = np.zeros((num_masks, 4), dtype=np.float32)\n if isinstance(masks, BitmapMasks):\n x_any = masks.masks.any(axis=1)\n y_any = masks.masks.any(axis=2)\n for idx in range(num_masks):\n x = np.where(x_any[idx, :])[0]\n y = np.where(y_any[idx, :])[0]\n if len(x) > 0 and len(y) > 0:\n # use +1 for x_max and y_max so that the right and bottom\n # boundary of instance masks are fully included by the box\n boxes[idx, :] = np.array(\n [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32)\n elif isinstance(masks, PolygonMasks):\n for idx, poly_per_obj in enumerate(masks.masks):\n # simply use a number that is big enough for comparison with\n # coordinates\n xy_min = np.array([masks.width * 2, masks.height * 2],\n dtype=np.float32)\n xy_max = np.zeros(2, dtype=np.float32)\n for p in poly_per_obj:\n xy = np.array(p).reshape(-1, 2).astype(np.float32)\n xy_min = np.minimum(xy_min, np.min(xy, axis=0))\n xy_max = np.maximum(xy_max, np.max(xy, axis=0))\n boxes[idx, :2] = xy_min\n boxes[idx, 2:] = xy_max\n else:\n raise TypeError(\n '`masks` must be `BitmapMasks` or `PolygonMasks`, '\n f'but got {type(masks)}.')\n return HorizontalBoxes(boxes)" }, { 
"identifier": "BitmapMasks", "path": "mmdet/structures/mask/structures.py", "snippet": "class BitmapMasks(BaseInstanceMasks):\n \"\"\"This class represents masks in the form of bitmaps.\n\n Args:\n masks (ndarray): ndarray of masks in shape (N, H, W), where N is\n the number of objects.\n height (int): height of masks\n width (int): width of masks\n\n Example:\n >>> from mmdet.data_elements.mask.structures import * # NOQA\n >>> num_masks, H, W = 3, 32, 32\n >>> rng = np.random.RandomState(0)\n >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int64)\n >>> self = BitmapMasks(masks, height=H, width=W)\n\n >>> # demo crop_and_resize\n >>> num_boxes = 5\n >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n >>> out_shape = (14, 14)\n >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n >>> device = 'cpu'\n >>> interpolation = 'bilinear'\n >>> new = self.crop_and_resize(\n ... bboxes, out_shape, inds, device, interpolation)\n >>> assert len(new) == num_boxes\n >>> assert new.height, new.width == out_shape\n \"\"\"\n\n def __init__(self, masks, height, width):\n self.height = height\n self.width = width\n if len(masks) == 0:\n self.masks = np.empty((0, self.height, self.width), dtype=np.uint8)\n else:\n assert isinstance(masks, (list, np.ndarray))\n if isinstance(masks, list):\n assert isinstance(masks[0], np.ndarray)\n assert masks[0].ndim == 2 # (H, W)\n else:\n assert masks.ndim == 3 # (N, H, W)\n\n self.masks = np.stack(masks).reshape(-1, height, width)\n assert self.masks.shape[1] == self.height\n assert self.masks.shape[2] == self.width\n\n def __getitem__(self, index):\n \"\"\"Index the BitmapMask.\n\n Args:\n index (int | ndarray): Indices in the format of integer or ndarray.\n\n Returns:\n :obj:`BitmapMasks`: Indexed bitmap masks.\n \"\"\"\n masks = self.masks[index].reshape(-1, self.height, self.width)\n return BitmapMasks(masks, self.height, self.width)\n\n def __iter__(self):\n return iter(self.masks)\n\n def __repr__(self):\n s = self.__class__.__name__ + '('\n s += f'num_masks={len(self.masks)}, '\n s += f'height={self.height}, '\n s += f'width={self.width})'\n return s\n\n def __len__(self):\n \"\"\"Number of masks.\"\"\"\n return len(self.masks)\n\n def rescale(self, scale, interpolation='nearest'):\n \"\"\"See :func:`BaseInstanceMasks.rescale`.\"\"\"\n if len(self.masks) == 0:\n new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8)\n else:\n rescaled_masks = np.stack([\n mmcv.imrescale(mask, scale, interpolation=interpolation)\n for mask in self.masks\n ])\n height, width = rescaled_masks.shape[1:]\n return BitmapMasks(rescaled_masks, height, width)\n\n def resize(self, out_shape, interpolation='nearest'):\n \"\"\"See :func:`BaseInstanceMasks.resize`.\"\"\"\n if len(self.masks) == 0:\n resized_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n resized_masks = np.stack([\n mmcv.imresize(\n mask, out_shape[::-1], interpolation=interpolation)\n for mask in self.masks\n ])\n return BitmapMasks(resized_masks, *out_shape)\n\n def flip(self, flip_direction='horizontal'):\n \"\"\"See :func:`BaseInstanceMasks.flip`.\"\"\"\n assert flip_direction in ('horizontal', 'vertical', 'diagonal')\n\n if len(self.masks) == 0:\n flipped_masks = self.masks\n else:\n flipped_masks = np.stack([\n mmcv.imflip(mask, direction=flip_direction)\n for mask in self.masks\n ])\n return BitmapMasks(flipped_masks, self.height, self.width)\n\n def pad(self, out_shape, pad_val=0):\n \"\"\"See 
:func:`BaseInstanceMasks.pad`.\"\"\"\n if len(self.masks) == 0:\n padded_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n padded_masks = np.stack([\n mmcv.impad(mask, shape=out_shape, pad_val=pad_val)\n for mask in self.masks\n ])\n return BitmapMasks(padded_masks, *out_shape)\n\n def crop(self, bbox):\n \"\"\"See :func:`BaseInstanceMasks.crop`.\"\"\"\n assert isinstance(bbox, np.ndarray)\n assert bbox.ndim == 1\n\n # clip the boundary\n bbox = bbox.copy()\n bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n x1, y1, x2, y2 = bbox\n w = np.maximum(x2 - x1, 1)\n h = np.maximum(y2 - y1, 1)\n\n if len(self.masks) == 0:\n cropped_masks = np.empty((0, h, w), dtype=np.uint8)\n else:\n cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w]\n return BitmapMasks(cropped_masks, h, w)\n\n def crop_and_resize(self,\n bboxes,\n out_shape,\n inds,\n device='cpu',\n interpolation='bilinear',\n binarize=True):\n \"\"\"See :func:`BaseInstanceMasks.crop_and_resize`.\"\"\"\n if len(self.masks) == 0:\n empty_masks = np.empty((0, *out_shape), dtype=np.uint8)\n return BitmapMasks(empty_masks, *out_shape)\n\n # convert bboxes to tensor\n if isinstance(bboxes, np.ndarray):\n bboxes = torch.from_numpy(bboxes).to(device=device)\n if isinstance(inds, np.ndarray):\n inds = torch.from_numpy(inds).to(device=device)\n\n num_bbox = bboxes.shape[0]\n fake_inds = torch.arange(\n num_bbox, device=device).to(dtype=bboxes.dtype)[:, None]\n rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5\n rois = rois.to(device=device)\n if num_bbox > 0:\n gt_masks_th = torch.from_numpy(self.masks).to(device).index_select(\n 0, inds).to(dtype=rois.dtype)\n targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape,\n 1.0, 0, 'avg', True).squeeze(1)\n if binarize:\n resized_masks = (targets >= 0.5).cpu().numpy()\n else:\n resized_masks = targets.cpu().numpy()\n else:\n resized_masks = []\n return BitmapMasks(resized_masks, *out_shape)\n\n def expand(self, expanded_h, expanded_w, top, left):\n \"\"\"See :func:`BaseInstanceMasks.expand`.\"\"\"\n if len(self.masks) == 0:\n expanded_mask = np.empty((0, expanded_h, expanded_w),\n dtype=np.uint8)\n else:\n expanded_mask = np.zeros((len(self), expanded_h, expanded_w),\n dtype=np.uint8)\n expanded_mask[:, top:top + self.height,\n left:left + self.width] = self.masks\n return BitmapMasks(expanded_mask, expanded_h, expanded_w)\n\n def translate(self,\n out_shape,\n offset,\n direction='horizontal',\n border_value=0,\n interpolation='bilinear'):\n \"\"\"Translate the BitmapMasks.\n\n Args:\n out_shape (tuple[int]): Shape for output mask, format (h, w).\n offset (int | float): The offset for translate.\n direction (str): The translate direction, either \"horizontal\"\n or \"vertical\".\n border_value (int | float): Border value. 
Default 0 for masks.\n interpolation (str): Same as :func:`mmcv.imtranslate`.\n\n Returns:\n BitmapMasks: Translated BitmapMasks.\n\n Example:\n >>> from mmdet.data_elements.mask.structures import BitmapMasks\n >>> self = BitmapMasks.random(dtype=np.uint8)\n >>> out_shape = (32, 32)\n >>> offset = 4\n >>> direction = 'horizontal'\n >>> border_value = 0\n >>> interpolation = 'bilinear'\n >>> # Note, There seem to be issues when:\n >>> # * the mask dtype is not supported by cv2.AffineWarp\n >>> new = self.translate(out_shape, offset, direction,\n >>> border_value, interpolation)\n >>> assert len(new) == len(self)\n >>> assert new.height, new.width == out_shape\n \"\"\"\n if len(self.masks) == 0:\n translated_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n masks = self.masks\n if masks.shape[-2:] != out_shape:\n empty_masks = np.zeros((masks.shape[0], *out_shape),\n dtype=masks.dtype)\n min_h = min(out_shape[0], masks.shape[1])\n min_w = min(out_shape[1], masks.shape[2])\n empty_masks[:, :min_h, :min_w] = masks[:, :min_h, :min_w]\n masks = empty_masks\n translated_masks = mmcv.imtranslate(\n masks.transpose((1, 2, 0)),\n offset,\n direction,\n border_value=border_value,\n interpolation=interpolation)\n if translated_masks.ndim == 2:\n translated_masks = translated_masks[:, :, None]\n translated_masks = translated_masks.transpose(\n (2, 0, 1)).astype(self.masks.dtype)\n return BitmapMasks(translated_masks, *out_shape)\n\n def shear(self,\n out_shape,\n magnitude,\n direction='horizontal',\n border_value=0,\n interpolation='bilinear'):\n \"\"\"Shear the BitmapMasks.\n\n Args:\n out_shape (tuple[int]): Shape for output mask, format (h, w).\n magnitude (int | float): The magnitude used for shear.\n direction (str): The shear direction, either \"horizontal\"\n or \"vertical\".\n border_value (int | tuple[int]): Value used in case of a\n constant border.\n interpolation (str): Same as in :func:`mmcv.imshear`.\n\n Returns:\n BitmapMasks: The sheared masks.\n \"\"\"\n if len(self.masks) == 0:\n sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)\n else:\n sheared_masks = mmcv.imshear(\n self.masks.transpose((1, 2, 0)),\n magnitude,\n direction,\n border_value=border_value,\n interpolation=interpolation)\n if sheared_masks.ndim == 2:\n sheared_masks = sheared_masks[:, :, None]\n sheared_masks = sheared_masks.transpose(\n (2, 0, 1)).astype(self.masks.dtype)\n return BitmapMasks(sheared_masks, *out_shape)\n\n def rotate(self,\n out_shape,\n angle,\n center=None,\n scale=1.0,\n border_value=0,\n interpolation='bilinear'):\n \"\"\"Rotate the BitmapMasks.\n\n Args:\n out_shape (tuple[int]): Shape for output mask, format (h, w).\n angle (int | float): Rotation angle in degrees. Positive values\n mean counter-clockwise rotation.\n center (tuple[float], optional): Center point (w, h) of the\n rotation in source image. If not specified, the center of\n the image will be used.\n scale (int | float): Isotropic scale factor.\n border_value (int | float): Border value. 
Default 0 for masks.\n interpolation (str): Same as in :func:`mmcv.imrotate`.\n\n Returns:\n BitmapMasks: Rotated BitmapMasks.\n \"\"\"\n if len(self.masks) == 0:\n rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype)\n else:\n rotated_masks = mmcv.imrotate(\n self.masks.transpose((1, 2, 0)),\n angle,\n center=center,\n scale=scale,\n border_value=border_value,\n interpolation=interpolation)\n if rotated_masks.ndim == 2:\n # case when only one mask, (h, w)\n rotated_masks = rotated_masks[:, :, None] # (h, w, 1)\n rotated_masks = rotated_masks.transpose(\n (2, 0, 1)).astype(self.masks.dtype)\n return BitmapMasks(rotated_masks, *out_shape)\n\n @property\n def areas(self):\n \"\"\"See :py:attr:`BaseInstanceMasks.areas`.\"\"\"\n return self.masks.sum((1, 2))\n\n def to_ndarray(self):\n \"\"\"See :func:`BaseInstanceMasks.to_ndarray`.\"\"\"\n return self.masks\n\n def to_tensor(self, dtype, device):\n \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n return torch.tensor(self.masks, dtype=dtype, device=device)\n\n @classmethod\n def random(cls,\n num_masks=3,\n height=32,\n width=32,\n dtype=np.uint8,\n rng=None):\n \"\"\"Generate random bitmap masks for demo / testing purposes.\n\n Example:\n >>> from mmdet.data_elements.mask.structures import BitmapMasks\n >>> self = BitmapMasks.random()\n >>> print('self = {}'.format(self))\n self = BitmapMasks(num_masks=3, height=32, width=32)\n \"\"\"\n from mmdet.utils.util_random import ensure_rng\n rng = ensure_rng(rng)\n masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype)\n self = cls(masks, height=height, width=width)\n return self\n\n @classmethod\n def cat(cls: Type[T], masks: Sequence[T]) -> T:\n \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n Args:\n masks (Sequence[BitmapMasks]): A sequence of mask instances.\n\n Returns:\n BitmapMasks: Concatenated mask instance.\n \"\"\"\n assert isinstance(masks, Sequence)\n if len(masks) == 0:\n raise ValueError('masks should not be an empty list.')\n assert all(isinstance(m, cls) for m in masks)\n\n mask_array = np.concatenate([m.masks for m in masks], axis=0)\n return cls(mask_array, *mask_array.shape[1:])" }, { "identifier": "PolygonMasks", "path": "mmdet/structures/mask/structures.py", "snippet": "class PolygonMasks(BaseInstanceMasks):\n \"\"\"This class represents masks in the form of polygons.\n\n Polygons is a list of three levels. 
The first level of the list\n corresponds to objects, the second level to the polys that compose the\n object, the third level to the poly coordinates\n\n Args:\n masks (list[list[ndarray]]): The first level of the list\n corresponds to objects, the second level to the polys that\n compose the object, the third level to the poly coordinates\n height (int): height of masks\n width (int): width of masks\n\n Example:\n >>> from mmdet.data_elements.mask.structures import * # NOQA\n >>> masks = [\n >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ]\n >>> ]\n >>> height, width = 16, 16\n >>> self = PolygonMasks(masks, height, width)\n\n >>> # demo translate\n >>> new = self.translate((16, 16), 4., direction='horizontal')\n >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2])\n >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4)\n\n >>> # demo crop_and_resize\n >>> num_boxes = 3\n >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes)\n >>> out_shape = (16, 16)\n >>> inds = torch.randint(0, len(self), size=(num_boxes,))\n >>> device = 'cpu'\n >>> interpolation = 'bilinear'\n >>> new = self.crop_and_resize(\n ... bboxes, out_shape, inds, device, interpolation)\n >>> assert len(new) == num_boxes\n >>> assert new.height, new.width == out_shape\n \"\"\"\n\n def __init__(self, masks, height, width):\n assert isinstance(masks, list)\n if len(masks) > 0:\n assert isinstance(masks[0], list)\n assert isinstance(masks[0][0], np.ndarray)\n\n self.height = height\n self.width = width\n self.masks = masks\n\n def __getitem__(self, index):\n \"\"\"Index the polygon masks.\n\n Args:\n index (ndarray | List): The indices.\n\n Returns:\n :obj:`PolygonMasks`: The indexed polygon masks.\n \"\"\"\n if isinstance(index, np.ndarray):\n if index.dtype == bool:\n index = np.where(index)[0].tolist()\n else:\n index = index.tolist()\n if isinstance(index, list):\n masks = [self.masks[i] for i in index]\n else:\n try:\n masks = self.masks[index]\n except Exception:\n raise ValueError(\n f'Unsupported input of type {type(index)} for indexing!')\n if len(masks) and isinstance(masks[0], np.ndarray):\n masks = [masks] # ensure a list of three levels\n return PolygonMasks(masks, self.height, self.width)\n\n def __iter__(self):\n return iter(self.masks)\n\n def __repr__(self):\n s = self.__class__.__name__ + '('\n s += f'num_masks={len(self.masks)}, '\n s += f'height={self.height}, '\n s += f'width={self.width})'\n return s\n\n def __len__(self):\n \"\"\"Number of masks.\"\"\"\n return len(self.masks)\n\n def rescale(self, scale, interpolation=None):\n \"\"\"see :func:`BaseInstanceMasks.rescale`\"\"\"\n new_w, new_h = mmcv.rescale_size((self.width, self.height), scale)\n if len(self.masks) == 0:\n rescaled_masks = PolygonMasks([], new_h, new_w)\n else:\n rescaled_masks = self.resize((new_h, new_w))\n return rescaled_masks\n\n def resize(self, out_shape, interpolation=None):\n \"\"\"see :func:`BaseInstanceMasks.resize`\"\"\"\n if len(self.masks) == 0:\n resized_masks = PolygonMasks([], *out_shape)\n else:\n h_scale = out_shape[0] / self.height\n w_scale = out_shape[1] / self.width\n resized_masks = []\n for poly_per_obj in self.masks:\n resized_poly = []\n for p in poly_per_obj:\n p = p.copy()\n p[0::2] = p[0::2] * w_scale\n p[1::2] = p[1::2] * h_scale\n resized_poly.append(p)\n resized_masks.append(resized_poly)\n resized_masks = PolygonMasks(resized_masks, *out_shape)\n return resized_masks\n\n def flip(self, flip_direction='horizontal'):\n \"\"\"see :func:`BaseInstanceMasks.flip`\"\"\"\n assert 
flip_direction in ('horizontal', 'vertical', 'diagonal')\n if len(self.masks) == 0:\n flipped_masks = PolygonMasks([], self.height, self.width)\n else:\n flipped_masks = []\n for poly_per_obj in self.masks:\n flipped_poly_per_obj = []\n for p in poly_per_obj:\n p = p.copy()\n if flip_direction == 'horizontal':\n p[0::2] = self.width - p[0::2]\n elif flip_direction == 'vertical':\n p[1::2] = self.height - p[1::2]\n else:\n p[0::2] = self.width - p[0::2]\n p[1::2] = self.height - p[1::2]\n flipped_poly_per_obj.append(p)\n flipped_masks.append(flipped_poly_per_obj)\n flipped_masks = PolygonMasks(flipped_masks, self.height,\n self.width)\n return flipped_masks\n\n def crop(self, bbox):\n \"\"\"see :func:`BaseInstanceMasks.crop`\"\"\"\n assert isinstance(bbox, np.ndarray)\n assert bbox.ndim == 1\n\n # clip the boundary\n bbox = bbox.copy()\n bbox[0::2] = np.clip(bbox[0::2], 0, self.width)\n bbox[1::2] = np.clip(bbox[1::2], 0, self.height)\n x1, y1, x2, y2 = bbox\n w = np.maximum(x2 - x1, 1)\n h = np.maximum(y2 - y1, 1)\n\n if len(self.masks) == 0:\n cropped_masks = PolygonMasks([], h, w)\n else:\n # reference: https://github.com/facebookresearch/fvcore/blob/main/fvcore/transforms/transform.py # noqa\n crop_box = geometry.box(x1, y1, x2, y2).buffer(0.0)\n cropped_masks = []\n # suppress shapely warnings util it incorporates GEOS>=3.11.2\n # reference: https://github.com/shapely/shapely/issues/1345\n initial_settings = np.seterr()\n np.seterr(invalid='ignore')\n for poly_per_obj in self.masks:\n cropped_poly_per_obj = []\n for p in poly_per_obj:\n p = p.copy()\n p = geometry.Polygon(p.reshape(-1, 2)).buffer(0.0)\n # polygon must be valid to perform intersection.\n if not p.is_valid:\n continue\n cropped = p.intersection(crop_box)\n if cropped.is_empty:\n continue\n if isinstance(cropped,\n geometry.collection.BaseMultipartGeometry):\n cropped = cropped.geoms\n else:\n cropped = [cropped]\n # one polygon may be cropped to multiple ones\n for poly in cropped:\n # ignore lines or points\n if not isinstance(\n poly, geometry.Polygon) or not poly.is_valid:\n continue\n coords = np.asarray(poly.exterior.coords)\n # remove an extra identical vertex at the end\n coords = coords[:-1]\n coords[:, 0] -= x1\n coords[:, 1] -= y1\n cropped_poly_per_obj.append(coords.reshape(-1))\n # a dummy polygon to avoid misalignment between masks and boxes\n if len(cropped_poly_per_obj) == 0:\n cropped_poly_per_obj = [np.array([0, 0, 0, 0, 0, 0])]\n cropped_masks.append(cropped_poly_per_obj)\n np.seterr(**initial_settings)\n cropped_masks = PolygonMasks(cropped_masks, h, w)\n return cropped_masks\n\n def pad(self, out_shape, pad_val=0):\n \"\"\"padding has no effect on polygons`\"\"\"\n return PolygonMasks(self.masks, *out_shape)\n\n def expand(self, *args, **kwargs):\n \"\"\"TODO: Add expand for polygon\"\"\"\n raise NotImplementedError\n\n def crop_and_resize(self,\n bboxes,\n out_shape,\n inds,\n device='cpu',\n interpolation='bilinear',\n binarize=True):\n \"\"\"see :func:`BaseInstanceMasks.crop_and_resize`\"\"\"\n out_h, out_w = out_shape\n if len(self.masks) == 0:\n return PolygonMasks([], out_h, out_w)\n\n if not binarize:\n raise ValueError('Polygons are always binary, '\n 'setting binarize=False is unsupported')\n\n resized_masks = []\n for i in range(len(bboxes)):\n mask = self.masks[inds[i]]\n bbox = bboxes[i, :]\n x1, y1, x2, y2 = bbox\n w = np.maximum(x2 - x1, 1)\n h = np.maximum(y2 - y1, 1)\n h_scale = out_h / max(h, 0.1) # avoid too large scale\n w_scale = out_w / max(w, 0.1)\n\n resized_mask = []\n for p in 
mask:\n p = p.copy()\n # crop\n # pycocotools will clip the boundary\n p[0::2] = p[0::2] - bbox[0]\n p[1::2] = p[1::2] - bbox[1]\n\n # resize\n p[0::2] = p[0::2] * w_scale\n p[1::2] = p[1::2] * h_scale\n resized_mask.append(p)\n resized_masks.append(resized_mask)\n return PolygonMasks(resized_masks, *out_shape)\n\n def translate(self,\n out_shape,\n offset,\n direction='horizontal',\n border_value=None,\n interpolation=None):\n \"\"\"Translate the PolygonMasks.\n\n Example:\n >>> self = PolygonMasks.random(dtype=np.int64)\n >>> out_shape = (self.height, self.width)\n >>> new = self.translate(out_shape, 4., direction='horizontal')\n >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])\n >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4) # noqa: E501\n \"\"\"\n assert border_value is None or border_value == 0, \\\n 'Here border_value is not '\\\n f'used, and defaultly should be None or 0. got {border_value}.'\n if len(self.masks) == 0:\n translated_masks = PolygonMasks([], *out_shape)\n else:\n translated_masks = []\n for poly_per_obj in self.masks:\n translated_poly_per_obj = []\n for p in poly_per_obj:\n p = p.copy()\n if direction == 'horizontal':\n p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])\n elif direction == 'vertical':\n p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])\n translated_poly_per_obj.append(p)\n translated_masks.append(translated_poly_per_obj)\n translated_masks = PolygonMasks(translated_masks, *out_shape)\n return translated_masks\n\n def shear(self,\n out_shape,\n magnitude,\n direction='horizontal',\n border_value=0,\n interpolation='bilinear'):\n \"\"\"See :func:`BaseInstanceMasks.shear`.\"\"\"\n if len(self.masks) == 0:\n sheared_masks = PolygonMasks([], *out_shape)\n else:\n sheared_masks = []\n if direction == 'horizontal':\n shear_matrix = np.stack([[1, magnitude],\n [0, 1]]).astype(np.float32)\n elif direction == 'vertical':\n shear_matrix = np.stack([[1, 0], [magnitude,\n 1]]).astype(np.float32)\n for poly_per_obj in self.masks:\n sheared_poly = []\n for p in poly_per_obj:\n p = np.stack([p[0::2], p[1::2]], axis=0) # [2, n]\n new_coords = np.matmul(shear_matrix, p) # [2, n]\n new_coords[0, :] = np.clip(new_coords[0, :], 0,\n out_shape[1])\n new_coords[1, :] = np.clip(new_coords[1, :], 0,\n out_shape[0])\n sheared_poly.append(\n new_coords.transpose((1, 0)).reshape(-1))\n sheared_masks.append(sheared_poly)\n sheared_masks = PolygonMasks(sheared_masks, *out_shape)\n return sheared_masks\n\n def rotate(self,\n out_shape,\n angle,\n center=None,\n scale=1.0,\n border_value=0,\n interpolation='bilinear'):\n \"\"\"See :func:`BaseInstanceMasks.rotate`.\"\"\"\n if len(self.masks) == 0:\n rotated_masks = PolygonMasks([], *out_shape)\n else:\n rotated_masks = []\n rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)\n for poly_per_obj in self.masks:\n rotated_poly = []\n for p in poly_per_obj:\n p = p.copy()\n coords = np.stack([p[0::2], p[1::2]], axis=1) # [n, 2]\n # pad 1 to convert from format [x, y] to homogeneous\n # coordinates format [x, y, 1]\n coords = np.concatenate(\n (coords, np.ones((coords.shape[0], 1), coords.dtype)),\n axis=1) # [n, 3]\n rotated_coords = np.matmul(\n rotate_matrix[None, :, :],\n coords[:, :, None])[..., 0] # [n, 2, 1] -> [n, 2]\n rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,\n out_shape[1])\n rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,\n out_shape[0])\n rotated_poly.append(rotated_coords.reshape(-1))\n rotated_masks.append(rotated_poly)\n rotated_masks = 
PolygonMasks(rotated_masks, *out_shape)\n return rotated_masks\n\n def to_bitmap(self):\n \"\"\"convert polygon masks to bitmap masks.\"\"\"\n bitmap_masks = self.to_ndarray()\n return BitmapMasks(bitmap_masks, self.height, self.width)\n\n @property\n def areas(self):\n \"\"\"Compute areas of masks.\n\n This func is modified from `detectron2\n <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.\n The function only works with Polygons using the shoelace formula.\n\n Return:\n ndarray: areas of each instance\n \"\"\" # noqa: W501\n area = []\n for polygons_per_obj in self.masks:\n area_per_obj = 0\n for p in polygons_per_obj:\n area_per_obj += self._polygon_area(p[0::2], p[1::2])\n area.append(area_per_obj)\n return np.asarray(area)\n\n def _polygon_area(self, x, y):\n \"\"\"Compute the area of a component of a polygon.\n\n Using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Args:\n x (ndarray): x coordinates of the component\n y (ndarray): y coordinates of the component\n\n Return:\n float: the are of the component\n \"\"\" # noqa: 501\n return 0.5 * np.abs(\n np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))\n\n def to_ndarray(self):\n \"\"\"Convert masks to the format of ndarray.\"\"\"\n if len(self.masks) == 0:\n return np.empty((0, self.height, self.width), dtype=np.uint8)\n bitmap_masks = []\n for poly_per_obj in self.masks:\n bitmap_masks.append(\n polygon_to_bitmap(poly_per_obj, self.height, self.width))\n return np.stack(bitmap_masks)\n\n def to_tensor(self, dtype, device):\n \"\"\"See :func:`BaseInstanceMasks.to_tensor`.\"\"\"\n if len(self.masks) == 0:\n return torch.empty((0, self.height, self.width),\n dtype=dtype,\n device=device)\n ndarray_masks = self.to_ndarray()\n return torch.tensor(ndarray_masks, dtype=dtype, device=device)\n\n @classmethod\n def random(cls,\n num_masks=3,\n height=32,\n width=32,\n n_verts=5,\n dtype=np.float32,\n rng=None):\n \"\"\"Generate random polygon masks for demo / testing purposes.\n\n Adapted from [1]_\n\n References:\n .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379 # noqa: E501\n\n Example:\n >>> from mmdet.data_elements.mask.structures import PolygonMasks\n >>> self = PolygonMasks.random()\n >>> print('self = {}'.format(self))\n \"\"\"\n from mmdet.utils.util_random import ensure_rng\n rng = ensure_rng(rng)\n\n def _gen_polygon(n, irregularity, spikeyness):\n \"\"\"Creates the polygon by sampling points on a circle around the\n centre. Random noise is added by varying the angular spacing\n between sequential points, and by varying the radial distance of\n each point from the centre.\n\n Based on original code by Mike Ounsworth\n\n Args:\n n (int): number of vertices\n irregularity (float): [0,1] indicating how much variance there\n is in the angular spacing of vertices. [0,1] will map to\n [0, 2pi/numberOfVerts]\n spikeyness (float): [0,1] indicating how much variance there is\n in each vertex from the circle of radius aveRadius. 
[0,1]\n will map to [0, aveRadius]\n\n Returns:\n a list of vertices, in CCW order.\n \"\"\"\n from scipy.stats import truncnorm\n\n # Generate around the unit circle\n cx, cy = (0.0, 0.0)\n radius = 1\n\n tau = np.pi * 2\n\n irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n\n spikeyness = np.clip(spikeyness, 1e-9, 1)\n\n # generate n angle steps\n lower = (tau / n) - irregularity\n upper = (tau / n) + irregularity\n angle_steps = rng.uniform(lower, upper, n)\n\n # normalize the steps so that point 0 and point n+1 are the same\n k = angle_steps.sum() / (2 * np.pi)\n angles = (angle_steps / k).cumsum() + rng.uniform(0, tau)\n\n # Convert high and low values to be wrt the standard normal range\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html\n low = 0\n high = 2 * radius\n mean = radius\n std = spikeyness\n a = (low - mean) / std\n b = (high - mean) / std\n tnorm = truncnorm(a=a, b=b, loc=mean, scale=std)\n\n # now generate the points\n radii = tnorm.rvs(n, random_state=rng)\n x_pts = cx + radii * np.cos(angles)\n y_pts = cy + radii * np.sin(angles)\n\n points = np.hstack([x_pts[:, None], y_pts[:, None]])\n\n # Scale to 0-1 space\n points = points - points.min(axis=0)\n points = points / points.max(axis=0)\n\n # Randomly place within 0-1 space\n points = points * (rng.rand() * .8 + .2)\n min_pt = points.min(axis=0)\n max_pt = points.max(axis=0)\n\n high = (1 - max_pt)\n low = (0 - min_pt)\n offset = (rng.rand(2) * (high - low)) + low\n points = points + offset\n return points\n\n def _order_vertices(verts):\n \"\"\"\n References:\n https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise\n \"\"\"\n mlat = verts.T[0].sum() / len(verts)\n mlng = verts.T[1].sum() / len(verts)\n\n tau = np.pi * 2\n angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) +\n tau) % tau\n sortx = angle.argsort()\n verts = verts.take(sortx, axis=0)\n return verts\n\n # Generate a random exterior for each requested mask\n masks = []\n for _ in range(num_masks):\n exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9))\n exterior = (exterior * [(width, height)]).astype(dtype)\n masks.append([exterior.ravel()])\n\n self = cls(masks, height, width)\n return self\n\n @classmethod\n def cat(cls: Type[T], masks: Sequence[T]) -> T:\n \"\"\"Concatenate a sequence of masks into one single mask instance.\n\n Args:\n masks (Sequence[PolygonMasks]): A sequence of mask instances.\n\n Returns:\n PolygonMasks: Concatenated mask instance.\n \"\"\"\n assert isinstance(masks, Sequence)\n if len(masks) == 0:\n raise ValueError('masks should not be an empty list.')\n assert all(isinstance(m, cls) for m in masks)\n\n mask_list = list(itertools.chain(*[m.masks for m in masks]))\n return cls(mask_list, masks[0].height, masks[0].width)" }, { "identifier": "log_img_scale", "path": "mmdet/utils/logger.py", "snippet": "def log_img_scale(img_scale, shape_order='hw', skip_square=False):\n \"\"\"Log image size.\n\n Args:\n img_scale (tuple): Image size to be logged.\n shape_order (str, optional): The order of image shape.\n 'hw' for (height, width) and 'wh' for (width, height).\n Defaults to 'hw'.\n skip_square (bool, optional): Whether to skip logging for square\n img_scale. 
Defaults to False.\n\n Returns:\n bool: Whether to have done logging.\n \"\"\"\n if shape_order == 'hw':\n height, width = img_scale\n elif shape_order == 'wh':\n width, height = img_scale\n else:\n raise ValueError(f'Invalid shape_order {shape_order}.')\n\n if skip_square and (height == width):\n return False\n\n caller = get_caller_name()\n print_log(\n f'image shape: height={height}, width={width} in {caller}',\n logger='current')\n\n return True" } ]
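Among the context snippets above, `HorizontalBoxes` stores boxes in 'xyxy' form and converts 'cxcywh' input via `cxcywh_to_xyxy`. As a self-contained illustration of that conversion, a small sketch mirroring the snippet rather than code taken from the repository:

```python
import torch

def cxcywh_to_xyxy(boxes: torch.Tensor) -> torch.Tensor:
    """(cx, cy, w, h) -> (x1, y1, x2, y2), matching the snippet above."""
    ctr, wh = boxes.split((2, 2), dim=-1)
    return torch.cat([ctr - wh / 2, ctr + wh / 2], dim=-1)

def xyxy_to_cxcywh(boxes: torch.Tensor) -> torch.Tensor:
    """(x1, y1, x2, y2) -> (cx, cy, w, h), the inverse mapping."""
    xy1, xy2 = boxes.split((2, 2), dim=-1)
    return torch.cat([(xy1 + xy2) / 2, xy2 - xy1], dim=-1)

b = torch.tensor([[10., 20., 4., 6.]])   # one box: center (10, 20), width 4, height 6
assert torch.allclose(xyxy_to_cxcywh(cxcywh_to_xyxy(b)), b)
print(cxcywh_to_xyxy(b))                 # tensor([[ 8., 17., 12., 23.]])
```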
import copy
import inspect
import math
import warnings
import cv2
import mmcv
import numpy as np
import albumentations
from typing import List, Optional, Sequence, Tuple, Union
from mmcv.image import imresize
from mmcv.image.geometric import _scale_size
from mmcv.transforms import BaseTransform
from mmcv.transforms import Pad as MMCV_Pad
from mmcv.transforms import RandomFlip as MMCV_RandomFlip
from mmcv.transforms import Resize as MMCV_Resize
from mmcv.transforms.utils import avoid_cache_randomness, cache_randomness
from mmengine.dataset import BaseDataset
from mmengine.utils import is_str
from numpy import random
from mmdet.registry import TRANSFORMS
from mmdet.structures.bbox import HorizontalBoxes, autocast_box_type
from mmdet.structures.mask import BitmapMasks, PolygonMasks
from mmdet.utils import log_img_scale
from imagecorruptions import corrupt
from albumentations import Compose
17,550
results['gt_bboxes'].translate_([left, top]) # expand masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].expand( int(h * ratio), int(w * ratio), top, left) # expand segmentation map if results.get('gt_seg_map', None) is not None: gt_seg = results['gt_seg_map'] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results['gt_seg_map'] = expand_gt_seg return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MinIoURandomCrop(BaseTransform): """Random crop the image & bboxes & masks & segmentation map, the cropped patches have minimum IoU requirement with original image & bboxes & masks. & segmentation map, the IoU threshold is randomly selected from min_ious. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Args: min_ious (Sequence[float]): minimum IoU threshold for all intersections with bounding boxes. min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, where a >= min_crop_size). bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size: float = 0.3, bbox_clip_border: bool = True) -> None: self.min_ious = min_ious self.sample_mode = (1, *min_ious, 0) self.min_crop_size = min_crop_size self.bbox_clip_border = bbox_clip_border @cache_randomness def _random_mode(self) -> Number: return random.choice(self.sample_mode) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to crop images and bounding boxes with minimum IoU constraint. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images and bounding boxes cropped, \ 'img_shape' key is updated. """ assert 'img' in results, '`img` is not found in results' assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results' img = results['img'] boxes = results['gt_bboxes'] h, w, c = img.shape while True: mode = self._random_mode() self.mode = mode if mode == 1: return results min_iou = self.mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = boxes.overlaps(
# Copyright (c) OpenMMLab. All rights reserved. try: except ImportError: corrupt = None try: except ImportError: albumentations = None Compose = None Number = Union[int, float] def _fixed_scale_size( size: Tuple[int, int], scale: Union[float, int, tuple], ) -> Tuple[int, int]: """Rescale a size by a ratio. Args: size (tuple[int]): (w, h). scale (float | tuple(float)): Scaling factor. Returns: tuple[int]: scaled size. """ if isinstance(scale, (float, int)): scale = (scale, scale) w, h = size # don't need o.5 offset return int(w * float(scale[0])), int(h * float(scale[1])) def rescale_size(old_size: tuple, scale: Union[float, int, tuple], return_scale: bool = False) -> tuple: """Calculate the new size to be rescaled to. Args: old_size (tuple[int]): The old size (w, h) of image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image size. Returns: tuple[int]: The new rescaled image size. """ w, h = old_size if isinstance(scale, (float, int)): if scale <= 0: raise ValueError(f'Invalid scale {scale}, must be positive.') scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( f'Scale must be a number or tuple of int, but got {type(scale)}') # only change this new_size = _fixed_scale_size((w, h), scale_factor) if return_scale: return new_size, scale_factor else: return new_size def imrescale( img: np.ndarray, scale: Union[float, Tuple[int, int]], return_scale: bool = False, interpolation: str = 'bilinear', backend: Optional[str] = None ) -> Union[np.ndarray, Tuple[np.ndarray, float]]: """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. backend (str | None): Same as :func:`resize`. Returns: ndarray: The rescaled image. """ h, w = img.shape[:2] new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) rescaled_img = imresize( img, new_size, interpolation=interpolation, backend=backend) if return_scale: return rescaled_img, scale_factor else: return rescaled_img @TRANSFORMS.register_module() class Resize(MMCV_Resize): """Resize images & bbox & seg. This transform resizes the input image according to ``scale`` or ``scale_factor``. Bboxes, masks, and seg map are then resized with the same scale factor. if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to resize. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: scale (int or tuple): Images scales for resizing. Defaults to None scale_factor (float or tuple[float]): Scale factors for resizing. Defaults to None. 
keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def _resize_masks(self, results: dict) -> None: """Resize masks with ``results['scale']``""" if results.get('gt_masks', None) is not None: if self.keep_ratio: results['gt_masks'] = results['gt_masks'].rescale( results['scale']) else: results['gt_masks'] = results['gt_masks'].resize( results['img_shape']) def _resize_bboxes(self, results: dict) -> None: """Resize bounding boxes with ``results['scale_factor']``.""" if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].rescale_(results['scale_factor']) if self.clip_object_border: results['gt_bboxes'].clip_(results['img_shape']) def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the Resize.""" w_scale, h_scale = results['scale_factor'] homography_matrix = np.array( [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. 
""" if self.scale: results['scale'] = self.scale else: img_shape = results['img'].shape[:2] results['scale'] = _scale_size(img_shape[::-1], self.scale_factor) self._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale={self.scale}, ' repr_str += f'scale_factor={self.scale_factor}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class FixScaleResize(Resize): """Compared to Resize, FixScaleResize fixes the scaling issue when `keep_ratio=true`.""" def _resize_img(self, results): """Resize images with ``results['scale']``.""" if results.get('img', None) is not None: if self.keep_ratio: img, scale_factor = imrescale( results['img'], results['scale'], interpolation=self.interpolation, return_scale=True, backend=self.backend) new_h, new_w = img.shape[:2] h, w = results['img'].shape[:2] w_scale = new_w / w h_scale = new_h / h else: img, w_scale, h_scale = mmcv.imresize( results['img'], results['scale'], interpolation=self.interpolation, return_scale=True, backend=self.backend) results['img'] = img results['img_shape'] = img.shape[:2] results['scale_factor'] = (w_scale, h_scale) results['keep_ratio'] = self.keep_ratio @TRANSFORMS.register_module() class ResizeShortestEdge(BaseTransform): """Resize the image and mask while keeping the aspect ratio unchanged. Modified from https://github.com/facebookresearch/detectron2/blob/main/detectron2/data/transforms/augmentation_impl.py#L130 # noqa:E501 This transform attempts to scale the shorter edge to the given `scale`, as long as the longer edge does not exceed `max_size`. If `max_size` is reached, then downscale so that the longer edge does not exceed `max_size`. Required Keys: - img - gt_seg_map (optional) Modified Keys: - img - img_shape - gt_seg_map (optional)) Added Keys: - scale - scale_factor - keep_ratio Args: scale (Union[int, Tuple[int, int]]): The target short edge length. If it's tuple, will select the min value as the short edge length. max_size (int): The maximum allowed longest edge length. """ def __init__(self, scale: Union[int, Tuple[int, int]], max_size: Optional[int] = None, resize_type: str = 'Resize', **resize_kwargs) -> None: super().__init__() self.scale = scale self.max_size = max_size self.resize_cfg = dict(type=resize_type, **resize_kwargs) self.resize = TRANSFORMS.build({'scale': 0, **self.resize_cfg}) def _get_output_shape( self, img: np.ndarray, short_edge_length: Union[int, Tuple[int, int]]) -> Tuple[int, int]: """Compute the target image shape with the given `short_edge_length`. Args: img (np.ndarray): The input image. short_edge_length (Union[int, Tuple[int, int]]): The target short edge length. If it's tuple, will select the min value as the short edge length. 
""" h, w = img.shape[:2] if isinstance(short_edge_length, int): size = short_edge_length * 1.0 elif isinstance(short_edge_length, tuple): size = min(short_edge_length) * 1.0 scale = size / min(h, w) if h < w: new_h, new_w = size, scale * w else: new_h, new_w = scale * h, size if self.max_size and max(new_h, new_w) > self.max_size: scale = self.max_size * 1.0 / max(new_h, new_w) new_h *= scale new_w *= scale new_h = int(new_h + 0.5) new_w = int(new_w + 0.5) return new_w, new_h def transform(self, results: dict) -> dict: self.resize.scale = self._get_output_shape(results['img'], self.scale) return self.resize(results) @TRANSFORMS.register_module() class FixShapeResize(Resize): """Resize images & bbox & seg to the specified size. This transform resizes the input image according to ``width`` and ``height``. Bboxes, masks, and seg map are then resized with the same parameters. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Added Keys: - scale - scale_factor - keep_ratio - homography_matrix Args: width (int): width for resizing. height (int): height for resizing. Defaults to None. pad_val (Number | dict[str, Number], optional): Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. If it is a dict, it should have the following keys: - img: The value to pad the image. - seg: The value to pad the semantic segmentation map. Defaults to dict(img=0, seg=255). keep_ratio (bool): Whether to keep the aspect ratio when resizing the image. Defaults to False. clip_object_border (bool): Whether to clip the objects outside the border of the image. In some dataset like MOT17, the gt bboxes are allowed to cross the border of images. Therefore, we don't need to clip the gt bboxes in these cases. Defaults to True. backend (str): Image resize backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos" for 'cv2' backend, "nearest", "bilinear" for 'pillow' backend. Defaults to 'bilinear'. """ def __init__(self, width: int, height: int, pad_val: Union[Number, dict] = dict(img=0, seg=255), keep_ratio: bool = False, clip_object_border: bool = True, backend: str = 'cv2', interpolation: str = 'bilinear') -> None: assert width is not None and height is not None, ( '`width` and' '`height` can not be `None`') self.width = width self.height = height self.scale = (width, height) self.backend = backend self.interpolation = interpolation self.keep_ratio = keep_ratio self.clip_object_border = clip_object_border if keep_ratio is True: # padding to the fixed size when keep_ratio=True self.pad_transform = Pad(size=self.scale, pad_val=pad_val) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to resize images, bounding boxes and semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map', 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys are updated in result dict. 
""" img = results['img'] h, w = img.shape[:2] if self.keep_ratio: scale_factor = min(self.width / w, self.height / h) results['scale_factor'] = (scale_factor, scale_factor) real_w, real_h = int(w * float(scale_factor) + 0.5), int(h * float(scale_factor) + 0.5) img, scale_factor = mmcv.imrescale( results['img'], (real_w, real_h), interpolation=self.interpolation, return_scale=True, backend=self.backend) # the w_scale and h_scale has minor difference # a real fix should be done in the mmcv.imrescale in the future results['img'] = img results['img_shape'] = img.shape[:2] results['keep_ratio'] = self.keep_ratio results['scale'] = (real_w, real_h) else: results['scale'] = (self.width, self.height) results['scale_factor'] = (self.width / w, self.height / h) super()._resize_img(results) self._resize_bboxes(results) self._resize_masks(results) self._resize_seg(results) self._record_homography_matrix(results) if self.keep_ratio: self.pad_transform(results) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(width={self.width}, height={self.height}, ' repr_str += f'keep_ratio={self.keep_ratio}, ' repr_str += f'clip_object_border={self.clip_object_border}), ' repr_str += f'backend={self.backend}), ' repr_str += f'interpolation={self.interpolation})' return repr_str @TRANSFORMS.register_module() class RandomFlip(MMCV_RandomFlip): """Flip the image & bbox & mask & segmentation map. Added or Updated keys: flip, flip_direction, img, gt_bboxes, and gt_seg_map. There are 3 flip modes: - ``prob`` is float, ``direction`` is string: the image will be ``direction``ly flipped with probability of ``prob`` . E.g., ``prob=0.5``, ``direction='horizontal'``, then image will be horizontally flipped with probability of 0.5. - ``prob`` is float, ``direction`` is list of string: the image will be ``direction[i]``ly flipped with probability of ``prob/len(direction)``. E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.25, vertically with probability of 0.25. - ``prob`` is list of float, ``direction`` is list of string: given ``len(prob) == len(direction)``, the image will be ``direction[i]``ly flipped with probability of ``prob[i]``. E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal', 'vertical']``, then image will be horizontally flipped with probability of 0.3, vertically with probability of 0.5. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - gt_bboxes - gt_masks - gt_seg_map Added Keys: - flip - flip_direction - homography_matrix Args: prob (float | list[float], optional): The flipping probability. Defaults to None. direction(str | list[str]): The flipping direction. Options If input is a list, the length must equal ``prob``. Each element in ``prob`` indicates the flip probability of corresponding direction. Defaults to 'horizontal'. 
""" def _record_homography_matrix(self, results: dict) -> None: """Record the homography matrix for the RandomFlip.""" cur_dir = results['flip_direction'] h, w = results['img'].shape[:2] if cur_dir == 'horizontal': homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'vertical': homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]], dtype=np.float32) elif cur_dir == 'diagonal': homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]], dtype=np.float32) else: homography_matrix = np.eye(3, dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] @autocast_box_type() def _flip(self, results: dict) -> None: """Flip images, bounding boxes, and semantic segmentation map.""" # flip image results['img'] = mmcv.imflip( results['img'], direction=results['flip_direction']) img_shape = results['img'].shape[:2] # flip bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].flip_(img_shape, results['flip_direction']) # flip masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].flip( results['flip_direction']) # flip segs if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = mmcv.imflip( results['gt_seg_map'], direction=results['flip_direction']) # record homography matrix for flip self._record_homography_matrix(results) @TRANSFORMS.register_module() class RandomShift(BaseTransform): """Shift the image and box given shift pixels and probability. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) - gt_bboxes_labels (np.int64) - gt_ignore_flags (bool) (optional) Modified Keys: - img - gt_bboxes - gt_bboxes_labels - gt_ignore_flags (bool) (optional) Args: prob (float): Probability of shifts. Defaults to 0.5. max_shift_px (int): The max pixels for shifting. Defaults to 32. filter_thr_px (int): The width and height threshold for filtering. The bbox and the rest of the targets below the width and height threshold will be filtered. Defaults to 1. """ def __init__(self, prob: float = 0.5, max_shift_px: int = 32, filter_thr_px: int = 1) -> None: assert 0 <= prob <= 1 assert max_shift_px >= 0 self.prob = prob self.max_shift_px = max_shift_px self.filter_thr_px = int(filter_thr_px) @cache_randomness def _random_prob(self) -> float: return random.uniform(0, 1) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to random shift images, bounding boxes. Args: results (dict): Result dict from loading pipeline. Returns: dict: Shift results. """ if self._random_prob() < self.prob: img_shape = results['img'].shape[:2] random_shift_x = random.randint(-self.max_shift_px, self.max_shift_px) random_shift_y = random.randint(-self.max_shift_px, self.max_shift_px) new_x = max(0, random_shift_x) ori_x = max(0, -random_shift_x) new_y = max(0, random_shift_y) ori_y = max(0, -random_shift_y) # TODO: support mask and semantic segmentation maps. bboxes = results['gt_bboxes'].clone() bboxes.translate_([random_shift_x, random_shift_y]) # clip border bboxes.clip_(img_shape) # remove invalid bboxes valid_inds = (bboxes.widths > self.filter_thr_px).numpy() & ( bboxes.heights > self.filter_thr_px).numpy() # If the shift does not contain any gt-bbox area, skip this # image. 
if not valid_inds.any(): return results bboxes = bboxes[valid_inds] results['gt_bboxes'] = bboxes results['gt_bboxes_labels'] = results['gt_bboxes_labels'][ valid_inds] if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][valid_inds] # shift img img = results['img'] new_img = np.zeros_like(img) img_h, img_w = img.shape[:2] new_h = img_h - np.abs(random_shift_y) new_w = img_w - np.abs(random_shift_x) new_img[new_y:new_y + new_h, new_x:new_x + new_w] \ = img[ori_y:ori_y + new_h, ori_x:ori_x + new_w] results['img'] = new_img return results def __repr__(self): repr_str = self.__class__.__name__ repr_str += f'(prob={self.prob}, ' repr_str += f'max_shift_px={self.max_shift_px}, ' repr_str += f'filter_thr_px={self.filter_thr_px})' return repr_str @TRANSFORMS.register_module() class Pad(MMCV_Pad): """Pad the image & segmentation map. There are three padding modes: (1) pad to a fixed size and (2) pad to the minimum size that is divisible by some number. and (3)pad to square. Also, pad to square and pad to the minimum size can be used as the same time. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_masks - gt_seg_map Added Keys: - pad_shape - pad_fixed_size - pad_size_divisor Args: size (tuple, optional): Fixed padding size. Expected padding shape (width, height). Defaults to None. size_divisor (int, optional): The divisor of padded size. Defaults to None. pad_to_square (bool): Whether to pad the image into a square. Currently only used for YOLOX. Defaults to False. pad_val (Number | dict[str, Number], optional) - Padding value for if the pad_mode is "constant". If it is a single number, the value to pad the image is the number and to pad the semantic segmentation map is 255. If it is a dict, it should have the following keys: - img: The value to pad the image. - seg: The value to pad the semantic segmentation map. Defaults to dict(img=0, seg=255). padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. Defaults to 'constant'. - constant: pads with a constant value, this value is specified with pad_val. - edge: pads with the last value at the edge of the image. - reflect: pads with reflection of image without repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode will result in [3, 2, 1, 2, 3, 4, 3, 2]. - symmetric: pads with reflection of image repeating the last value on the edge. For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode will result in [2, 1, 1, 2, 3, 4, 4, 3] """ def _pad_masks(self, results: dict) -> None: """Pad masks according to ``results['pad_shape']``.""" if results.get('gt_masks', None) is not None: pad_val = self.pad_val.get('masks', 0) pad_shape = results['pad_shape'][:2] results['gt_masks'] = results['gt_masks'].pad( pad_shape, pad_val=pad_val) def transform(self, results: dict) -> dict: """Call function to pad images, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: dict: Updated result dict. """ self._pad_img(results) self._pad_seg(results) self._pad_masks(results) return results @TRANSFORMS.register_module() class RandomCrop(BaseTransform): """Random crop the image & bboxes & masks. 
The absolute ``crop_size`` is sampled based on ``crop_type`` and ``image_size``, then the cropped results are generated. Required Keys: - img - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes (optional) - gt_bboxes_labels (optional) - gt_masks (optional) - gt_ignore_flags (optional) - gt_seg_map (optional) - gt_instances_ids (options, only used in MOT/VIS) Added Keys: - homography_matrix Args: crop_size (tuple): The relative ratio or absolute pixels of (width, height). crop_type (str, optional): One of "relative_range", "relative", "absolute", "absolute_range". "relative" randomly crops (h * crop_size[0], w * crop_size[1]) part from an input of size (h, w). "relative_range" uniformly samples relative crop size from range [crop_size[0], 1] and [crop_size[1], 1] for height and width respectively. "absolute" crops from an input with absolute size (crop_size[0], crop_size[1]). "absolute_range" uniformly samples crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w in range [crop_size[0], min(w, crop_size[1])]. Defaults to "absolute". allow_negative_crop (bool, optional): Whether to allow a crop that does not contain any bbox area. Defaults to False. recompute_bbox (bool, optional): Whether to re-compute the boxes based on cropped instance masks. Defaults to False. bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Note: - If the image is smaller than the absolute crop size, return the original image. - The keys for bboxes, labels and masks must be aligned. That is, ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and ``gt_masks_ignore``. - If the crop does not contain any gt-bbox region and ``allow_negative_crop`` is set to False, skip this image. """ def __init__(self, crop_size: tuple, crop_type: str = 'absolute', allow_negative_crop: bool = False, recompute_bbox: bool = False, bbox_clip_border: bool = True) -> None: if crop_type not in [ 'relative_range', 'relative', 'absolute', 'absolute_range' ]: raise ValueError(f'Invalid crop_type {crop_type}.') if crop_type in ['absolute', 'absolute_range']: assert crop_size[0] > 0 and crop_size[1] > 0 assert isinstance(crop_size[0], int) and isinstance( crop_size[1], int) if crop_type == 'absolute_range': assert crop_size[0] <= crop_size[1] else: assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1 self.crop_size = crop_size self.crop_type = crop_type self.allow_negative_crop = allow_negative_crop self.bbox_clip_border = bbox_clip_border self.recompute_bbox = recompute_bbox def _crop_data(self, results: dict, crop_size: Tuple[int, int], allow_negative_crop: bool) -> Union[dict, None]: """Function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. crop_size (Tuple[int, int]): Expected absolute size after cropping, (h, w). allow_negative_crop (bool): Whether to allow a crop that does not contain any bbox area. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. 
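
        For example, with ``offset_w=10`` and ``offset_h=5`` the bboxes are
        translated by ``(-10, -5)`` and the recorded homography matrix is
        ``[[1, 0, -10], [0, 1, -5], [0, 0, 1]]``.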
""" assert crop_size[0] > 0 and crop_size[1] > 0 img = results['img'] margin_h = max(img.shape[0] - crop_size[0], 0) margin_w = max(img.shape[1] - crop_size[1], 0) offset_h, offset_w = self._rand_offset((margin_h, margin_w)) crop_y1, crop_y2 = offset_h, offset_h + crop_size[0] crop_x1, crop_x2 = offset_w, offset_w + crop_size[1] # Record the homography matrix for the RandomCrop homography_matrix = np.array( [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]], dtype=np.float32) if results.get('homography_matrix', None) is None: results['homography_matrix'] = homography_matrix else: results['homography_matrix'] = homography_matrix @ results[ 'homography_matrix'] # crop the image img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...] img_shape = img.shape results['img'] = img results['img_shape'] = img_shape[:2] # crop bboxes accordingly and clip to the image boundary if results.get('gt_bboxes', None) is not None: bboxes = results['gt_bboxes'] bboxes.translate_([-offset_w, -offset_h]) if self.bbox_clip_border: bboxes.clip_(img_shape[:2]) valid_inds = bboxes.is_inside(img_shape[:2]).numpy() # If the crop does not contain any gt-bbox area and # allow_negative_crop is False, skip this image. if (not valid_inds.any() and not allow_negative_crop): return None results['gt_bboxes'] = bboxes[valid_inds] if results.get('gt_ignore_flags', None) is not None: results['gt_ignore_flags'] = \ results['gt_ignore_flags'][valid_inds] if results.get('gt_bboxes_labels', None) is not None: results['gt_bboxes_labels'] = \ results['gt_bboxes_labels'][valid_inds] if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'][ valid_inds.nonzero()[0]].crop( np.asarray([crop_x1, crop_y1, crop_x2, crop_y2])) if self.recompute_bbox: results['gt_bboxes'] = results['gt_masks'].get_bboxes( type(results['gt_bboxes'])) # We should remove the instance ids corresponding to invalid boxes. if results.get('gt_instances_ids', None) is not None: results['gt_instances_ids'] = \ results['gt_instances_ids'][valid_inds] # crop semantic seg if results.get('gt_seg_map', None) is not None: results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2, crop_x1:crop_x2] return results @cache_randomness def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]: """Randomly generate crop offset. Args: margin (Tuple[int, int]): The upper bound for the offset generated randomly. Returns: Tuple[int, int]: The random offset for the crop. """ margin_h, margin_w = margin offset_h = np.random.randint(0, margin_h + 1) offset_w = np.random.randint(0, margin_w + 1) return offset_h, offset_w @cache_randomness def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]: """Randomly generates the absolute crop size based on `crop_type` and `image_size`. Args: image_size (Tuple[int, int]): (h, w). Returns: crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels. 
""" h, w = image_size if self.crop_type == 'absolute': return min(self.crop_size[1], h), min(self.crop_size[0], w) elif self.crop_type == 'absolute_range': crop_h = np.random.randint( min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1) crop_w = np.random.randint( min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1) return crop_h, crop_w elif self.crop_type == 'relative': crop_w, crop_h = self.crop_size return int(h * crop_h + 0.5), int(w * crop_w + 0.5) else: # 'relative_range' crop_size = np.asarray(self.crop_size, dtype=np.float32) crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size) return int(h * crop_h + 0.5), int(w * crop_w + 0.5) @autocast_box_type() def transform(self, results: dict) -> Union[dict, None]: """Transform function to randomly crop images, bounding boxes, masks, semantic segmentation maps. Args: results (dict): Result dict from loading pipeline. Returns: results (Union[dict, None]): Randomly cropped results, 'img_shape' key in result dict is updated according to crop size. None will be returned when there is no valid bbox after cropping. """ image_size = results['img'].shape[:2] crop_size = self._get_crop_size(image_size) results = self._crop_data(results, crop_size, self.allow_negative_crop) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(crop_size={self.crop_size}, ' repr_str += f'crop_type={self.crop_type}, ' repr_str += f'allow_negative_crop={self.allow_negative_crop}, ' repr_str += f'recompute_bbox={self.recompute_bbox}, ' repr_str += f'bbox_clip_border={self.bbox_clip_border})' return repr_str @TRANSFORMS.register_module() class SegRescale(BaseTransform): """Rescale semantic segmentation maps. This transform rescale the ``gt_seg_map`` according to ``scale_factor``. Required Keys: - gt_seg_map Modified Keys: - gt_seg_map Args: scale_factor (float): The scale factor of the final output. Defaults to 1. backend (str): Image rescale backend, choices are 'cv2' and 'pillow'. These two backends generates slightly different results. Defaults to 'cv2'. """ def __init__(self, scale_factor: float = 1, backend: str = 'cv2') -> None: self.scale_factor = scale_factor self.backend = backend def transform(self, results: dict) -> dict: """Transform function to scale the semantic segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with semantic segmentation map scaled. """ if self.scale_factor != 1: results['gt_seg_map'] = mmcv.imrescale( results['gt_seg_map'], self.scale_factor, interpolation='nearest', backend=self.backend) return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(scale_factor={self.scale_factor}, ' repr_str += f'backend={self.backend})' return repr_str @TRANSFORMS.register_module() class PhotoMetricDistortion(BaseTransform): """Apply photometric distortion to image sequentially, every transformation is applied with a probability of 0.5. The position of random contrast is in second or second to last. 1. random brightness 2. random contrast (mode 0) 3. convert color from BGR to HSV 4. random saturation 5. random hue 6. convert color from HSV to BGR 7. random contrast (mode 1) 8. randomly swap channels Required Keys: - img (np.uint8) Modified Keys: - img (np.float32) Args: brightness_delta (int): delta of brightness. contrast_range (sequence): range of contrast. saturation_range (sequence): range of saturation. hue_delta (int): delta of hue. 
""" def __init__(self, brightness_delta: int = 32, contrast_range: Sequence[Number] = (0.5, 1.5), saturation_range: Sequence[Number] = (0.5, 1.5), hue_delta: int = 18) -> None: self.brightness_delta = brightness_delta self.contrast_lower, self.contrast_upper = contrast_range self.saturation_lower, self.saturation_upper = saturation_range self.hue_delta = hue_delta @cache_randomness def _random_flags(self) -> Sequence[Number]: mode = random.randint(2) brightness_flag = random.randint(2) contrast_flag = random.randint(2) saturation_flag = random.randint(2) hue_flag = random.randint(2) swap_flag = random.randint(2) delta_value = random.uniform(-self.brightness_delta, self.brightness_delta) alpha_value = random.uniform(self.contrast_lower, self.contrast_upper) saturation_value = random.uniform(self.saturation_lower, self.saturation_upper) hue_value = random.uniform(-self.hue_delta, self.hue_delta) swap_value = random.permutation(3) return (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) def transform(self, results: dict) -> dict: """Transform function to perform photometric distortion on images. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images distorted. """ assert 'img' in results, '`img` is not found in results' img = results['img'] img = img.astype(np.float32) (mode, brightness_flag, contrast_flag, saturation_flag, hue_flag, swap_flag, delta_value, alpha_value, saturation_value, hue_value, swap_value) = self._random_flags() # random brightness if brightness_flag: img += delta_value # mode == 0 --> do random contrast first # mode == 1 --> do random contrast last if mode == 1: if contrast_flag: img *= alpha_value # convert color from BGR to HSV img = mmcv.bgr2hsv(img) # random saturation if saturation_flag: img[..., 1] *= saturation_value # For image(type=float32), after convert bgr to hsv by opencv, # valid saturation value range is [0, 1] if saturation_value > 1: img[..., 1] = img[..., 1].clip(0, 1) # random hue if hue_flag: img[..., 0] += hue_value img[..., 0][img[..., 0] > 360] -= 360 img[..., 0][img[..., 0] < 0] += 360 # convert color from HSV to BGR img = mmcv.hsv2bgr(img) # random contrast if mode == 0: if contrast_flag: img *= alpha_value # randomly swap channels if swap_flag: img = img[..., swap_value] results['img'] = img return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(brightness_delta={self.brightness_delta}, ' repr_str += 'contrast_range=' repr_str += f'{(self.contrast_lower, self.contrast_upper)}, ' repr_str += 'saturation_range=' repr_str += f'{(self.saturation_lower, self.saturation_upper)}, ' repr_str += f'hue_delta={self.hue_delta})' return repr_str @TRANSFORMS.register_module() class Expand(BaseTransform): """Random expand the image & bboxes & masks & segmentation map. Randomly place the original image on a canvas of ``ratio`` x original image size filled with mean values. The ratio is in the range of ratio_range. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_masks - gt_seg_map Args: mean (sequence): mean value of dataset. to_rgb (bool): if need to convert the order of mean to align with RGB. ratio_range (sequence)): range of expand ratio. seg_ignore_label (int): label of ignore segmentation map. 
prob (float): probability of applying this transformation """ def __init__(self, mean: Sequence[Number] = (0, 0, 0), to_rgb: bool = True, ratio_range: Sequence[Number] = (1, 4), seg_ignore_label: int = None, prob: float = 0.5) -> None: self.to_rgb = to_rgb self.ratio_range = ratio_range if to_rgb: self.mean = mean[::-1] else: self.mean = mean self.min_ratio, self.max_ratio = ratio_range self.seg_ignore_label = seg_ignore_label self.prob = prob @cache_randomness def _random_prob(self) -> float: return random.uniform(0, 1) @cache_randomness def _random_ratio(self) -> float: return random.uniform(self.min_ratio, self.max_ratio) @cache_randomness def _random_left_top(self, ratio: float, h: int, w: int) -> Tuple[int, int]: left = int(random.uniform(0, w * ratio - w)) top = int(random.uniform(0, h * ratio - h)) return left, top @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to expand images, bounding boxes, masks, segmentation map. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images, bounding boxes, masks, segmentation map expanded. """ if self._random_prob() > self.prob: return results assert 'img' in results, '`img` is not found in results' img = results['img'] h, w, c = img.shape ratio = self._random_ratio() # speedup expand when meets large image if np.all(self.mean == self.mean[0]): expand_img = np.empty((int(h * ratio), int(w * ratio), c), img.dtype) expand_img.fill(self.mean[0]) else: expand_img = np.full((int(h * ratio), int(w * ratio), c), self.mean, dtype=img.dtype) left, top = self._random_left_top(ratio, h, w) expand_img[top:top + h, left:left + w] = img results['img'] = expand_img results['img_shape'] = expand_img.shape[:2] # expand bboxes if results.get('gt_bboxes', None) is not None: results['gt_bboxes'].translate_([left, top]) # expand masks if results.get('gt_masks', None) is not None: results['gt_masks'] = results['gt_masks'].expand( int(h * ratio), int(w * ratio), top, left) # expand segmentation map if results.get('gt_seg_map', None) is not None: gt_seg = results['gt_seg_map'] expand_gt_seg = np.full((int(h * ratio), int(w * ratio)), self.seg_ignore_label, dtype=gt_seg.dtype) expand_gt_seg[top:top + h, left:left + w] = gt_seg results['gt_seg_map'] = expand_gt_seg return results def __repr__(self) -> str: repr_str = self.__class__.__name__ repr_str += f'(mean={self.mean}, to_rgb={self.to_rgb}, ' repr_str += f'ratio_range={self.ratio_range}, ' repr_str += f'seg_ignore_label={self.seg_ignore_label}, ' repr_str += f'prob={self.prob})' return repr_str @TRANSFORMS.register_module() class MinIoURandomCrop(BaseTransform): """Random crop the image & bboxes & masks & segmentation map, the cropped patches have minimum IoU requirement with original image & bboxes & masks. & segmentation map, the IoU threshold is randomly selected from min_ious. Required Keys: - img - img_shape - gt_bboxes (BaseBoxes[torch.float32]) (optional) - gt_bboxes_labels (np.int64) (optional) - gt_masks (BitmapMasks | PolygonMasks) (optional) - gt_ignore_flags (bool) (optional) - gt_seg_map (np.uint8) (optional) Modified Keys: - img - img_shape - gt_bboxes - gt_bboxes_labels - gt_masks - gt_ignore_flags - gt_seg_map Args: min_ious (Sequence[float]): minimum IoU threshold for all intersections with bounding boxes. min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w, where a >= min_crop_size). bbox_clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. 
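
    Examples:
        A pipeline-config style sketch (as typically written in mmdet config
        files; the values are illustrative):

        >>> train_pipeline = [
        ...     dict(
        ...         type='MinIoURandomCrop',
        ...         min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        ...         min_crop_size=0.3),
        ...     dict(type='RandomFlip', prob=0.5),
        ... ]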
""" def __init__(self, min_ious: Sequence[float] = (0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size: float = 0.3, bbox_clip_border: bool = True) -> None: self.min_ious = min_ious self.sample_mode = (1, *min_ious, 0) self.min_crop_size = min_crop_size self.bbox_clip_border = bbox_clip_border @cache_randomness def _random_mode(self) -> Number: return random.choice(self.sample_mode) @autocast_box_type() def transform(self, results: dict) -> dict: """Transform function to crop images and bounding boxes with minimum IoU constraint. Args: results (dict): Result dict from loading pipeline. Returns: dict: Result dict with images and bounding boxes cropped, \ 'img_shape' key is updated. """ assert 'img' in results, '`img` is not found in results' assert 'gt_bboxes' in results, '`gt_bboxes` is not found in results' img = results['img'] boxes = results['gt_bboxes'] h, w, c = img.shape while True: mode = self._random_mode() self.mode = mode if mode == 1: return results min_iou = self.mode for i in range(50): new_w = random.uniform(self.min_crop_size * w, w) new_h = random.uniform(self.min_crop_size * h, h) # h / w in [0.5, 2] if new_h / new_w < 0.5 or new_h / new_w > 2: continue left = random.uniform(w - new_w) top = random.uniform(h - new_h) patch = np.array( (int(left), int(top), int(left + new_w), int(top + new_h))) # Line or point crop is not allowed if patch[2] == patch[0] or patch[3] == patch[1]: continue overlaps = boxes.overlaps(
HorizontalBoxes(patch.reshape(-1, 4).astype(np.float32)),
2
2023-12-11 15:23:03+00:00
24k
open-mmlab/PIA
animatediff/pipelines/validation_pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module = True,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # Image to Video Conv\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = 
InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n @property\n def attn_processors(self) -> Dict[str, AttnProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttnProcessor]):\n if hasattr(module, \"set_processor\"):\n processors[f\"{name}.processor\"] = module.processor\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(self, processor: Union[AttnProcessor, Dict[str, AttnProcessor]]):\n r\"\"\"\n Parameters:\n `processor (`dict` of `AttnProcessor` or `AttnProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n of **all** `CrossAttention` layers.\n In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainablae attention processors.:\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n print(f'Set {module}')\n module.set_processor(processor)\n else:\n print(f'Set {module}')\n module.set_processor(processor.pop(f\"{name}.processor\"))\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n mask_sample: torch.FloatTensor,\n masked_sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n image_embeds: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # image to video b c f h w\n sample = torch.cat([sample, mask_sample, masked_sample], dim=1).to(sample.device)\n\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * - 10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # prepare for ip-adapter\n if image_embeds is not None:\n image_embeds = self.encoder_hid_proj(\n image_embeds).to(encoder_hidden_states.dtype)\n encoder_hidden_states = torch.cat(\n [encoder_hidden_states, image_embeds], dim=1)\n\n # pre-process\n # b c f h w\n # 2 4 16 64 64\n sample = self.conv_in(sample)\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = 
cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. 
If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path 
= {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n 
new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n return text_model_dict" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config, only_decoder=False, only_encoder=False):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = 
vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": 
f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n if only_decoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('decoder') or k.startswith('post_quant')}\n elif only_encoder:\n new_checkpoint = {k: v for k, v in new_checkpoint.items() if k.startswith('encoder') or k.startswith('quant')}\n\n return new_checkpoint" }, { "identifier": "convert_lora", "path": "animatediff/utils/convert_lora_safetensor_to_diffusers.py", "snippet": "def convert_lora(pipeline, state_dict, LORA_PREFIX_UNET=\"lora_unet\", LORA_PREFIX_TEXT_ENCODER=\"lora_te\", alpha=0.6):\n # load base model\n # pipeline = StableDiffusionPipeline.from_pretrained(base_model_path, torch_dtype=torch.float32)\n\n # load LoRA weight from .safetensors\n # state_dict = load_file(checkpoint_path)\n\n visited = []\n\n # directly update weight in diffusers model\n for key in state_dict:\n # it is suggested to print out the key, it usually will be something like below\n # \"lora_te_text_model_encoder_layers_0_self_attn_k_proj.lora_down.weight\"\n\n # as we have set the alpha beforehand, so just skip\n if \".alpha\" in key or key in visited:\n continue\n\n if \"text\" in key:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_TEXT_ENCODER + \"_\")[-1].split(\"_\")\n curr_layer = pipeline.text_encoder\n else:\n layer_infos = key.split(\".\")[0].split(LORA_PREFIX_UNET + \"_\")[-1].split(\"_\")\n curr_layer = pipeline.unet\n\n # find the target layer\n temp_name = layer_infos.pop(0)\n while len(layer_infos) > -1:\n try:\n curr_layer = curr_layer.__getattr__(temp_name)\n if len(layer_infos) > 0:\n temp_name = layer_infos.pop(0)\n elif len(layer_infos) == 0:\n break\n except Exception:\n if len(temp_name) > 0:\n temp_name += \"_\" + layer_infos.pop(0)\n else:\n temp_name = layer_infos.pop(0)\n\n pair_keys = []\n if \"lora_down\" in key:\n pair_keys.append(key.replace(\"lora_down\", \"lora_up\"))\n pair_keys.append(key)\n else:\n pair_keys.append(key)\n pair_keys.append(key.replace(\"lora_up\", \"lora_down\"))\n\n # update weight\n if len(state_dict[pair_keys[0]].shape) == 4:\n weight_up = state_dict[pair_keys[0]].squeeze(3).squeeze(2).to(torch.float32)\n weight_down = state_dict[pair_keys[1]].squeeze(3).squeeze(2).to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).unsqueeze(2).unsqueeze(3).to(curr_layer.weight.data.device)\n else:\n weight_up = state_dict[pair_keys[0]].to(torch.float32)\n weight_down = state_dict[pair_keys[1]].to(torch.float32)\n curr_layer.weight.data += alpha * torch.mm(weight_up, weight_down).to(curr_layer.weight.data.device)\n\n # update visited list\n for item in pair_keys:\n visited.append(item)\n\n return pipeline" }, { "identifier": "prepare_mask_coef", "path": "animatediff/utils/util.py", "snippet": "def prepare_mask_coef(video_length:int, cond_frame:int, sim_range:list=[0.2, 1.0]):\n\n assert len(sim_range) == 2, \\\n 'sim_range should has the length of 2, including the min and max similarity'\n\n assert video_length > 1, \\\n 'video_length should be greater than 1'\n\n assert video_length > 
cond_frame,\\\n 'video_length should be greater than cond_frame'\n\n diff = abs(sim_range[0] - sim_range[1]) / (video_length - 1)\n coef = [1.0] * video_length\n for f in range(video_length):\n f_diff = diff * abs(cond_frame - f)\n f_diff = 1 - f_diff\n coef[f] *= f_diff\n\n return coef" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = torch.clamp((x * 255), 0, 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "InflatedConv3d", "path": "animatediff/models/resnet.py", "snippet": "class InflatedConv3d(nn.Conv2d):\n def forward(self, x):\n video_length = x.shape[2]\n\n x = rearrange(x, \"b c f h w -> (b f) c h w\")\n x = super().forward(x)\n x = rearrange(x, \"(b f) c h w -> b c f h w\", f=video_length)\n\n return x" } ]
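The context entry above quotes prepare_mask_coef in full; as a quick sanity check (not part of the record), the coefficients keep the conditioning frame at 1.0 and fall off linearly with distance from it:

from animatediff.utils.util import prepare_mask_coef

# Illustrative only: weights decay by abs(sim_range[0] - sim_range[1]) / (video_length - 1)
# per frame of distance from cond_frame, as in the snippet quoted above.
coef = prepare_mask_coef(video_length=16, cond_frame=0, sim_range=[0.2, 1.0])
assert abs(coef[0] - 1.0) < 1e-6     # the conditioning frame keeps full weight
assert coef[-1] < coef[0]            # far frames get smaller coefficients
print([round(c, 3) for c in coef])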
import inspect import random import argparse import numpy as np import torch import os from typing import Callable, List, Optional, Union from dataclasses import dataclass from tqdm import tqdm from omegaconf import OmegaConf from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from safetensors import safe_open from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipelines import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from animatediff.models.unet import UNet3DConditionModel from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint from animatediff.utils.convert_lora_safetensor_to_diffusers import convert_lora from animatediff.utils.util import prepare_mask_coef, save_videos_grid from animatediff.models.resnet import InflatedConv3d from PIL import Image from accelerate import cpu_offload
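A small illustration (assuming an environment with animatediff installed; not taken from the record) of the InflatedConv3d helper imported above: frames are folded into the batch axis so an ordinary Conv2d weight is reused across time.

import torch
from animatediff.models.resnet import InflatedConv3d

# InflatedConv3d subclasses nn.Conv2d; its forward rearranges
# (b, c, f, h, w) -> ((b f), c, h, w), convolves, and folds the frames back.
conv = InflatedConv3d(3, 16, kernel_size=3, padding=1)
video = torch.randn(2, 3, 8, 64, 64)   # (batch, channels, frames, H, W)
out = conv(video)
print(out.shape)                       # torch.Size([2, 16, 8, 64, 64])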
14,614
raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], use_image: bool, video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. 
do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype if use_image != False: shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) image = Image.open(f'test_image/init_image{use_image}.png').convert('RGB') image = preprocess_image(image).to(device) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0).to(device=device) else: image_latent = self.vae.encode(image).latent_dist.sample(generator).to(device=device) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device)
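For orientation only, a sketch (not from the record) of the latent `shape` assembled in the excerpt above, assuming the standard Stable Diffusion VAE for which vae_scale_factor works out to 8.

# Assumed values: a 512x512 render, 16 frames, 4 latent channels, and a VAE
# with four block_out_channels so 2 ** (len(block_out_channels) - 1) == 8.
batch_size, num_channels_latents, video_length = 1, 4, 16
height = width = 512
vae_scale_factor = 8
shape = (batch_size, num_channels_latents, video_length,
         height // vae_scale_factor, width // vae_scale_factor)
print(shape)   # (1, 4, 16, 64, 64)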
# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py PIL_INTERPOLATION = { "linear": Image.Resampling.BILINEAR, "bilinear": Image.Resampling.BILINEAR, "bicubic": Image.Resampling.BICUBIC, "lanczos": Image.Resampling.LANCZOS, "nearest": Image.Resampling.NEAREST, } def preprocess_image(image): if isinstance(image, torch.Tensor): return image elif isinstance(image, Image.Image): image = [image] if isinstance(image[0], Image.Image): w, h = image[0].size w, h = map(lambda x: x - x % 8, (w, h)) # resize to integer multiple of 8 image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image] image = np.concatenate(image, axis=0) if len(image.shape) == 3: image = image.reshape(image.shape[0], image.shape[1], image.shape[2], 1) image = np.array(image).astype(np.float32) / 255.0 image = image.transpose(0, 3, 1, 2) image = 2.0 * image - 1.0 image = torch.from_numpy(image) elif isinstance(image[0], torch.Tensor): image = torch.cat(image, dim=0) return image logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class ValidationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0])): video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None): shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): shape = shape # shape = (1,) + shape[1:] latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents @torch.no_grad() def __call__( self, prompt: Union[str, List[str]], use_image: bool, video_length: Optional[int], height: Optional[int] = None, width: Optional[int] = None, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_videos_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "tensor", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: Optional[int] = 1, **kwargs, ): # Default height and width to unet height = height or self.unet.config.sample_size * self.vae_scale_factor width = width or self.unet.config.sample_size * self.vae_scale_factor # Check inputs. 
Raise error if not correct self.check_inputs(prompt, height, width, callback_steps) # Define call parameters # batch_size = 1 if isinstance(prompt, str) else len(prompt) batch_size = 1 if latents is not None: batch_size = latents.shape[0] if isinstance(prompt, list): batch_size = len(prompt) device = self._execution_device # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` # corresponds to doing no classifier free guidance. do_classifier_free_guidance = guidance_scale > 1.0 # Encode input prompt prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size if negative_prompt is not None: negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size text_embeddings = self._encode_prompt( prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt ) # Prepare timesteps self.scheduler.set_timesteps(num_inference_steps, device=device) timesteps = self.scheduler.timesteps # Prepare latent variables num_channels_latents = self.unet.in_channels latents = self.prepare_latents( batch_size * num_videos_per_prompt, num_channels_latents, video_length, height, width, text_embeddings.dtype, device, generator, latents, ) latents_dtype = latents.dtype if use_image != False: shape = (batch_size, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor) image = Image.open(f'test_image/init_image{use_image}.png').convert('RGB') image = preprocess_image(image).to(device) if isinstance(generator, list): image_latent = [ self.vae.encode(image[k : k + 1]).latent_dist.sample(generator[k]) for k in range(batch_size) ] image_latent = torch.cat(image_latent, dim=0).to(device=device) else: image_latent = self.vae.encode(image).latent_dist.sample(generator).to(device=device) image_latent = torch.nn.functional.interpolate(image_latent, size=[shape[-2], shape[-1]]) image_latent_padding = image_latent.clone() * 0.18215 mask = torch.zeros((shape[0], 1, shape[2], shape[3], shape[4])).to(device)
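A brief check (not part of the record) mirroring the arithmetic of the preprocess_image helper defined near the top of the file above: the image size is snapped down to a multiple of 8 and pixel values are mapped to [-1, 1].

import numpy as np
from PIL import Image

img = Image.new("RGB", (515, 300), color=(255, 255, 255))
w, h = map(lambda x: x - x % 8, img.size)           # (512, 296)
arr = np.array(img.resize((w, h))).astype(np.float32) / 255.0
arr = arr.transpose(2, 0, 1)[None]                  # NCHW layout, like the helper
arr = 2.0 * arr - 1.0                               # white pixels become 1.0
print(arr.shape, arr.min(), arr.max())              # (1, 3, 296, 512) 1.0 1.0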
mask_coef = prepare_mask_coef(video_length, 0, kwargs['mask_sim_range'])
5
2023-12-21 03:29:34+00:00
24k
chinhsuanwu/ifusion
model/zero123.py
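Not taken from the record: a hypothetical round trip with the ldm.lora helpers excerpted in the context entries just below (the unet argument and the training step are placeholders).

import torch.nn as nn
from ldm.lora import (
    inject_trainable_lora_extended,
    monkeypatch_remove_lora,
    save_lora_weight,
)

def finetune_with_lora(unet: nn.Module, rank: int = 4) -> nn.Module:
    # Replace Linear/Conv2d layers with LoRA-injected versions; only the new
    # low-rank factors come back as trainable parameter groups.
    lora_params, _ = inject_trainable_lora_extended(unet, r=rank, eval=False)
    # ... run a fine-tuning loop over `lora_params` here (placeholder) ...
    save_lora_weight(unet, path="./lora.pt")   # persist just the LoRA factors
    monkeypatch_remove_lora(unet)              # swap the plain modules back in
    return unet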
[ { "identifier": "inject_trainable_lora_extended", "path": "ldm/lora.py", "snippet": "def inject_trainable_lora_extended(\n model: nn.Module,\n target_replace_module: Set[str] = UNET_EXTENDED_TARGET_REPLACE,\n r: int = 4,\n loras=None, # path to lora .pt\n eval=True,\n):\n \"\"\"\n inject lora into model, and returns lora parameter groups.\n \"\"\"\n\n require_grad_params = []\n names = []\n\n if loras != None:\n loras = torch.load(loras, map_location=model.device)\n\n for _module, name, _child_module in _find_modules(\n model, target_replace_module, search_class=[nn.Linear, nn.Conv2d]\n ):\n if _child_module.__class__ == nn.Linear:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedLinear(\n _child_module.in_features,\n _child_module.out_features,\n _child_module.bias is not None,\n r=r,\n )\n _tmp.linear.weight = weight\n if bias is not None:\n _tmp.linear.bias = bias\n elif _child_module.__class__ == nn.Conv2d:\n weight = _child_module.weight\n bias = _child_module.bias\n _tmp = LoraInjectedConv2d(\n _child_module.in_channels,\n _child_module.out_channels,\n _child_module.kernel_size,\n _child_module.stride,\n _child_module.padding,\n _child_module.dilation,\n _child_module.groups,\n _child_module.bias is not None,\n r=r,\n )\n\n _tmp.conv.weight = weight\n if bias is not None:\n _tmp.conv.bias = bias\n\n # switch the module\n _tmp.to(_child_module.weight.device).to(_child_module.weight.dtype)\n if bias is not None:\n _tmp.to(_child_module.bias.device).to(_child_module.bias.dtype)\n\n _module._modules[name] = _tmp\n\n require_grad_params.append(_module._modules[name].lora_up.parameters())\n require_grad_params.append(_module._modules[name].lora_down.parameters())\n\n if loras != None:\n _module._modules[name].lora_up.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n _module._modules[name].lora_down.weight = nn.Parameter(loras.pop(0).to(model.dtype))\n\n _module._modules[name].lora_up.weight.requires_grad = True if not eval else False\n _module._modules[name].lora_down.weight.requires_grad = True if not eval else False\n names.append(name)\n\n return require_grad_params, names" }, { "identifier": "monkeypatch_remove_lora", "path": "ldm/lora.py", "snippet": "def monkeypatch_remove_lora(model):\n for _module, name, _child_module in _find_modules(\n model, search_class=[LoraInjectedLinear, LoraInjectedConv2d]\n ):\n if isinstance(_child_module, LoraInjectedLinear):\n _source = _child_module.linear\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Linear(\n _source.in_features, _source.out_features, bias is not None\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n else:\n _source = _child_module.conv\n weight, bias = _source.weight, _source.bias\n\n _tmp = nn.Conv2d(\n in_channels=_source.in_channels,\n out_channels=_source.out_channels,\n kernel_size=_source.kernel_size,\n stride=_source.stride,\n padding=_source.padding,\n dilation=_source.dilation,\n groups=_source.groups,\n bias=bias is not None,\n )\n\n _tmp.weight = weight\n if bias is not None:\n _tmp.bias = bias\n\n _module._modules[name] = _tmp" }, { "identifier": "save_lora_weight", "path": "ldm/lora.py", "snippet": "def save_lora_weight(\n model,\n path=\"./lora.pt\",\n target_replace_module=DEFAULT_TARGET_REPLACE,\n):\n weights = []\n for _up, _down in extract_lora_ups_down(\n model, target_replace_module=target_replace_module\n ):\n weights.append(_up.weight.to(\"cpu\").to(torch.float16))\n weights.append(_down.weight.to(\"cpu\").to(torch.float16))\n\n 
torch.save(weights, path)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(\n self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image_cond\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n unet_trainable=True,\n *args,\n **kwargs,\n ):\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs[\"timesteps\"]\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = \"concat\" if concat_mode else \"crossattn\"\n if cond_stage_config == \"__is_unconditional__\":\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.unet_trainable = unet_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer(\"scale_factor\", torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n\n # construct linear projection layer for concatenating image CLIP embedding and RT\n self.cc_projection = nn.Linear(772, 768)\n nn.init.eye_(list(self.cc_projection.parameters())[0][:768, :768])\n nn.init.zeros_(list(self.cc_projection.parameters())[1])\n self.cc_projection.requires_grad_(True)\n\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n\n def make_cond_schedule(\n self,\n ):\n self.cond_ids = torch.full(\n size=(self.num_timesteps,),\n fill_value=self.num_timesteps - 1,\n dtype=torch.long,\n )\n ids = torch.round(\n torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)\n ).long()\n self.cond_ids[: self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if (\n self.scale_by_std\n and self.current_epoch == 0\n and self.global_step == 0\n and batch_idx == 0\n and not self.restarted_from_ckpt\n ):\n assert (\n self.scale_factor == 1.0\n ), \"rather not use custom rescaling and std-rescaling simultaneously\"\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer(\"scale_factor\", 1.0 / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(\n self,\n given_betas=None,\n beta_schedule=\"linear\",\n timesteps=1000,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3,\n ):\n super().register_schedule(\n given_betas, beta_schedule, timesteps, linear_start, linear_end, 
cosine_s\n )\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != \"__is_first_stage__\"\n assert config != \"__is_unconditional__\"\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(\n self, samples, desc=\"\", force_no_decoder_quantization=False\n ):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(\n self.decode_first_stage(\n zd.to(self.device), force_not_quantize=force_no_decoder_quantization\n )\n )\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, \"n b c h w -> b n c h w\")\n denoise_grid = rearrange(denoise_grid, \"b n c h w -> (b n) c h w\")\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(\n f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\"\n )\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, \"encode\") and callable(\n self.cond_stage_model.encode\n ):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(\n torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1\n )[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(\n weighting,\n 
self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"],\n )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(\n L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"],\n )\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(\n self, x, kernel_size, stride, uf=1, df=1\n ): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(\n kernel_size[0], kernel_size[1], Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1,\n padding=0,\n stride=(stride[0] * uf, stride[1] * uf),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h * uf, w * uf\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)\n )\n\n elif df > 1 and uf == 1:\n fold_params = dict(\n kernel_size=kernel_size, dilation=1, padding=0, stride=stride\n )\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(\n kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1,\n padding=0,\n stride=(stride[0] // df, stride[1] // df),\n )\n fold = torch.nn.Fold(\n output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2\n )\n\n weighting = self.get_weighting(\n kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device\n ).to(x.dtype)\n normalization = fold(weighting).view(\n 1, 1, h // df, w // df\n ) # normalizes the overlap\n weighting = weighting.view(\n (1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)\n )\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(\n self,\n batch,\n k,\n return_first_stage_outputs=False,\n force_c_encode=False,\n cond_key=None,\n return_original_cond=False,\n bs=None,\n uncond=0.05,\n ):\n x = super().get_input(batch, k)\n T = batch[\"T\"].to(memory_format=torch.contiguous_format).float()\n\n if bs is not None:\n x = x[:bs]\n T = T[:bs].to(self.device)\n\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n cond_key = cond_key or self.cond_stage_key\n xc = super().get_input(batch, cond_key).to(self.device)\n if bs is not 
None:\n xc = xc[:bs]\n cond = {}\n\n # To support classifier-free guidance, randomly drop out only text conditioning 5%, only image conditioning 5%, and both 5%.\n random = torch.rand(x.size(0), device=x.device)\n prompt_mask = rearrange(random < 2 * uncond, \"n -> n 1 1\")\n input_mask = 1 - rearrange(\n (random >= uncond).float() * (random < 3 * uncond).float(), \"n -> n 1 1 1\"\n )\n null_prompt = self.get_learned_conditioning([\"\"])\n\n # z.shape: [8, 4, 64, 64]; c.shape: [8, 1, 768]\n # print('=========== xc shape ===========', xc.shape)\n with torch.enable_grad():\n clip_emb = self.get_learned_conditioning(xc).detach()\n null_prompt = self.get_learned_conditioning([\"\"]).detach()\n cond[\"c_crossattn\"] = [\n self.cc_projection(\n torch.cat(\n [\n torch.where(prompt_mask, null_prompt, clip_emb),\n T[:, None, :],\n ],\n dim=-1,\n )\n )\n ]\n cond[\"c_concat\"] = [\n input_mask * self.encode_first_stage((xc.to(self.device))).mode().detach()\n ]\n out = [z, cond]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_original_cond:\n out.append(xc)\n return out\n\n # @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, \"b h w c -> b c h w\").contiguous()\n\n z = 1.0 / self.scale_factor * z\n\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n uf = self.split_input_params[\"vqf\"]\n bs, nc, h, w = z.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n z, ks, stride, uf=uf\n )\n\n z = unfold(z) # (bn, nc * prod(**ks), L)\n # 1. Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n # 2. apply model loop over last dim\n if isinstance(self.first_stage_model, VQModelInterface):\n output_list = [\n self.first_stage_model.decode(\n z[:, :, :, :, i],\n force_not_quantize=predict_cids or force_not_quantize,\n )\n for i in range(z.shape[-1])\n ]\n else:\n output_list = [\n self.first_stage_model.decode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L)\n o = o * weighting\n # Reverse 1. 
reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization # norm is shape (1, 1, h, w)\n return decoded\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n else:\n if isinstance(self.first_stage_model, VQModelInterface):\n return self.first_stage_model.decode(\n z, force_not_quantize=predict_cids or force_not_quantize\n )\n else:\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n if hasattr(self, \"split_input_params\"):\n if self.split_input_params[\"patch_distributed_vq\"]:\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n df = self.split_input_params[\"vqf\"]\n self.split_input_params[\"original_image_size\"] = x.shape[-2:]\n bs, nc, h, w = x.shape\n if ks[0] > h or ks[1] > w:\n ks = (min(ks[0], h), min(ks[1], w))\n print(\"reducing Kernel\")\n\n if stride[0] > h or stride[1] > w:\n stride = (min(stride[0], h), min(stride[1], w))\n print(\"reducing stride\")\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x, ks, stride, df=df\n )\n z = unfold(x) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n output_list = [\n self.first_stage_model.encode(z[:, :, :, :, i])\n for i in range(z.shape[-1])\n ]\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n decoded = fold(o)\n decoded = decoded / normalization\n return decoded\n\n else:\n return self.first_stage_model.encode(x)\n else:\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, step_ratio=None, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c, step_ratio=step_ratio)\n return loss\n\n def forward(self, x, c, step_ratio=None, *args, **kwargs):\n if step_ratio is not None:\n t = np.round((1 - step_ratio) * self.num_timesteps).clip(0, self.num_timesteps - 1)\n t = torch.full((x.shape[0],), t, dtype=torch.long, device=self.device)\n else:\n t = torch.randint(\n 0, self.num_timesteps, (x.shape[0],), device=self.device\n ).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset\n def rescale_bbox(bbox):\n x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])\n y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])\n w = min(bbox[2] / crop_coordinates[2], 1 - x0)\n h = min(bbox[3] / crop_coordinates[3], 1 - y0)\n return x0, y0, w, h\n\n return [rescale_bbox(b) for b in bboxes]\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is exptected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = (\n \"c_concat\" if 
self.model.conditioning_key == \"concat\" else \"c_crossattn\"\n )\n cond = {key: cond}\n\n if hasattr(self, \"split_input_params\"):\n assert len(cond) == 1 # todo can only deal with one conditioning atm\n assert not return_ids\n ks = self.split_input_params[\"ks\"] # eg. (128, 128)\n stride = self.split_input_params[\"stride\"] # eg. (64, 64)\n\n h, w = x_noisy.shape[-2:]\n\n fold, unfold, normalization, weighting = self.get_fold_unfold(\n x_noisy, ks, stride\n )\n\n z = unfold(x_noisy) # (bn, nc * prod(**ks), L)\n # Reshape to img shape\n z = z.view(\n (z.shape[0], -1, ks[0], ks[1], z.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])]\n\n if (\n self.cond_stage_key in [\"image\", \"LR_image\", \"segmentation\", \"bbox_img\"]\n and self.model.conditioning_key\n ): # todo check for completeness\n c_key = next(iter(cond.keys())) # get key\n c = next(iter(cond.values())) # get value\n assert len(c) == 1 # todo extend to list with more than one elem\n c = c[0] # get element\n\n c = unfold(c)\n c = c.view(\n (c.shape[0], -1, ks[0], ks[1], c.shape[-1])\n ) # (bn, nc, ks[0], ks[1], L )\n\n cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]\n\n elif self.cond_stage_key == \"coordinates_bbox\":\n assert (\n \"original_image_size\" in self.split_input_params\n ), \"BoudingBoxRescaling is missing original_image_size\"\n\n # assuming padding of unfold is always 0 and its dilation is always 1\n n_patches_per_row = int((w - ks[0]) / stride[0] + 1)\n full_img_h, full_img_w = self.split_input_params[\"original_image_size\"]\n # as we are operating on latents, we need the factor from the original image size to the\n # spatial latent size to properly rescale the crops for regenerating the bbox annotations\n num_downs = self.first_stage_model.encoder.num_resolutions - 1\n rescale_latent = 2 ** (num_downs)\n\n # get top left postions of patches as conforming for the bbbox tokenizer, therefore we\n # need to rescale the tl patch coordinates to be in between (0,1)\n tl_patch_coordinates = [\n (\n rescale_latent\n * stride[0]\n * (patch_nr % n_patches_per_row)\n / full_img_w,\n rescale_latent\n * stride[1]\n * (patch_nr // n_patches_per_row)\n / full_img_h,\n )\n for patch_nr in range(z.shape[-1])\n ]\n\n # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w)\n patch_limits = [\n (\n x_tl,\n y_tl,\n rescale_latent * ks[0] / full_img_w,\n rescale_latent * ks[1] / full_img_h,\n )\n for x_tl, y_tl in tl_patch_coordinates\n ]\n # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates]\n\n # tokenize crop coordinates for the bounding boxes of the respective patches\n patch_limits_tknzd = [\n torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(\n self.device\n )\n for bbox in patch_limits\n ] # list of length l with tensors of shape (1, 2)\n # cut tknzd crop position from conditioning\n assert isinstance(cond, dict), \"cond must be dict to be fed into model\"\n cut_cond = cond[\"c_crossattn\"][0][..., :-2].to(self.device)\n\n adapted_cond = torch.stack(\n [torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]\n )\n adapted_cond = rearrange(adapted_cond, \"l b n -> (l b) n\")\n adapted_cond = self.get_learned_conditioning(adapted_cond)\n adapted_cond = rearrange(\n adapted_cond, \"(l b) n d -> l b n d\", l=z.shape[-1]\n )\n\n cond_list = [{\"c_crossattn\": [e]} for e in adapted_cond]\n\n else:\n cond_list = [\n cond for i in 
range(z.shape[-1])\n ] # Todo make this more efficient\n\n # apply model by loop over crops\n output_list = [\n self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])\n ]\n assert not isinstance(\n output_list[0], tuple\n ) # todo cant deal with multiple model outputs check this never happens\n\n o = torch.stack(output_list, axis=-1)\n o = o * weighting\n # Reverse reshape to img shape\n o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L)\n # stitch crops together\n x_recon = fold(o) / normalization\n\n else:\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (\n extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t\n - pred_xstart\n ) / extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(\n mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0\n )\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = \"train\" if self.training else \"val\"\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f\"{prefix}/loss_simple\": loss_simple.mean()})\n\n if self.logvar.device != self.device:\n self.logvar = self.logvar.to(self.device)\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f\"{prefix}/loss_gamma\": loss.mean()})\n loss_dict.update({\"logvar\": self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f\"{prefix}/loss_vlb\": loss_vlb})\n loss += self.original_elbo_weight * loss_vlb\n loss_dict.update({f\"{prefix}/loss\": loss})\n\n return loss, loss_dict\n\n def p_mean_variance(\n self,\n x,\n c,\n t,\n clip_denoised: bool,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(\n self, model_out, x, t, c, **corrector_kwargs\n )\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif 
self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1.0, 1.0)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(\n x_start=x_recon, x_t=x, t=t\n )\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(\n self,\n x,\n c,\n t,\n clip_denoised=False,\n repeat_noise=False,\n return_codebook_ids=False,\n quantize_denoised=False,\n return_x0=False,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n ):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(\n x=x,\n c=c,\n t=t,\n clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.0:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (\n 0.5 * model_log_variance\n ).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return (\n model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise,\n x0,\n )\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(\n self,\n cond,\n shape,\n verbose=True,\n callback=None,\n quantize_denoised=False,\n img_callback=None,\n mask=None,\n x0=None,\n temperature=1.0,\n noise_dropout=0.0,\n score_corrector=None,\n corrector_kwargs=None,\n batch_size=None,\n x_T=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(\n reversed(range(0, timesteps)),\n desc=\"Progressive Generation\",\n total=timesteps,\n )\n if verbose\n else reversed(range(0, timesteps))\n )\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = 
self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n return_x0=True,\n temperature=temperature[i],\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n )\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(\n self,\n cond,\n shape,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n start_T=None,\n log_every_t=None,\n ):\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = (\n tqdm(reversed(range(0, timesteps)), desc=\"Sampling t\", total=timesteps)\n if verbose\n else reversed(range(0, timesteps))\n )\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != \"hybrid\"\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(\n img,\n cond,\n ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised,\n )\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1.0 - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback:\n callback(i)\n if img_callback:\n img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(\n self,\n cond,\n batch_size=16,\n return_intermediates=False,\n x_T=None,\n verbose=True,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n shape=None,\n **kwargs,\n ):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {\n key: cond[key][:batch_size]\n if not isinstance(cond[key], list)\n else list(map(lambda x: x[:batch_size], cond[key]))\n for key in cond\n }\n else:\n cond = (\n [c[:batch_size] for c in cond]\n if isinstance(cond, list)\n else cond[:batch_size]\n )\n return self.p_sample_loop(\n cond,\n shape,\n return_intermediates=return_intermediates,\n x_T=x_T,\n verbose=verbose,\n timesteps=timesteps,\n quantize_denoised=quantize_denoised,\n mask=mask,\n x0=x0,\n )\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(\n ddim_steps, batch_size, shape, cond, verbose=False, **kwargs\n )\n\n else:\n samples, intermediates = self.sample(\n cond=cond, batch_size=batch_size, 
return_intermediates=True, **kwargs\n )\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(\n self, batch_size, null_label=None, image_size=512\n ):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n # todo: get null label from cond_stage_model\n raise NotImplementedError()\n c = repeat(c, \"1 ... -> b ...\", b=batch_size).to(self.device)\n cond = {}\n cond[\"c_crossattn\"] = [c]\n cond[\"c_concat\"] = [\n torch.zeros([batch_size, 4, image_size // 8, image_size // 8]).to(\n self.device\n )\n ]\n return cond\n\n @torch.no_grad()\n def log_images(\n self,\n batch,\n N=8,\n n_row=4,\n sample=True,\n ddim_steps=200,\n ddim_eta=1.0,\n return_keys=None,\n quantize_denoised=True,\n inpaint=True,\n plot_denoise_rows=False,\n plot_progressive_rows=True,\n plot_diffusion_rows=True,\n unconditional_guidance_scale=1.0,\n unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs,\n ):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(\n batch,\n self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N,\n )\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[self.cond_stage_key],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif self.cond_stage_key == \"class_label\":\n xc = log_txt_as_img(\n (x.shape[2], x.shape[3]),\n batch[\"human_label\"],\n size=x.shape[2] // 25,\n )\n log[\"conditioning\"] = xc\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), \"1 -> b\", b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, \"n b c h w -> b n c h w\")\n diffusion_grid = rearrange(diffusion_grid, \"b n c h w -> (b n) c h w\")\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if (\n quantize_denoised\n and not 
isinstance(self.first_stage_model, AutoencoderKL)\n and not isinstance(self.first_stage_model, IdentityFirstStage)\n ):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n quantize_denoised=True,\n )\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(\n N, unconditional_guidance_label, image_size=x.shape[-1]\n )\n # uc = torch.zeros_like(c)\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n ddim_steps=ddim_steps,\n eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[\n f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"\n ] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4 : 3 * h // 4, w // 4 : 3 * w // 4] = 0.0\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1.0 - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(\n cond=c,\n batch_size=N,\n ddim=use_ddim,\n eta=ddim_eta,\n ddim_steps=ddim_steps,\n x0=z[:N],\n mask=mask,\n )\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(\n c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N,\n )\n prog_row = self._get_denoise_row_from_list(\n progressives, desc=\"Progressive Generation\"\n )\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = []\n if self.unet_trainable == \"attn\":\n print(\"Training only unet attention layers\")\n for n, m in self.model.named_modules():\n if isinstance(m, CrossAttention) and n.endswith(\"attn2\"):\n params.extend(m.parameters())\n if self.unet_trainable == \"conv_in\":\n print(\"Training only unet input conv layers\")\n params = list(self.model.diffusion_model.input_blocks[0][0].parameters())\n elif self.unet_trainable is True or self.unet_trainable == \"all\":\n print(\"Training the full unet\")\n params = list(self.model.parameters())\n else:\n raise ValueError(\n f\"Unrecognised setting for unet_trainable: {self.unet_trainable}\"\n )\n\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print(\"Diffusion model 
optimizing logvar\")\n params.append(self.logvar)\n\n if self.cc_projection is not None:\n params = params + list(self.cc_projection.parameters())\n print(\"========== optimizing for cc projection weight ==========\")\n\n opt = torch.optim.AdamW(\n [\n {\"params\": self.model.parameters(), \"lr\": lr},\n {\"params\": self.cc_projection.parameters(), \"lr\": 10.0 * lr},\n ],\n lr=lr,\n )\n if self.use_scheduler:\n assert \"target\" in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n \"scheduler\": LambdaLR(opt, lr_lambda=scheduler.schedule),\n \"interval\": \"step\",\n \"frequency\": 1,\n }\n ]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2.0 * (x - x.min()) / (x.max() - x.min()) - 1.0\n return x" }, { "identifier": "load_model_from_config", "path": "ldm/util.py", "snippet": "def load_model_from_config(config, ckpt, device, vram_O=False, verbose=False):\n print(f\"[INFO] Loading model from {ckpt}\")\n pl_sd = torch.load(ckpt, map_location=\"cpu\")\n\n if \"global_step\" in pl_sd and verbose:\n print(f'[INFO] Global Step: {pl_sd[\"global_step\"]}')\n\n sd = pl_sd[\"state_dict\"]\n\n model = instantiate_from_config(config.model)\n m, u = model.load_state_dict(sd, strict=False)\n\n if len(m) > 0 and verbose:\n print(\"[INFO] Missing keys: \\n\", m)\n if len(u) > 0 and verbose:\n print(\"[INFO] Unexpected keys: \\n\", u)\n\n # manually load ema and delete it to save GPU memory\n if model.use_ema:\n if verbose:\n print(\"[INFO] Loading EMA\")\n model.model_ema.copy_to(model.model)\n del model.model_ema\n\n if vram_O:\n # we don't need decoder\n del model.first_stage_model.decoder\n\n torch.cuda.empty_cache()\n model.eval().to(device)\n\n return model" }, { "identifier": "make_T", "path": "util/pose.py", "snippet": "def make_T(theta, azimuth, distance, in_deg=False):\n if in_deg:\n theta, azimuth = theta.deg2rad(), azimuth.deg2rad()\n return torch.stack(\n (\n theta,\n torch.sin(azimuth),\n torch.cos(azimuth),\n distance,\n )\n )" }, { "identifier": "default", "path": "util/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" } ]
import itertools
import torch
import torch.nn as nn
from dataclasses import dataclass
from diffusers import DDIMScheduler
from einops import rearrange
from omegaconf import OmegaConf
from ldm.lora import (
    inject_trainable_lora_extended,
    monkeypatch_remove_lora,
    save_lora_weight,
)
from ldm.models.diffusion.ddpm import LatentDiffusion
from ldm.util import load_model_from_config
from util.pose import make_T
from util.typing import *
from util.util import default
15,731
    @dataclass
    class Config:
        pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt"
        pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml"
        vram_O: bool = False
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98

    config: Config

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.config = OmegaConf.structured(self.Config(**kwargs))
        self.device = "cuda"
        self.require_grad_params = []
        self.configure()

    def configure(self) -> None:
        print("[INFO] Loading Zero123...")
        self.pretrained_config = OmegaConf.load(self.config.pretrained_config)
        self.weights_dtype = torch.float32
        self.model: LatentDiffusion = load_model_from_config(
            self.pretrained_config,
            self.config.pretrained_model_name_or_path,
            device=self.device,
            vram_O=self.config.vram_O,
        )
        for p in self.model.parameters():
            p.requires_grad_(False)
        self.num_train_timesteps = self.pretrained_config.model.params.timesteps
        self.scheduler = DDIMScheduler(
            self.num_train_timesteps,
            self.pretrained_config.model.params.linear_start,
            self.pretrained_config.model.params.linear_end,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
        )
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps(
            min_step_percent=self.config.min_step_percent,
            max_step_percent=self.config.max_step_percent,
        )
        print("[INFO] Loaded Zero123")

    @torch.cuda.amp.autocast(enabled=False)
    def set_min_max_steps(
        self, min_step_percent: float = 0.02, max_step_percent: float = 0.98
    ):
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)

    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def get_image_embeds(
        self, image: Float[Tensor, "B 3 256 256"]
    ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]:
        c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype))
        c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode()
        return c_crossattn, c_concat

    @torch.cuda.amp.autocast(enabled=False)
    def encode_image(
        self, image: Float[Tensor, "B 3 256 256"]
    ) -> Float[Tensor, "B 4 32 32"]:
        input_dtype = image.dtype
        latent = self.model.get_first_stage_encoding(
            self.model.encode_first_stage(image.to(self.weights_dtype))
        )
        return latent.to(input_dtype)  # [B, 4, 32, 32] Latent space image

    @torch.cuda.amp.autocast(enabled=False)
    def decode_latent(
        self,
        latent: Float[Tensor, "B 4 H W"],
    ) -> Float[Tensor, "B 3 512 512"]:
        input_dtype = latent.dtype
        image = self.model.decode_first_stage(latent)
        image = (image * 0.5 + 0.5).clamp(0, 1)
        return image.to(input_dtype)

    @staticmethod
    @torch.no_grad()
    def make_cond(cond):
        """Add zeros to the beginning of cond"""
        return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()}

    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def clip_camera_projection(
        self,
        theta: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        distance: Float[Tensor, "B"],
        c_crossattn: Float[Tensor, "B 1 768"],
        in_deg: bool = False,
    ):
        T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :]
        clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1))
        return clip_emb

    def inject_lora(
        self,
        ckpt_fp: str = None,
        rank: int = 12,
        target_replace_module: List[str] = ["CrossAttention", "GEGLU"],
        eval: bool = False,
    ):
        print(
            f"[INFO] Injecting LoRA from "
            + (str(ckpt_fp) if ckpt_fp is not None else "scratch"),
        )
class Zero123(nn.Module):
    @dataclass
    class Config:
        pretrained_model_name_or_path: str = "ldm/ckpt/zero123-xl.ckpt"
        pretrained_config: str = "ldm/ckpt/sd-objaverse-finetune-c_concat-256.yaml"
        vram_O: bool = False
        min_step_percent: float = 0.02
        max_step_percent: float = 0.98

    config: Config

    def __init__(self, **kwargs) -> None:
        super().__init__()
        self.config = OmegaConf.structured(self.Config(**kwargs))
        self.device = "cuda"
        self.require_grad_params = []
        self.configure()

    def configure(self) -> None:
        print("[INFO] Loading Zero123...")
        self.pretrained_config = OmegaConf.load(self.config.pretrained_config)
        self.weights_dtype = torch.float32
        self.model: LatentDiffusion = load_model_from_config(
            self.pretrained_config,
            self.config.pretrained_model_name_or_path,
            device=self.device,
            vram_O=self.config.vram_O,
        )
        for p in self.model.parameters():
            p.requires_grad_(False)
        self.num_train_timesteps = self.pretrained_config.model.params.timesteps
        self.scheduler = DDIMScheduler(
            self.num_train_timesteps,
            self.pretrained_config.model.params.linear_start,
            self.pretrained_config.model.params.linear_end,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
        )
        self.num_train_timesteps = self.scheduler.config.num_train_timesteps
        self.set_min_max_steps(
            min_step_percent=self.config.min_step_percent,
            max_step_percent=self.config.max_step_percent,
        )
        print("[INFO] Loaded Zero123")

    @torch.cuda.amp.autocast(enabled=False)
    def set_min_max_steps(
        self, min_step_percent: float = 0.02, max_step_percent: float = 0.98
    ):
        self.min_step = int(self.num_train_timesteps * min_step_percent)
        self.max_step = int(self.num_train_timesteps * max_step_percent)

    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def get_image_embeds(
        self, image: Float[Tensor, "B 3 256 256"]
    ) -> Tuple[Float[Tensor, "B 1 768"], Float[Tensor, "B 4 32 32"]]:
        c_crossattn = self.model.get_learned_conditioning(image.to(self.weights_dtype))
        c_concat = self.model.encode_first_stage(image.to(self.weights_dtype)).mode()
        return c_crossattn, c_concat

    @torch.cuda.amp.autocast(enabled=False)
    def encode_image(
        self, image: Float[Tensor, "B 3 256 256"]
    ) -> Float[Tensor, "B 4 32 32"]:
        input_dtype = image.dtype
        latent = self.model.get_first_stage_encoding(
            self.model.encode_first_stage(image.to(self.weights_dtype))
        )
        return latent.to(input_dtype)  # [B, 4, 32, 32] Latent space image

    @torch.cuda.amp.autocast(enabled=False)
    def decode_latent(
        self,
        latent: Float[Tensor, "B 4 H W"],
    ) -> Float[Tensor, "B 3 512 512"]:
        input_dtype = latent.dtype
        image = self.model.decode_first_stage(latent)
        image = (image * 0.5 + 0.5).clamp(0, 1)
        return image.to(input_dtype)

    @staticmethod
    @torch.no_grad()
    def make_cond(cond):
        """Add zeros to the beginning of cond"""
        return {k: [torch.cat([torch.zeros_like(v), v])] for k, v in cond.items()}

    @torch.cuda.amp.autocast(enabled=False)
    @torch.no_grad()
    def clip_camera_projection(
        self,
        theta: Float[Tensor, "B"],
        azimuth: Float[Tensor, "B"],
        distance: Float[Tensor, "B"],
        c_crossattn: Float[Tensor, "B 1 768"],
        in_deg: bool = False,
    ):
        T = make_T(theta, azimuth, distance, in_deg=in_deg).T[:, None, :]
        clip_emb = self.model.cc_projection(torch.cat([c_crossattn, T], dim=-1))
        return clip_emb

    def inject_lora(
        self,
        ckpt_fp: str = None,
        rank: int = 12,
        target_replace_module: List[str] = ["CrossAttention", "GEGLU"],
        eval: bool = False,
    ):
        print(
            f"[INFO] Injecting LoRA from "
            + (str(ckpt_fp) if ckpt_fp is not None else "scratch"),
        )
lora_params, _ = inject_trainable_lora_extended(
0
2023-12-17 12:45:38+00:00
24k
penghao-wu/vstar
VisualSearch/utils/dataset.py
[ { "identifier": "conversation", "path": "VisualSearch/model/llava/conversation.py", "snippet": "class SeparatorStyle(Enum):\nclass Conversation:\n SINGLE = auto()\n TWO = auto()\n MPT = auto()\n PLAIN = auto()\n LLAMA_2 = auto()\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n W, H = image.size\n H, W = longest_edge, shortest_edge\n H, W = shortest_edge, longest_edge\n def get_prompt(self):\n def append_message(self, role, message):\n def get_images(self, return_pil=False):\n def expand2square(pil_img, background_color=(122, 116, 104)):\n def to_gradio_chatbot(self):\n def copy(self):\n def dict(self):" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/model/llava/constants.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "IGNORE_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IGNORE_INDEX = -100" }, { "identifier": "IMAGE_TOKEN_INDEX", "path": "VisualSearch/model/llava/constants.py", "snippet": "IMAGE_TOKEN_INDEX = -200" }, { "identifier": "tokenizer_image_token", "path": "VisualSearch/model/llava/mm_utils.py", "snippet": "def tokenizer_image_token(\n prompt, tokenizer, image_token_index=IMAGE_TOKEN_INDEX, return_tensors=None\n):\n prompt_chunks = [tokenizer(chunk).input_ids for chunk in prompt.split(\"<image>\")]\n\n def insert_separator(X, sep):\n return [ele for sublist in zip(X, [sep] * len(X)) for ele in sublist][:-1]\n\n input_ids = []\n offset = 0\n if (\n len(prompt_chunks) > 0\n and len(prompt_chunks[0]) > 0\n and prompt_chunks[0][0] == tokenizer.bos_token_id\n ):\n offset = 1\n input_ids.append(prompt_chunks[0][0])\n\n for x in insert_separator(prompt_chunks, [image_token_index] * (offset + 1)):\n input_ids.extend(x[offset:])\n\n if return_tensors is not None:\n if return_tensors == \"pt\":\n return torch.tensor(input_ids, dtype=torch.long)\n raise ValueError(f\"Unsupported tensor type: {return_tensors}\")\n return input_ids" }, { "identifier": "get_mask_from_json", "path": "VisualSearch/utils/data_processing.py", "snippet": "def get_mask_from_json(json_path, img):\n try:\n with open(json_path, \"r\") as r:\n anno = json.loads(r.read())\n except:\n with open(json_path, \"r\", encoding=\"cp1252\") as r:\n anno = json.loads(r.read())\n\n inform = anno[\"shapes\"]\n comments = anno[\"text\"]\n is_sentence = anno[\"is_sentence\"]\n\n height, width = img.shape[:2]\n\n ### sort polies by area\n area_list = []\n valid_poly_list = []\n for i in inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n if \"flag\" == label_id.lower(): ## meaningless deprecated annotations\n continue\n\n tmp_mask = np.zeros((height, width), dtype=np.uint8)\n cv2.polylines(tmp_mask, np.array([points], dtype=np.int32), True, 1, 1)\n cv2.fillPoly(tmp_mask, np.array([points], dtype=np.int32), 1)\n tmp_area = tmp_mask.sum()\n\n area_list.append(tmp_area)\n valid_poly_list.append(i)\n\n ### ground-truth mask\n sort_index = np.argsort(area_list)[::-1].astype(np.int32)\n sort_index = list(sort_index)\n sort_inform = []\n for s_idx in sort_index:\n sort_inform.append(valid_poly_list[s_idx])\n\n mask = np.zeros((height, width), dtype=np.uint8)\n for i in sort_inform:\n label_id = i[\"label\"]\n points = i[\"points\"]\n\n if \"ignore\" in label_id.lower():\n label_value = 255 # ignored during evaluation\n else:\n label_value = 1 # target\n\n cv2.polylines(mask, np.array([points], dtype=np.int32), True, label_value, 1)\n cv2.fillPoly(mask, np.array([points], dtype=np.int32), label_value)\n\n return mask, 
comments, is_sentence" }, { "identifier": "REFER", "path": "VisualSearch/utils/refer.py", "snippet": "class REFER:\n def __init__(self, data_root, dataset=\"refcoco\", splitBy=\"unc\"):\n # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog\n # also provide dataset name and splitBy information\n # e.g., dataset = 'refcoco', splitBy = 'unc'\n print(\"loading dataset %s into memory...\" % dataset)\n self.ROOT_DIR = osp.abspath(osp.dirname(__file__))\n self.DATA_DIR = osp.join(data_root, dataset)\n if dataset in [\"refcoco\", \"refcoco+\", \"refcocog\"]:\n self.IMAGE_DIR = osp.join(data_root, \"images/mscoco/images/train2014\")\n elif dataset == \"refclef\":\n self.IMAGE_DIR = osp.join(data_root, \"images/saiapr_tc-12\")\n else:\n print(\"No refer dataset is called [%s]\" % dataset)\n sys.exit()\n\n self.dataset = dataset\n\n # load refs from data/dataset/refs(dataset).json\n tic = time.time()\n\n ref_file = osp.join(self.DATA_DIR, \"refs(\" + splitBy + \").p\")\n print(\"ref_file: \", ref_file)\n self.data = {}\n self.data[\"dataset\"] = dataset\n self.data[\"refs\"] = pickle.load(open(ref_file, \"rb\"))\n\n # load annotations from data/dataset/instances.json\n instances_file = osp.join(self.DATA_DIR, \"instances.json\")\n instances = json.load(open(instances_file, \"rb\"))\n self.data[\"images\"] = instances[\"images\"]\n self.data[\"annotations\"] = instances[\"annotations\"]\n self.data[\"categories\"] = instances[\"categories\"]\n\n # create index\n self.createIndex()\n print(\"DONE (t=%.2fs)\" % (time.time() - tic))\n\n def createIndex(self):\n # create sets of mapping\n # 1) Refs: \t \t{ref_id: ref}\n # 2) Anns: \t \t{ann_id: ann}\n # 3) Imgs:\t\t \t{image_id: image}\n # 4) Cats: \t \t{category_id: category_name}\n # 5) Sents: \t{sent_id: sent}\n # 6) imgToRefs: \t{image_id: refs}\n # 7) imgToAnns: \t{image_id: anns}\n # 8) refToAnn: \t{ref_id: ann}\n # 9) annToRef: \t{ann_id: ref}\n # 10) catToRefs: \t{category_id: refs}\n # 11) sentToRef: \t{sent_id: ref}\n # 12) sentToTokens: {sent_id: tokens}\n print(\"creating index...\")\n # fetch info from instances\n Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {}\n for ann in self.data[\"annotations\"]:\n Anns[ann[\"id\"]] = ann\n imgToAnns[ann[\"image_id\"]] = imgToAnns.get(ann[\"image_id\"], []) + [ann]\n for img in self.data[\"images\"]:\n Imgs[img[\"id\"]] = img\n for cat in self.data[\"categories\"]:\n Cats[cat[\"id\"]] = cat[\"name\"]\n\n # fetch info from refs\n Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {}\n Sents, sentToRef, sentToTokens = {}, {}, {}\n for ref in self.data[\"refs\"]:\n # ids\n ref_id = ref[\"ref_id\"]\n ann_id = ref[\"ann_id\"]\n category_id = ref[\"category_id\"]\n image_id = ref[\"image_id\"]\n\n # add mapping related to ref\n Refs[ref_id] = ref\n imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref]\n catToRefs[category_id] = catToRefs.get(category_id, []) + [ref]\n refToAnn[ref_id] = Anns[ann_id]\n annToRef[ann_id] = ref\n\n # add mapping of sent\n for sent in ref[\"sentences\"]:\n Sents[sent[\"sent_id\"]] = sent\n sentToRef[sent[\"sent_id\"]] = ref\n sentToTokens[sent[\"sent_id\"]] = sent[\"tokens\"]\n\n # create class members\n self.Refs = Refs\n self.Anns = Anns\n self.Imgs = Imgs\n self.Cats = Cats\n self.Sents = Sents\n self.imgToRefs = imgToRefs\n self.imgToAnns = imgToAnns\n self.refToAnn = refToAnn\n self.annToRef = annToRef\n self.catToRefs = catToRefs\n self.sentToRef = sentToRef\n self.sentToTokens = sentToTokens\n print(\"index created.\")\n\n def 
getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=\"\"):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == len(split) == 0:\n refs = self.data[\"refs\"]\n else:\n if not len(image_ids) == 0:\n refs = [self.imgToRefs[image_id] for image_id in image_ids]\n else:\n refs = self.data[\"refs\"]\n if not len(cat_ids) == 0:\n refs = [ref for ref in refs if ref[\"category_id\"] in cat_ids]\n if not len(ref_ids) == 0:\n refs = [ref for ref in refs if ref[\"ref_id\"] in ref_ids]\n if not len(split) == 0:\n if split in [\"testA\", \"testB\", \"testC\"]:\n refs = [\n ref for ref in refs if split[-1] in ref[\"split\"]\n ] # we also consider testAB, testBC, ...\n elif split in [\"testAB\", \"testBC\", \"testAC\"]:\n refs = [\n ref for ref in refs if ref[\"split\"] == split\n ] # rarely used I guess...\n elif split == \"test\":\n refs = [ref for ref in refs if \"test\" in ref[\"split\"]]\n elif split == \"train\" or split == \"val\":\n refs = [ref for ref in refs if ref[\"split\"] == split]\n else:\n print(\"No such split [%s]\" % split)\n sys.exit()\n ref_ids = [ref[\"ref_id\"] for ref in refs]\n return ref_ids\n\n def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]):\n image_ids = image_ids if type(image_ids) == list else [image_ids]\n cat_ids = cat_ids if type(cat_ids) == list else [cat_ids]\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if len(image_ids) == len(cat_ids) == len(ref_ids) == 0:\n ann_ids = [ann[\"id\"] for ann in self.data[\"annotations\"]]\n else:\n if not len(image_ids) == 0:\n lists = [\n self.imgToAnns[image_id]\n for image_id in image_ids\n if image_id in self.imgToAnns\n ] # list of [anns]\n anns = list(itertools.chain.from_iterable(lists))\n else:\n anns = self.data[\"annotations\"]\n if not len(cat_ids) == 0:\n anns = [ann for ann in anns if ann[\"category_id\"] in cat_ids]\n ann_ids = [ann[\"id\"] for ann in anns]\n if not len(ref_ids) == 0:\n ids = set(ann_ids).intersection(\n set([self.Refs[ref_id][\"ann_id\"] for ref_id in ref_ids])\n )\n return ann_ids\n\n def getImgIds(self, ref_ids=[]):\n ref_ids = ref_ids if type(ref_ids) == list else [ref_ids]\n\n if not len(ref_ids) == 0:\n image_ids = list(set([self.Refs[ref_id][\"image_id\"] for ref_id in ref_ids]))\n else:\n image_ids = self.Imgs.keys()\n return image_ids\n\n def getCatIds(self):\n return self.Cats.keys()\n\n def loadRefs(self, ref_ids=[]):\n if type(ref_ids) == list:\n return [self.Refs[ref_id] for ref_id in ref_ids]\n elif type(ref_ids) == int:\n return [self.Refs[ref_ids]]\n\n def loadAnns(self, ann_ids=[]):\n if type(ann_ids) == list:\n return [self.Anns[ann_id] for ann_id in ann_ids]\n elif type(ann_ids) == int or type(ann_ids) == unicode:\n return [self.Anns[ann_ids]]\n\n def loadImgs(self, image_ids=[]):\n if type(image_ids) == list:\n return [self.Imgs[image_id] for image_id in image_ids]\n elif type(image_ids) == int:\n return [self.Imgs[image_ids]]\n\n def loadCats(self, cat_ids=[]):\n if type(cat_ids) == list:\n return [self.Cats[cat_id] for cat_id in cat_ids]\n elif type(cat_ids) == int:\n return [self.Cats[cat_ids]]\n\n def getRefBox(self, ref_id):\n ref = self.Refs[ref_id]\n ann = self.refToAnn[ref_id]\n return ann[\"bbox\"] # [x, y, w, h]\n\n def showRef(self, ref, seg_box=\"seg\"):\n ax = plt.gca()\n # show image\n image = self.Imgs[ref[\"image_id\"]]\n I = 
io.imread(osp.join(self.IMAGE_DIR, image[\"file_name\"]))\n ax.imshow(I)\n # show refer expression\n for sid, sent in enumerate(ref[\"sentences\"]):\n print(\"%s. %s\" % (sid + 1, sent[\"sent\"]))\n # show segmentations\n if seg_box == \"seg\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n polygons = []\n color = []\n c = \"none\"\n if type(ann[\"segmentation\"][0]) == list:\n # polygon used for refcoco*\n for seg in ann[\"segmentation\"]:\n poly = np.array(seg).reshape((len(seg) / 2, 2))\n polygons.append(Polygon(poly, True, alpha=0.4))\n color.append(c)\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 1, 0, 0),\n linewidths=3,\n alpha=1,\n )\n ax.add_collection(p) # thick yellow polygon\n p = PatchCollection(\n polygons,\n facecolors=color,\n edgecolors=(1, 0, 0, 0),\n linewidths=1,\n alpha=1,\n )\n ax.add_collection(p) # thin red polygon\n else:\n # mask used for refclef\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n img = np.ones((m.shape[0], m.shape[1], 3))\n color_mask = np.array([2.0, 166.0, 101.0]) / 255\n for i in range(3):\n img[:, :, i] = color_mask[i]\n ax.imshow(np.dstack((img, m * 0.5)))\n # show bounding-box\n elif seg_box == \"box\":\n ann_id = ref[\"ann_id\"]\n ann = self.Anns[ann_id]\n bbox = self.getRefBox(ref[\"ref_id\"])\n box_plot = Rectangle(\n (bbox[0], bbox[1]),\n bbox[2],\n bbox[3],\n fill=False,\n edgecolor=\"green\",\n linewidth=3,\n )\n ax.add_patch(box_plot)\n\n def getMask(self, ref):\n # return mask, area and mask-center\n ann = self.refToAnn[ref[\"ref_id\"]]\n image = self.Imgs[ref[\"image_id\"]]\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(ann[\"segmentation\"], image[\"height\"], image[\"width\"])\n else:\n rle = ann[\"segmentation\"]\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n # compute area\n area = sum(mask.area(rle)) # should be close to ann['area']\n return {\"mask\": m, \"area\": area}\n # # position\n # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style)\n # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style)\n # # mass position (if there were multiple regions, we use the largest one.)\n # label_m = label(m, connectivity=m.ndim)\n # regions = regionprops(label_m)\n # if len(regions) > 0:\n # \tlargest_id = np.argmax(np.array([props.filled_area for props in regions]))\n # \tlargest_props = regions[largest_id]\n # \tmass_y, mass_x = largest_props.centroid\n # else:\n # \tmass_x, mass_y = position_x, position_y\n # # if centroid is not in mask, we find the closest point to it from mask\n # if m[mass_y, mass_x] != 1:\n # \tprint('Finding closes mask point ...')\n # \tkernel = np.ones((10, 10),np.uint8)\n # \tme = cv2.erode(m, kernel, iterations = 1)\n # \tpoints = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style\n # \tpoints = np.array(points)\n # \tdist = np.sum((points - (mass_y, mass_x))**2, axis=1)\n # \tid = np.argsort(dist)[0]\n # \tmass_y, mass_x = points[id]\n # \t# return\n # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y}\n # # show image and mask\n # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name']))\n # plt.figure()\n # plt.imshow(I)\n # ax = plt.gca()\n # img = np.ones( (m.shape[0], m.shape[1], 3) )\n # color_mask = np.array([2.0,166.0,101.0])/255\n 
# for i in range(3):\n # img[:,:,i] = color_mask[i]\n # ax.imshow(np.dstack( (img, m*0.5) ))\n # plt.show()\n\n def showMask(self, ref):\n M = self.getMask(ref)\n msk = M[\"mask\"]\n ax = plt.gca()\n ax.imshow(msk)" }, { "identifier": "ReferSegDataset", "path": "VisualSearch/utils/refer_seg_dataset.py", "snippet": "class ReferSegDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n refer_seg_data=\"refclef||refcoco||refcoco+||refcocog\",\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n DATA_DIR = os.path.join(base_dir, \"refer_seg\")\n self.refer_seg_ds_list = refer_seg_data.split(\n \"||\"\n ) # ['refclef', 'refcoco', 'refcoco+', 'refcocog']\n self.refer_seg_data = {}\n for ds in self.refer_seg_ds_list:\n if ds == \"refcocog\":\n splitBy = \"umd\"\n else:\n splitBy = \"unc\"\n\n if ds == \"grefcoco\":\n refer_api = G_REFER(DATA_DIR, ds, splitBy)\n else:\n refer_api = REFER(DATA_DIR, ds, splitBy)\n ref_ids_train = refer_api.getRefIds(split=\"train\")\n images_ids_train = refer_api.getImgIds(ref_ids=ref_ids_train)\n refs_train = refer_api.loadRefs(ref_ids=ref_ids_train)\n\n refer_seg_ds = {}\n refer_seg_ds[\"images\"] = []\n loaded_images = refer_api.loadImgs(image_ids=images_ids_train)\n\n for item in loaded_images:\n item = item.copy()\n if ds == \"refclef\":\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/saiapr_tc-12\", item[\"file_name\"]\n )\n else:\n item[\"file_name\"] = os.path.join(\n DATA_DIR, \"images/mscoco/images/train2014\", item[\"file_name\"]\n )\n refer_seg_ds[\"images\"].append(item)\n refer_seg_ds[\"annotations\"] = refer_api.Anns # anns_train\n\n print(\n \"dataset {} (refs {}) (train split) has {} images and {} annotations.\".format(\n ds,\n splitBy,\n len(refer_seg_ds[\"images\"]),\n len(refer_seg_ds[\"annotations\"]),\n )\n )\n\n img2refs = {}\n for ref in refs_train:\n image_id = ref[\"image_id\"]\n img2refs[image_id] = img2refs.get(image_id, []) + [\n ref,\n ]\n refer_seg_ds[\"img2refs\"] = img2refs\n self.refer_seg_data[ds] = refer_seg_ds\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = random.randint(0, len(self.refer_seg_ds_list) - 1)\n ds = self.refer_seg_ds_list[ds]\n refer_seg_ds = self.refer_seg_data[ds]\n images = refer_seg_ds[\"images\"]\n annotations = refer_seg_ds[\"annotations\"]\n img2refs = refer_seg_ds[\"img2refs\"]\n idx = random.randint(0, len(images) - 1)\n image_info = images[idx]\n image_path = 
image_info[\"file_name\"]\n image_id = image_info[\"id\"]\n refs = img2refs[image_id]\n if len(refs) == 0:\n return self.__getitem__(0)\n\n sents = []\n ann_ids = []\n for ref in refs:\n for sent in ref[\"sentences\"]:\n text = sent[\"sent\"]\n sents.append(text)\n ann_ids.append(ref[\"ann_id\"])\n if len(sents) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(sents))), size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(sents)))\n sampled_sents = np.vectorize(sents.__getitem__)(sampled_inds).tolist()\n # sampled_ann_ids = np.vectorize(ann_ids.__getitem__)(sampled_inds).tolist()\n sampled_ann_ids = [ann_ids[ind] for ind in sampled_inds]\n sampled_classes = sampled_sents\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n for text in sampled_classes:\n text = text.strip()\n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n flag = False\n masks = []\n bboxes_labels = []\n for ann_id in sampled_ann_ids:\n if isinstance(ann_id, list):\n assert False\n flag = True\n if -1 in ann_id:\n assert len(ann_id) == 1\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n else:\n m_final = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n for ann_id_i in ann_id:\n ann = annotations[ann_id_i]\n\n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros(\n (image_info[\"height\"], image_info[\"width\"])\n ).astype(np.uint8)\n else:\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"],\n image_info[\"height\"],\n image_info[\"width\"],\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n m_final = m_final | m\n m = m_final\n masks.append(m)\n continue\n \n ann = annotations[ann_id]\n cur_bboxes = [ann['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if 
len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n \n if len(ann[\"segmentation\"]) == 0:\n m = np.zeros((image_info[\"height\"], image_info[\"width\"])).astype(\n np.uint8\n )\n masks.append(m)\n continue\n\n if type(ann[\"segmentation\"][0]) == list: # polygon\n rle = mask.frPyObjects(\n ann[\"segmentation\"], image_info[\"height\"], image_info[\"width\"]\n )\n else:\n rle = ann[\"segmentation\"]\n for i in range(len(rle)):\n if not isinstance(rle[i][\"counts\"], bytes):\n rle[i][\"counts\"] = rle[i][\"counts\"].encode()\n m = mask.decode(rle)\n m = np.sum(\n m, axis=2\n ) # sometimes there are multiple binary map (corresponding to multiple segs)\n m = m.astype(np.uint8) # convert to np.uint8\n masks.append(m)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n masks = np.stack(masks, axis=0)\n\n\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "SegDetDataset", "path": "VisualSearch/utils/general_segdet_dataset.py", "snippet": "class SegDetDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n general_segdet_data=\"objects365||cocostuff||paco_lvis\",\n general_segdet_sample_rate=[2,1,1]\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n self.data2list = {}\n self.data2classes = {}\n\n self.general_segdet_datas = general_segdet_data.split(\"||\")\n num_images = []\n for ds in self.general_segdet_datas:\n if ds == \"cocostuff\":\n classes, images, labels, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels, bboxes)\n elif ds == \"objects365\":\n classes, images, bboxes = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, bboxes)\n else:\n classes, images, labels = eval(\"init_{}\".format(ds))(base_dir)\n self.data2list[ds] = (images, labels)\n self.data2classes[ds] = classes\n num_images.append(len(images))\n sample_rate = np.array(general_segdet_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n if \"cocostuff\" in self.general_segdet_datas:\n self.cocostuff_class2index = {\n c: i for i, c in enumerate(self.data2classes[\"cocostuff\"])\n }\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n 
ds = np.random.choice(list(range(len(self.general_segdet_datas))), p=self.sample_rate)\n ds = self.general_segdet_datas[ds]\n\n if ds in [\"paco_lvis\"]:\n class_map = self.data2classes[ds]\n img_ids, coco_api = self.data2list[ds]\n idx = random.randint(0, len(img_ids) - 1)\n img_id = img_ids[idx]\n image_info = coco_api.loadImgs([img_id])[0]\n file_name = image_info[\"file_name\"]\n if ds == \"pascal_part\":\n file_name = os.path.join(\n \"VOCdevkit\", \"VOC2010\", \"JPEGImages\", file_name\n )\n image_path = os.path.join(self.base_dir, \"vlpart\", ds, file_name)\n elif ds == \"paco_lvis\":\n image_path = os.path.join(self.base_dir, \"coco2017\", file_name)\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n annIds = coco_api.getAnnIds(imgIds=image_info[\"id\"])\n anns = coco_api.loadAnns(annIds)\n anns_category2instances = dict()\n for ann in anns:\n category_id = ann['category_id']\n if category_id not in anns_category2instances:\n anns_category2instances[category_id] = []\n anns_category2instances[category_id].append(ann)\n if len(anns_category2instances) == 0:\n return self.__getitem__(0)\n if len(anns_category2instances) >= self.num_classes_per_sample:\n sampled_anns = np.random.choice(\n list(anns_category2instances.keys()), size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_anns = list(anns_category2instances.keys())\n sampled_classes = []\n for category_id in sampled_anns:\n sampled_cls = class_map[category_id]\n if isinstance(sampled_cls, tuple):\n obj, part = sampled_cls\n if random.random() < 0.5:\n name = obj + \" \" + part\n else:\n name = \"the {} of the {}\".format(part, obj)\n else:\n name = sampled_cls\n name = name.replace('_', ' ')\n sampled_classes.append(name)\n\n elif ds in [\"cocostuff\"]:\n image, labels, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n label_path = labels[idx]\n bboxes = bboxes_all[idx]\n label = Image.open(label_path)\n label = np.array(label)\n if ds == \"ade20k\":\n label[label == 0] = 255\n label -= 1\n label[label == 254] = 255\n elif ds == \"cocostuff\":\n for c, i in self.cocostuff_class2index.items():\n if \"-\" in c:\n label[label == i] = 255\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = np.unique(label).tolist()\n if 255 in unique_label:\n unique_label.remove(255)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n elif ds in 
['objects365']:\n image, bboxes_all = self.data2list[ds]\n idx = random.randint(0, len(image) - 1)\n image_path = image[idx]\n bboxes = bboxes_all[idx]\n img = cv2.imread(image_path)\n image = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\"\n )[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n unique_label = set()\n for bbox_info in bboxes:\n unique_label.add(bbox_info['category_id'])\n unique_label = list(unique_label)\n if len(unique_label) == 0:\n return self.__getitem__(0)\n\n classes = [self.data2classes[ds][class_id] for class_id in unique_label]\n if len(classes) >= self.num_classes_per_sample:\n sampled_classes = np.random.choice(\n classes, size=self.num_classes_per_sample, replace=False\n ).tolist()\n else:\n sampled_classes = classes\n\n\n questions = []\n answers = []\n class_ids = []\n bboxes_labels = []\n for i, sampled_cls in enumerate(sampled_classes):\n text = sampled_cls\n if ds in ['objects365']:\n text = random.sample(text.split('/'), 1)[0]\n \n assert len(text.split(\"||\")) == 1\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n\n answers.append(random.choice(self.answer_list))\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n category_id = sampled_anns[i]\n cur_bboxes = [instance['bbox'] for instance in anns_category2instances[category_id]]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n continue\n\n class_id = self.data2classes[ds].tolist().index(sampled_cls)\n class_ids.append(class_id)\n if ds in ['objects365']:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id'] == class_id]\n else:\n cur_bboxes = [bbox['bbox'] for bbox in bboxes if bbox['category_id']-1 == class_id]\n cur_bboxes = cur_bboxes[:100]\n assert len(cur_bboxes) > 0\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [1]*len(bboxes_labels)\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n 
conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n\n if ds in [\"paco_lvis\", \"pascal_part\"]:\n masks = []\n for category_id in sampled_anns:\n try:\n cur_anns = anns_category2instances[category_id]\n cur_mask = None\n for ann in cur_anns:\n if cur_mask is None:\n cur_mask = coco_api.annToMask(ann)\n else:\n cur_mask = cur_mask | coco_api.annToMask(ann)\n assert cur_mask is not None\n masks.append(cur_mask)\n except Exception as e:\n print(e)\n return self.__getitem__(0)\n\n masks = np.stack(masks, axis=0)\n masks = torch.from_numpy(masks)\n label = torch.ones(masks.shape[1], masks.shape[2]) * self.ignore_label\n elif ds in ['objects365']:\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n masks_valid = [0]*len(bboxes_labels)\n else:\n label = torch.from_numpy(label).long()\n masks = []\n for class_id in class_ids:\n masks.append(label == class_id)\n masks = torch.stack(masks, dim=0)\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "MixedGroundingDataset", "path": "VisualSearch/utils/mixed_grounding_dataset.py", "snippet": "class MixedGroundingDataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n ):\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_dir = base_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n self.short_question_list = SHORT_QUESTION_LIST\n self.answer_list = ANSWER_LIST\n\n with open(os.path.join(base_dir, 'MixedGrounding', 'goldG_train.json')) as f:\n self.images = json.load(f)\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n\n idx = random.randint(0, len(self.images) - 1)\n image_info = self.images[idx]\n image_data_source = image_info['data_source']\n file_name = image_info[\"file_name\"]\n assert image_data_source in ['coco', 'vg', 'flickr']\n if image_data_source == 'coco':\n image_path = os.path.join(self.base_dir, 'coco2014/train2014', file_name)\n elif image_data_source == 'vg':\n image_path = os.path.join(self.base_dir, 'MixedGrounding/GQA/images', file_name)\n else:\n image_path = os.path.join(self.base_dir, 'MixedGrounding/flickr30k-images', file_name)\n caption = image_info['caption']\n instances = image_info['instances']\n if len(instances) == 0:\n return self.__getitem__(0)\n\n if len(instances) >= self.num_classes_per_sample:\n sampled_inds = np.random.choice(\n list(range(len(instances))), 
size=self.num_classes_per_sample, replace=False\n )\n else:\n sampled_inds = list(range(len(instances)))\n\n sampled_classes = sampled_inds\n \n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # preprocess image for clip\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n original_size = image.shape[:2]\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n questions = []\n answers = []\n bboxes_labels = []\n for sample_ind in sampled_inds:\n text = []\n tokens_positive = instances[sample_ind]['tokens_positive']\n for token in tokens_positive:\n text.append(caption[token[0]:token[1]])\n text = \" \".join(text)\n text = text.strip()\n question_template = random.choice(self.short_question_list)\n questions.append(question_template.format(class_name=text.lower()))\n answers.append(random.choice(self.answer_list))\n\n cur_bboxes = [instances[sample_ind]['bbox']]\n cur_bboxes = torch.tensor(cur_bboxes).view(-1, 4)\n # xywh to x1y1x2y2\n cur_bboxes[:, 2:] += cur_bboxes[:, :2]\n cur_bboxes[:, 0::2].clamp_(min=0, max=original_size[1])\n cur_bboxes[:, 1::2].clamp_(min=0, max=original_size[0])\n keep = (cur_bboxes[:, 3] > cur_bboxes[:, 1]) & (cur_bboxes[:, 2] > cur_bboxes[:, 0])\n cur_bboxes = cur_bboxes[keep]\n cur_bboxes = box_xyxy_to_cxcywh(cur_bboxes)\n cur_bboxes = cur_bboxes / torch.tensor([original_size[1], original_size[0], original_size[1], original_size[0]], dtype=torch.float32)\n if len(cur_bboxes) == 0:\n return self.__getitem__(0)\n bboxes_labels.append(cur_bboxes)\n\n conversations = []\n conv = conversation_lib.default_conversation.copy()\n\n i = 0\n while i < len(questions):\n conv.messages = []\n conv.append_message(conv.roles[0], questions[i])\n conv.append_message(conv.roles[1], answers[i])\n conversations.append(conv.get_prompt())\n i += 1\n \n bboxes_valid = [1]*len(bboxes_labels)\n masks_valid = [0]*len(bboxes_labels)\n masks = torch.rand(len(bboxes_labels), *original_size)\n label = torch.ones(original_size) * self.ignore_label\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "VQADataset", "path": "VisualSearch/utils/vqa_dataset.py", "snippet": "class VQADataset(torch.utils.data.Dataset):\n pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1)\n pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1)\n img_size = 1024\n ignore_label = 255\n\n def __init__(\n self,\n base_image_dir,\n tokenizer,\n vision_tower,\n samples_per_epoch=500 * 8 * 2 * 10,\n precision: str = \"fp32\",\n num_classes_per_sample: int = 3,\n exclude_val=False,\n vqa_data=\"possible_locations_conv_86k||llava_instruct_150k\",\n vqa_sample_rate=[2,1],\n ):\n self.exclude_val = exclude_val\n self.samples_per_epoch = samples_per_epoch\n self.num_classes_per_sample = num_classes_per_sample\n\n self.base_image_dir = base_image_dir\n self.tokenizer = tokenizer\n self.precision = precision\n self.transform = OwlViTProcessor.from_pretrained(\"google/owlvit-base-patch16\")\n self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower)\n\n DATA_DIR = os.path.join(base_image_dir, \"vsm_vqa_data\")\n self.vqa_image_root = os.path.join(base_image_dir, \"coco2017/train2017\")\n 
vqa_datas = vqa_data.split(\"||\")\n self.vqa_datas = []\n for data in vqa_datas:\n with open(os.path.join(DATA_DIR, \"{}.json\".format(data))) as f:\n data = json.load(f)\n self.vqa_datas.append(data)\n sample_rate = np.array(vqa_sample_rate)\n self.sample_rate = sample_rate / sample_rate.sum()\n\n def __len__(self):\n return self.samples_per_epoch\n\n def preprocess(self, x: torch.Tensor) -> torch.Tensor:\n \"\"\"Normalize pixel values and pad to a square input.\"\"\"\n # Normalize colors\n x = (x - self.pixel_mean) / self.pixel_std\n\n # Pad\n h, w = x.shape[-2:]\n padh = self.img_size - h\n padw = self.img_size - w\n x = F.pad(x, (0, padw, 0, padh))\n return x\n\n def __getitem__(self, idx):\n ds = np.random.choice(list(range(len(self.vqa_datas))), p=self.sample_rate)\n ds = self.vqa_datas[ds]\n idx = random.randint(0, len(ds) - 1)\n item = ds[idx]\n image_path = os.path.join(self.vqa_image_root, item[\"image\"])\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ori_size = image.shape[:2]\n image_clip = self.clip_image_processor.preprocess(\n expand2square(Image.open(image_path).convert('RGB'), tuple(int(x*255) for x in self.clip_image_processor.image_mean)), return_tensors=\"pt\")[\"pixel_values\"][0]\n\n image = self.transform(images=image, return_tensors=\"pt\")['pixel_values'][0]\n resize = image.shape[:2]\n\n conv = conversation_lib.default_conversation.copy()\n source = item[\"conversations\"]\n source = preprocess_multimodal(\n copy.deepcopy(source),\n mm_use_im_start_end=conv.sep_style == conversation_lib.SeparatorStyle.TWO,\n )\n roles = {\"human\": conv.roles[0], \"gpt\": conv.roles[1]}\n conversations = []\n if roles[source[0][\"from\"]] != conv.roles[0]:\n # Skip the first one if it is not from human\n source = source[1:]\n conv.messages = []\n for j, sentence in enumerate(source):\n role = roles[sentence[\"from\"]]\n assert role == conv.roles[j % 2], f\"{j}\"\n conv.append_message(role, sentence[\"value\"])\n conversations.append(conv.get_prompt())\n\n questions = conversations\n sampled_classes = conversations\n\n masks = torch.rand(1, *ori_size)\n label = torch.ones(ori_size) * self.ignore_label\n bboxes_labels = [torch.tensor([[0.5,0.5,1.0,1.0]])]\n bboxes_valid = [0]\n masks_valid = [0]\n\n return (\n image_path,\n image,\n image_clip,\n conversations,\n masks,\n label,\n bboxes_labels,\n bboxes_valid,\n masks_valid,\n resize,\n questions,\n sampled_classes,\n )" }, { "identifier": "DEFAULT_IM_END_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_END_TOKEN = \"<im_end>\"" }, { "identifier": "DEFAULT_IM_START_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IM_START_TOKEN = \"<im_start>\"" }, { "identifier": "DEFAULT_IMAGE_TOKEN", "path": "VisualSearch/utils/utils.py", "snippet": "DEFAULT_IMAGE_TOKEN = \"<image>\"" }, { "identifier": "box_xyxy_to_cxcywh", "path": "VisualSearch/utils/utils.py", "snippet": "def box_xyxy_to_cxcywh(x):\n x0, y0, x1, y1 = x.unbind(-1)\n b = [(x0 + x1) / 2, (y0 + y1) / 2,\n (x1 - x0), (y1 - y0)]\n return torch.stack(b, dim=-1)" }, { "identifier": "expand2square", "path": "VisualSearch/utils/utils.py", "snippet": "def expand2square(pil_img, background_color):\n width, height = pil_img.size\n if width == height:\n return pil_img\n elif width > height:\n result = Image.new(pil_img.mode, (width, width), background_color)\n result.paste(pil_img, (0, 0))\n return result\n else:\n result = Image.new(pil_img.mode, (height, height), background_color)\n result.paste(pil_img, (0, 
0))\n return result" } ]
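The context snippets above repeat one box-preparation pipeline: COCO-style xywh boxes are converted to absolute xyxy corners, clipped to the image, filtered for degenerate boxes, converted to cxcywh via box_xyxy_to_cxcywh, and normalized by image width/height. Below is a minimal, self-contained sketch of that pipeline; the sample box and image size are made-up example values, and box_xyxy_to_cxcywh is an equivalent rewrite of the snippet shown above.

import torch

def box_xyxy_to_cxcywh(x):
    # equivalent to VisualSearch.utils.utils.box_xyxy_to_cxcywh from the context above
    x0, y0, x1, y1 = x.unbind(-1)
    return torch.stack([(x0 + x1) / 2, (y0 + y1) / 2, x1 - x0, y1 - y0], dim=-1)

def prepare_boxes(xywh_boxes, original_size):
    h, w = original_size                          # original_size = image.shape[:2] -> (height, width)
    boxes = torch.tensor(xywh_boxes, dtype=torch.float32).view(-1, 4)
    boxes[:, 2:] += boxes[:, :2]                  # xywh -> x1y1x2y2
    boxes[:, 0::2].clamp_(min=0, max=w)           # clip x coordinates to the image
    boxes[:, 1::2].clamp_(min=0, max=h)           # clip y coordinates to the image
    keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0])
    boxes = boxes[keep]                           # drop zero-area boxes
    boxes = box_xyxy_to_cxcywh(boxes)
    return boxes / torch.tensor([w, h, w, h], dtype=torch.float32)

print(prepare_boxes([[10, 20, 30, 40]], (100, 200)))
# tensor([[0.1250, 0.4000, 0.1500, 0.4000]])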
import glob import os import random import cv2 import numpy as np import torch import torch.nn.functional as F from PIL import Image from pycocotools import mask from transformers import CLIPImageProcessor from transformers import OwlViTProcessor from VisualSearch.model.llava import conversation as conversation_lib from VisualSearch.model.llava.constants import (DEFAULT_IMAGE_TOKEN, IGNORE_INDEX, IMAGE_TOKEN_INDEX) from VisualSearch.model.llava.mm_utils import tokenizer_image_token from VisualSearch.utils.data_processing import get_mask_from_json from VisualSearch.utils.refer import REFER from VisualSearch.utils.refer_seg_dataset import ReferSegDataset from VisualSearch.utils.general_segdet_dataset import SegDetDataset from VisualSearch.utils.mixed_grounding_dataset import MixedGroundingDataset from VisualSearch.utils.vqa_dataset import VQADataset from VisualSearch.utils.utils import (DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN, DEFAULT_IMAGE_TOKEN) from VisualSearch.utils.utils import box_xyxy_to_cxcywh, expand2square
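The DEFAULT_IM_START_TOKEN / DEFAULT_IM_END_TOKEN / DEFAULT_IMAGE_TOKEN constants imported here are used further down in collate_fn to wrap the image placeholder when use_mm_start_end is enabled. A tiny illustration of that replacement follows; the prompt string is a made-up example, while the token values come from the context snippets above.

DEFAULT_IMAGE_TOKEN = "<image>"
DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"

prompt = "USER: <image>\nWhere is the dog? ASSISTANT:"   # hypothetical conversation prompt
replace_token = DEFAULT_IM_START_TOKEN + DEFAULT_IMAGE_TOKEN + DEFAULT_IM_END_TOKEN
print(prompt.replace(DEFAULT_IMAGE_TOKEN, replace_token))
# USER: <im_start><image><im_end>
# Where is the dog? ASSISTANT: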
16,388
def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits refer_api = REFER(self.base_dir, ds, splitBy) ref_ids_val = refer_api.getRefIds(split=split) images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val) refs_val = refer_api.loadRefs(ref_ids=ref_ids_val) refer_seg_ds = {} refer_seg_ds["images"] = [] loaded_images = refer_api.loadImgs(image_ids=images_ids_val) for item in loaded_images: item = item.copy() if ds == "refclef": item["file_name"] = os.path.join( self.base_dir, "images/saiapr_tc-12", item["file_name"] ) elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]: item["file_name"] = os.path.join( self.base_dir, "images/mscoco/images/train2014", item["file_name"], ) refer_seg_ds["images"].append(item) refer_seg_ds["annotations"] = refer_api.Anns # anns_val img2refs = {} for ref in refs_val: image_id = ref["image_id"] img2refs[image_id] = img2refs.get(image_id, []) + [ ref, ] refer_seg_ds["img2refs"] = img2refs self.refer_seg_ds = refer_seg_ds self.data_type = "refer_seg" self.ds = ds self.tokenizer = tokenizer self.transform = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower) def __len__(self): if self.data_type == "refer_seg": return len(self.refer_seg_ds["images"]) else: return len(self.images) def preprocess(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input.""" # Normalize colors x = (x - self.pixel_mean) / self.pixel_std # Pad h, w = x.shape[-2:] padh = self.img_size - h padw = self.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x def __getitem__(self, idx): if self.data_type == "refer_seg": refer_seg_ds = self.refer_seg_ds images = refer_seg_ds["images"] annotations = refer_seg_ds["annotations"] img2refs = refer_seg_ds["img2refs"] image_info = images[idx] image_path = image_info["file_name"] image_id = image_info["id"] refs = img2refs[image_id] if len(refs) == 0: raise ValueError("image {} has no refs".format(image_id)) sents = [] ann_ids = [] for ref in refs: for sent in ref["sentences"]: sents.append(sent["sent"].strip().lower()) ann_ids.append(ref["ann_id"]) sampled_sents = sents sampled_ann_ids = ann_ids image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) is_sentence = False else: image_path = self.images[idx] image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) json_path = image_path.replace(".jpg", ".json")
cv2.setNumThreads(1) def collate_fn( batch, tokenizer=None, conv_type="llava_v1", use_mm_start_end=True, local_rank=-1 ): image_path_list = [] images_list = [] images_clip_list = [] conversation_list = [] masks_list = [] label_list = [] bboxes_labels_list = [] bboxes_valid_list = [] masks_valid_list = [] resize_list = [] questions_list = [] sampled_classes_list = [] offset_list = [0] cnt = 0 inferences = [] for ( image_path, images, images_clip, conversations, masks, label, bboxes_labels, bboxes_valid, masks_valid, resize, questions, sampled_classes, inference, ) in batch: image_path_list.append(image_path) images_list.append(images) images_clip_list.append(images_clip) conversation_list.extend(conversations) label_list.append(label) masks_list.append(masks.float()) bboxes_labels_list.extend(bboxes_labels) bboxes_valid_list.extend(bboxes_valid) masks_valid_list.append(torch.tensor(masks_valid)) resize_list.append(resize) questions_list.append(questions) sampled_classes_list.append(sampled_classes) cnt += len(conversations) offset_list.append(cnt) inferences.append(inference) if use_mm_start_end: # replace <image> token for i in range(len(conversation_list)): replace_token = DEFAULT_IMAGE_TOKEN replace_token = ( DEFAULT_IM_START_TOKEN + replace_token + DEFAULT_IM_END_TOKEN ) conversation_list[i] = conversation_list[i].replace( DEFAULT_IMAGE_TOKEN, replace_token ) input_ids = [ tokenizer_image_token(prompt, tokenizer, return_tensors="pt") for prompt in conversation_list ] input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id ) attention_masks = input_ids.ne(tokenizer.pad_token_id) for i in range(len(bboxes_valid_list)): bboxes_valid = bboxes_valid_list[i] attention_mask = attention_masks[i] if not bboxes_valid: attention_mask = attention_mask & input_ids[i].ne(tokenizer("[LOC]", add_special_tokens=False).input_ids[0]) attention_masks[i] = attention_mask conv = conversation_lib.default_conversation.copy() targets = input_ids.clone() if conv_type == "llava_v1": sep = conv.sep + conv.roles[1] + ": " else: sep = "[/INST] " for conversation, target in zip(conversation_list, targets): total_len = int(target.ne(tokenizer.pad_token_id).sum()) rounds = conversation.split(conv.sep2) cur_len = 1 target[:cur_len] = IGNORE_INDEX for i, rou in enumerate(rounds): if rou == "": break parts = rou.split(sep) # if len(parts) != 2: # break assert len(parts) == 2, (len(parts), rou) parts[0] += sep if DEFAULT_IMAGE_TOKEN in conversation: round_len = len(tokenizer_image_token(rou, tokenizer)) instruction_len = len(tokenizer_image_token(parts[0], tokenizer)) - 2 else: round_len = len(tokenizer(rou).input_ids) instruction_len = len(tokenizer(parts[0]).input_ids) - 2 target[cur_len : cur_len + instruction_len] = IGNORE_INDEX cur_len += round_len target[cur_len:] = IGNORE_INDEX if False: z = target.clone() z = torch.where(z == IGNORE_INDEX, tokenizer.unk_token_id, z) if local_rank == 0: print( "conversation: ", conversation, "tokenizer.decode(z): ", tokenizer.decode(z), ) if cur_len < tokenizer.model_max_length: assert cur_len == total_len if inferences[0] == False: truncate_len = tokenizer.model_max_length - 255 if input_ids.shape[1] > truncate_len: input_ids = input_ids[:, :truncate_len] targets = targets[:, :truncate_len] attention_masks = attention_masks[:, :truncate_len] return { "image_paths": image_path_list, "images": torch.stack(images_list, dim=0), "images_clip": torch.stack(images_clip_list, dim=0), "input_ids": input_ids, "labels": targets, 
"bboxes_labels_list": bboxes_labels_list, "bboxes_valid_list": torch.tensor(bboxes_valid_list), "masks_valid_list": masks_valid_list, "attention_masks": attention_masks, "masks_list": masks_list, "label_list": label_list, "resize_list": resize_list, "offset": torch.LongTensor(offset_list), "questions_list": questions_list, "sampled_classes_list": sampled_classes_list, "inference": inferences[0], "conversation_list": conversation_list, } class HybridDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, samples_per_epoch=500 * 8 * 2 * 10, precision: str = "fp32", num_classes_per_sample: int = 3, exclude_val=False, dataset="general_segdet||refer_seg||vqa||reason_seg", sample_rate=[9, 3, 3, 1], general_segdet_data="objects365||cocostuff||paco_lvis", general_segdet_sample_rate=[2,1,1], refer_seg_data="refclef||refcoco||refcoco+||refcocog", vqa_data="possible_locations_conv_86k||llava_instruct_80k", vqa_sample_rate=[2,1], ): self.exclude_val = exclude_val self.dataset = dataset self.samples_per_epoch = samples_per_epoch self.num_classes_per_sample = num_classes_per_sample sample_rate = np.array(sample_rate) self.sample_rate = sample_rate / sample_rate.sum() self.base_dir = base_dir self.tokenizer = tokenizer self.precision = precision self.datasets = dataset.split("||") self.all_datasets = [] for dataset in self.datasets: if dataset == "general_segdet": self.all_datasets.append( SegDetDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, general_segdet_data, general_segdet_sample_rate, ) ) elif dataset == "refer_seg": self.all_datasets.append( ReferSegDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, refer_seg_data, ) ) elif dataset == "vqa": self.all_datasets.append( VQADataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, vqa_data, vqa_sample_rate, ) ) elif dataset == "mixed_grounding": self.all_datasets.append( MixedGroundingDataset( base_dir, tokenizer, vision_tower, samples_per_epoch, precision, num_classes_per_sample, exclude_val, ) ) def __len__(self): return self.samples_per_epoch def __getitem__(self, idx): ind = np.random.choice(list(range(len(self.datasets))), p=self.sample_rate) data = self.all_datasets[ind] inference = False return *data[0], inference class ValDataset(torch.utils.data.Dataset): pixel_mean = torch.Tensor([123.675, 116.28, 103.53]).view(-1, 1, 1) pixel_std = torch.Tensor([58.395, 57.12, 57.375]).view(-1, 1, 1) img_size = 1024 ignore_label = 255 def __init__( self, base_dir, tokenizer, vision_tower, val_dataset, ): self.base_dir = base_dir splits = val_dataset.split("|") if len(splits) == 2: ds, split = splits images = glob.glob( os.path.join(self.base_dir, "reason_seg", ds, split, "*.jpg") ) self.images = images self.data_type = "reason_seg" elif len(splits) == 3: self.base_dir = os.path.join(self.base_dir, 'refer_seg') ds, splitBy, split = splits refer_api = REFER(self.base_dir, ds, splitBy) ref_ids_val = refer_api.getRefIds(split=split) images_ids_val = refer_api.getImgIds(ref_ids=ref_ids_val) refs_val = refer_api.loadRefs(ref_ids=ref_ids_val) refer_seg_ds = {} refer_seg_ds["images"] = [] loaded_images = refer_api.loadImgs(image_ids=images_ids_val) for item in loaded_images: item = item.copy() 
if ds == "refclef": item["file_name"] = os.path.join( self.base_dir, "images/saiapr_tc-12", item["file_name"] ) elif ds in ["refcoco", "refcoco+", "refcocog", "grefcoco"]: item["file_name"] = os.path.join( self.base_dir, "images/mscoco/images/train2014", item["file_name"], ) refer_seg_ds["images"].append(item) refer_seg_ds["annotations"] = refer_api.Anns # anns_val img2refs = {} for ref in refs_val: image_id = ref["image_id"] img2refs[image_id] = img2refs.get(image_id, []) + [ ref, ] refer_seg_ds["img2refs"] = img2refs self.refer_seg_ds = refer_seg_ds self.data_type = "refer_seg" self.ds = ds self.tokenizer = tokenizer self.transform = OwlViTProcessor.from_pretrained("google/owlvit-base-patch16") self.clip_image_processor = CLIPImageProcessor.from_pretrained(vision_tower) def __len__(self): if self.data_type == "refer_seg": return len(self.refer_seg_ds["images"]) else: return len(self.images) def preprocess(self, x: torch.Tensor) -> torch.Tensor: """Normalize pixel values and pad to a square input.""" # Normalize colors x = (x - self.pixel_mean) / self.pixel_std # Pad h, w = x.shape[-2:] padh = self.img_size - h padw = self.img_size - w x = F.pad(x, (0, padw, 0, padh)) return x def __getitem__(self, idx): if self.data_type == "refer_seg": refer_seg_ds = self.refer_seg_ds images = refer_seg_ds["images"] annotations = refer_seg_ds["annotations"] img2refs = refer_seg_ds["img2refs"] image_info = images[idx] image_path = image_info["file_name"] image_id = image_info["id"] refs = img2refs[image_id] if len(refs) == 0: raise ValueError("image {} has no refs".format(image_id)) sents = [] ann_ids = [] for ref in refs: for sent in ref["sentences"]: sents.append(sent["sent"].strip().lower()) ann_ids.append(ref["ann_id"]) sampled_sents = sents sampled_ann_ids = ann_ids image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) is_sentence = False else: image_path = self.images[idx] image = cv2.imread(image_path) image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) json_path = image_path.replace(".jpg", ".json")
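One non-obvious piece of the collate_fn above is the offset bookkeeping: each sample may contribute a different number of conversations, so all conversations are flattened into one list and the `offset` tensor stores cumulative counts, meaning sample i owns conversation_list[offset[i]:offset[i+1]]. A minimal sketch of that logic, using made-up conversation strings:

import torch

batch_conversations = [["q1 a1", "q2 a2"], ["q3 a3"], ["q4 a4", "q5 a5", "q6 a6"]]

conversation_list, offset_list, cnt = [], [0], 0
for conversations in batch_conversations:
    conversation_list.extend(conversations)   # flatten conversations across the batch
    cnt += len(conversations)
    offset_list.append(cnt)                   # cumulative count per sample

offset = torch.LongTensor(offset_list)        # tensor([0, 2, 3, 6])
for i in range(len(batch_conversations)):
    start, end = offset[i].item(), offset[i + 1].item()
    assert conversation_list[start:end] == batch_conversations[i]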
mask_json, sampled_sents, is_sentence = get_mask_from_json(json_path, image)
5
2023-12-15 14:58:24+00:00
24k
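The HybridDataset in the record above mixes its sub-datasets by normalizing the integer sample_rate weights into probabilities and drawing one sub-dataset per __getitem__ call. A minimal sketch of that selection step, using the default dataset string and weights from that code (the RNG seed is arbitrary):

import numpy as np

datasets = "general_segdet||refer_seg||vqa||reason_seg".split("||")
sample_rate = np.array([9, 3, 3, 1], dtype=np.float64)
sample_rate = sample_rate / sample_rate.sum()      # -> [0.5625, 0.1875, 0.1875, 0.0625]

rng = np.random.default_rng(0)                     # arbitrary seed for reproducibility
ind = rng.choice(len(datasets), p=sample_rate)     # weighted pick of one sub-dataset
print(datasets[ind])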
sinoyou/nelf-pro
nerfstudio/data/utils/dataloaders.py
[ { "identifier": "Cameras", "path": "nerfstudio/cameras/cameras.py", "snippet": "class Cameras(TensorDataclass):\n \"\"\"Dataparser outputs for the image dataset and the ray generator.\n\n Note: currently only supports cameras with the same principal points and types. The reason we type\n the focal lengths, principal points, and image sizes as tensors is to allow for batched cameras\n down the line in cases where your batches of camera data don't come from the same cameras.\n\n If a single value is provided, it is broadcasted to all cameras.\n\n Args:\n camera_to_worlds: Camera to world matrices. Tensor of per-image c2w matrices, in [R | t] format\n fx: Focal length x\n fy: Focal length y\n cx: Principal point x\n cy: Principal point y\n width: Image width\n height: Image height\n distortion_params: OpenCV 6 radial distortion coefficients\n camera_type: Type of camera model. This will be an int corresponding to the CameraType enum.\n times: Timestamps for each camera\n probe_config: dict config containing the generated probe information (core and basis)\n \"\"\"\n\n camera_to_worlds: TensorType[\"num_cameras\":..., 3, 4]\n fx: TensorType[\"num_cameras\":..., 1]\n fy: TensorType[\"num_cameras\":..., 1]\n cx: TensorType[\"num_cameras\":..., 1]\n cy: TensorType[\"num_cameras\":..., 1]\n width: TensorType[\"num_cameras\":..., 1]\n height: TensorType[\"num_cameras\":..., 1]\n distortion_params: Optional[TensorType[\"num_cameras\":..., 6]]\n camera_type: TensorType[\"num_cameras\":..., 1]\n times: Optional[TensorType[\"num_cameras\":..., 1]]\n image_filenames: Optional[List[str]]\n probe_config: Optional[list]\n\n def __init__(\n self,\n camera_to_worlds: TensorType[\"batch_c2ws\":..., 3, 4],\n fx: Union[TensorType[\"batch_fxs\":..., 1], float],\n fy: Union[TensorType[\"batch_fys\":..., 1], float],\n cx: Union[TensorType[\"batch_cxs\":..., 1], float],\n cy: Union[TensorType[\"batch_cys\":..., 1], float],\n width: Optional[Union[TensorType[\"batch_ws\":..., 1], int]] = None,\n height: Optional[Union[TensorType[\"batch_hs\":..., 1], int]] = None,\n distortion_params: Optional[TensorType[\"batch_dist_params\":..., 6]] = None,\n camera_type: Optional[\n Union[\n TensorType[\"batch_cam_types\":..., 1],\n int,\n List[CameraType],\n CameraType,\n ]\n ] = CameraType.PERSPECTIVE,\n times: Optional[TensorType[\"num_cameras\"]] = None,\n image_filenames: Optional[List[str]] = None,\n probe_config: Optional[list] = None\n ):\n \"\"\"Initializes the Cameras object.\n\n Note on Input Tensor Dimensions: All of these tensors have items of dimensions TensorType[3, 4]\n (in the case of the c2w matrices), TensorType[6] (in the case of distortion params), or\n TensorType[1] (in the case of the rest of the elements). The dimensions before that are\n considered the batch dimension of that tensor (batch_c2ws, batch_fxs, etc.). We will broadcast\n all the tensors to be the same batch dimension. This means you can use any combination of the\n input types in the function signature and it won't break. 
Your batch size for all tensors\n must be broadcastable to the same size, and the resulting number of batch dimensions will be\n the batch dimension with the largest number of dimensions.\n \"\"\"\n\n # This will notify the tensordataclass that we have a field with more than 1 dimension\n self._field_custom_dimensions = {\"camera_to_worlds\": 2}\n\n self.camera_to_worlds = camera_to_worlds\n\n # fx fy calculation\n self.fx = self._init_get_fc_xy(fx, \"fx\") # @dataclass's post_init will take care of broadcasting\n self.fy = self._init_get_fc_xy(fy, \"fy\") # @dataclass's post_init will take care of broadcasting\n\n # cx cy calculation\n self.cx = self._init_get_fc_xy(cx, \"cx\") # @dataclass's post_init will take care of broadcasting\n self.cy = self._init_get_fc_xy(cy, \"cy\") # @dataclass's post_init will take care of broadcasting\n\n # Distortion Params Calculation:\n self.distortion_params = distortion_params # @dataclass's post_init will take care of broadcasting\n\n # @dataclass's post_init will take care of broadcasting\n self.height = self._init_get_height_width(height, self.cy)\n self.width = self._init_get_height_width(width, self.cx)\n self.camera_type = self._init_get_camera_type(camera_type)\n self.times = self._init_get_times(times)\n \n self.image_filenames = image_filenames\n self.probe_config = probe_config\n if self.probe_config is not None:\n self.probe = Probes(self.camera_to_worlds, self.probe_config)\n else:\n self.probe = None\n \n self.__post_init__() # This will do the dataclass post_init and broadcast all the tensors\n\n def _init_get_fc_xy(self, fc_xy, name):\n \"\"\"\n Parses the input focal length / principle point x or y and returns a tensor of the correct shape\n\n Only needs to make sure that we a 1 in the last dimension if it is a tensor. If it is a float, we\n just need to make it into a tensor and it will be broadcasted later in the __post_init__ function.\n\n Args:\n fc_xy: The focal length / principle point x or y\n name: The name of the variable. 
Used for error messages\n \"\"\"\n if isinstance(fc_xy, float):\n fc_xy = torch.Tensor([fc_xy], device=self.device)\n elif isinstance(fc_xy, torch.Tensor):\n if fc_xy.ndim == 0 or fc_xy.shape[-1] != 1:\n fc_xy = fc_xy.unsqueeze(-1)\n fc_xy = fc_xy.to(self.device)\n else:\n raise ValueError(f\"{name} must be a float or tensor, got {type(fc_xy)}\")\n return fc_xy\n\n def _init_get_camera_type(\n self,\n camera_type: Union[\n TensorType[\"batch_cam_types\":..., 1], TensorType[\"batch_cam_types\":...], int, List[CameraType], CameraType\n ],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument camera_type\n\n Camera Type Calculation:\n If CameraType, convert to int and then to tensor, then broadcast to all cameras\n If List of CameraTypes, convert to ints and then to tensor, then broadcast to all cameras\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n\n Args:\n camera_type: camera_type argument from __init__()\n \"\"\"\n if isinstance(camera_type, CameraType):\n camera_type = torch.tensor([camera_type.value], device=self.device)\n elif isinstance(camera_type, List) and isinstance(camera_type[0], CameraType):\n camera_type = torch.tensor([[c.value] for c in camera_type], device=self.device)\n elif isinstance(camera_type, int):\n camera_type = torch.tensor([camera_type], device=self.device)\n elif isinstance(camera_type, torch.Tensor):\n assert not torch.is_floating_point(\n camera_type\n ), f\"camera_type tensor must be of type int, not: {camera_type.dtype}\"\n camera_type = camera_type.to(self.device)\n if camera_type.ndim == 0 or camera_type.shape[-1] != 1:\n camera_type = camera_type.unsqueeze(-1)\n # assert torch.all(\n # camera_type.view(-1)[0] == camera_type\n # ), \"Batched cameras of different camera_types will be allowed in the future.\"\n else:\n raise ValueError(\n 'Invalid camera_type. Must be CameraType, List[CameraType], int, or torch.Tensor[\"num_cameras\"]. 
\\\n Received: '\n + str(type(camera_type))\n )\n return camera_type\n\n def _init_get_height_width(\n self,\n h_w: Union[TensorType[\"batch_hws\":..., 1], TensorType[\"batch_hws\":...], int, None],\n c_x_y: TensorType[\"batch_cxys\":...],\n ) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"\n Parses the __init__() argument for height or width\n\n Height/Width Calculation:\n If int, first go to tensor and then broadcast to all cameras\n If tensor, broadcast to all cameras\n If none, use cx or cy * 2\n Else raise error\n\n Args:\n h_w: height or width argument from __init__()\n c_x_y: cx or cy for when h_w == None\n \"\"\"\n if isinstance(h_w, int):\n h_w = torch.Tensor([h_w]).to(torch.int64).to(self.device)\n elif isinstance(h_w, torch.Tensor):\n assert not torch.is_floating_point(h_w), f\"height and width tensor must be of type int, not: {h_w.dtype}\"\n h_w = h_w.to(torch.int64).to(self.device)\n if h_w.ndim == 0 or h_w.shape[-1] != 1:\n h_w = h_w.unsqueeze(-1)\n # assert torch.all(h_w == h_w.view(-1)[0]), \"Batched cameras of different h, w will be allowed in the future.\"\n elif h_w is None:\n h_w = torch.Tensor((c_x_y * 2).to(torch.int64).to(self.device))\n else:\n raise ValueError(\"Height must be an int, tensor, or None, received: \" + str(type(h_w)))\n return h_w\n\n def _init_get_times(self, times):\n if times is None:\n times = None\n elif isinstance(times, torch.Tensor):\n if times.ndim == 0 or times.shape[-1] != 1:\n times = times.unsqueeze(-1).to(self.device)\n else:\n raise ValueError(f\"times must be None or a tensor, got {type(times)}\")\n\n return times\n\n @property\n def device(self):\n \"\"\"Returns the device that the camera is on.\"\"\"\n return self.camera_to_worlds.device\n\n @property\n def image_height(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.height\n\n @property\n def image_width(self) -> TensorType[\"num_cameras\":..., 1]:\n \"\"\"Returns the height of the images.\"\"\"\n return self.width\n\n @property\n def is_jagged(self):\n \"\"\"\n Returns whether or not the cameras are \"jagged\" (i.e. the height and widths are different, meaning that\n you cannot concatenate the image coordinate maps together)\n \"\"\"\n h_jagged = not torch.all(self.height == self.height.view(-1)[0])\n w_jagged = not torch.all(self.width == self.width.view(-1)[0])\n return h_jagged or w_jagged\n\n def get_image_coords(\n self, pixel_offset: float = 0.5, index: Optional[Tuple] = None\n ) -> TensorType[\"height\", \"width\", 2]:\n \"\"\"This gets the image coordinates of one of the cameras in this object.\n\n If no index is specified, it will return the maximum possible sized height / width image coordinate map,\n by looking at the maximum height and width of all the cameras in this object.\n\n Args:\n pixel_offset: Offset for each pixel. Defaults to center of pixel (0.5)\n index: Tuple of indices into the batch dimensions of the camera. 
Defaults to None, which returns the 0th\n flattened camera\n\n Returns:\n Grid of image coordinates.\n \"\"\"\n if index is None:\n image_height = torch.max(self.image_height.view(-1))\n image_width = torch.max(self.image_width.view(-1))\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n else:\n image_height = self.image_height[index].item()\n image_width = self.image_width[index].item()\n image_coords = torch.meshgrid(torch.arange(image_height), torch.arange(image_width), indexing=\"ij\")\n image_coords = torch.stack(image_coords, dim=-1) + pixel_offset # stored as (y, x) coordinates\n return image_coords\n\n def generate_rays( # pylint: disable=too-many-statements\n self,\n camera_indices: Union[TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"], int],\n coords: Optional[TensorType[\"num_rays\":..., 2]] = None,\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n keep_shape: Optional[bool] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices.\n\n This function will standardize the input arguments and then call the _generate_rays_from_coords function\n to generate the rays. Our goal is to parse the arguments and then get them into the right shape:\n - camera_indices: (num_rays:..., num_cameras_batch_dims)\n - coords: (num_rays:..., 2)\n - camera_opt_to_camera: (num_rays:..., 3, 4) or None\n - distortion_params_delta: (num_rays:..., 6) or None\n\n Read the docstring for _generate_rays_from_coords for more information on how we generate the rays\n after we have standardized the arguments.\n\n We are only concerned about different combinations of camera_indices and coords matrices, and the following\n are the 4 cases we have to deal with:\n 1. isinstance(camera_indices, int) and coords == None\n - In this case we broadcast our camera_indices / coords shape (h, w, 1 / 2 respectively)\n 2. isinstance(camera_indices, int) and coords != None\n - In this case, we broadcast camera_indices to the same batch dim as coords\n 3. not isinstance(camera_indices, int) and coords == None\n - In this case, we will need to set coords so that it is of shape (h, w, num_rays, 2), and broadcast\n all our other args to match the new definition of num_rays := (h, w) + num_rays\n 4. not isinstance(camera_indices, int) and coords != None\n - In this case, we have nothing to do, only check that the arguments are of the correct shape\n\n There is one more edge case we need to be careful with: when we have \"jagged cameras\" (ie: different heights\n and widths for each camera). This isn't problematic when we specify coords, since coords is already a tensor.\n When coords == None (ie: when we render out the whole image associated with this camera), we run into problems\n since there's no way to stack each coordinate map as all coordinate maps are all different shapes. In this case,\n we will need to flatten each individual coordinate map and concatenate them, giving us only one batch dimension,\n regaurdless of the number of prepended extra batch dimensions in the camera_indices tensor.\n\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n coords: Coordinates of the pixels to generate rays for. 
If None, the full image will be rendered.\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n distortion_params_delta: Optional delta for the distortion parameters.\n keep_shape: If None, then we default to the regular behavior of flattening if cameras is jagged, otherwise\n keeping dimensions. If False, we flatten at the end. If True, then we keep the shape of the\n camera_indices and coords tensors (if we can).\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords.\n \"\"\"\n # Check the argument types to make sure they're valid and all shaped correctly\n assert isinstance(camera_indices, (torch.Tensor, int)), \"camera_indices must be a tensor or int\"\n assert coords is None or isinstance(coords, torch.Tensor), \"coords must be a tensor or None\"\n assert camera_opt_to_camera is None or isinstance(camera_opt_to_camera, torch.Tensor)\n assert distortion_params_delta is None or isinstance(distortion_params_delta, torch.Tensor)\n if isinstance(camera_indices, torch.Tensor) and isinstance(coords, torch.Tensor):\n num_rays_shape = camera_indices.shape[:-1]\n errormsg = \"Batch dims of inputs must match when inputs are all tensors\"\n assert coords.shape[:-1] == num_rays_shape, errormsg\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == num_rays_shape, errormsg\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == num_rays_shape, errormsg\n\n # If zero dimensional, we need to unsqueeze to get a batch dimension and then squeeze later\n if not self.shape:\n cameras = self.reshape((1,))\n assert torch.all(\n torch.tensor(camera_indices == 0) if isinstance(camera_indices, int) else camera_indices == 0\n ), \"Can only index into single camera with no batch dimensions if index is zero\"\n else:\n cameras = self\n\n # If the camera indices are an int, then we need to make sure that the camera batch is 1D\n if isinstance(camera_indices, int):\n assert (\n len(cameras.shape) == 1\n ), \"camera_indices must be a tensor if cameras are batched with more than 1 batch dimension\"\n camera_indices = torch.tensor([camera_indices], device=cameras.device)\n\n assert camera_indices.shape[-1] == len(\n cameras.shape\n ), \"camera_indices must have shape (num_rays:..., num_cameras_batch_dims)\"\n\n # If keep_shape is True, then we need to make sure that the camera indices in question\n # are all the same height and width and can actually be batched while maintaining the image\n # shape\n if keep_shape is True:\n assert torch.all(cameras.height[camera_indices] == cameras.height[camera_indices[0]]) and torch.all(\n cameras.width[camera_indices] == cameras.width[camera_indices[0]]\n ), \"Can only keep shape if all cameras have the same height and width\"\n\n # If the cameras don't all have same height / width, if coords is not none, we will need to generate\n # a flat list of coords for each camera and then concatenate otherwise our rays will be jagged.\n # Camera indices, camera_opt, and distortion will also need to be broadcasted accordingly which is non-trivial\n if cameras.is_jagged and coords is None and (keep_shape is None or keep_shape is False):\n index_dim = camera_indices.shape[-1]\n camera_indices = camera_indices.reshape(-1, index_dim)\n _coords = [cameras.get_image_coords(index=tuple(index)).reshape(-1, 2) for index in camera_indices]\n camera_indices = torch.cat(\n [index.unsqueeze(0).repeat(coords.shape[0], 1) for index, coords in zip(camera_indices, _coords)],\n 
)\n coords = torch.cat(_coords, dim=0)\n assert coords.shape[0] == camera_indices.shape[0]\n # Need to get the coords of each indexed camera and flatten all coordinate maps and concatenate them\n\n # The case where we aren't jagged && keep_shape (since otherwise coords is already set) and coords\n # is None. In this case we append (h, w) to the num_rays dimensions for all tensors. In this case,\n # each image in camera_indices has to have the same shape since otherwise we would have error'd when\n # we checked keep_shape is valid or we aren't jagged.\n if coords is None:\n index_dim = camera_indices.shape[-1]\n index = camera_indices.reshape(-1, index_dim)[0]\n coords: torch.Tensor = cameras.get_image_coords(index=tuple(index)) # (h, w, 2)\n coords = coords.reshape(coords.shape[:2] + (1,) * len(camera_indices.shape[:-1]) + (2,)) # (h, w, 1..., 2)\n coords = coords.expand(coords.shape[:2] + camera_indices.shape[:-1] + (2,)) # (h, w, num_rays, 2)\n camera_opt_to_camera = ( # (h, w, num_rays, 3, 4) or None\n camera_opt_to_camera.broadcast_to(coords.shape[:-1] + (3, 4))\n if camera_opt_to_camera is not None\n else None\n )\n distortion_params_delta = ( # (h, w, num_rays, 6) or None\n distortion_params_delta.broadcast_to(coords.shape[:-1] + (6,))\n if distortion_params_delta is not None\n else None\n )\n\n # If camera indices was an int or coords was none, we need to broadcast our indices along batch dims\n camera_indices = camera_indices.broadcast_to(coords.shape[:-1] + (len(cameras.shape),)).to(torch.long)\n\n # Checking our tensors have been standardized\n assert isinstance(coords, torch.Tensor) and isinstance(camera_indices, torch.Tensor)\n assert camera_indices.shape[-1] == len(cameras.shape)\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape[:-2] == coords.shape[:-1]\n assert distortion_params_delta is None or distortion_params_delta.shape[:-1] == coords.shape[:-1]\n\n # This will do the actual work of generating the rays now that we have standardized the inputs\n # raybundle.shape == (num_rays) when done\n # pylint: disable=protected-access\n raybundle = cameras._generate_rays_from_coords(\n camera_indices, coords, camera_opt_to_camera, distortion_params_delta, disable_distortion=disable_distortion\n )\n\n # If we have mandated that we don't keep the shape, then we flatten\n if keep_shape is False:\n raybundle = raybundle.flatten()\n\n # TODO: We should have to squeeze the last dimension here if we started with zero batch dims, but never have to,\n # so there might be a rogue squeeze happening somewhere, and this may cause some unintended behaviour\n # that we haven't caught yet with tests\n return raybundle\n\n # pylint: disable=too-many-statements\n def _generate_rays_from_coords(\n self,\n camera_indices: TensorType[\"num_rays\":..., \"num_cameras_batch_dims\"],\n coords: TensorType[\"num_rays\":..., 2],\n camera_opt_to_camera: Optional[TensorType[\"num_rays\":..., 3, 4]] = None,\n distortion_params_delta: Optional[TensorType[\"num_rays\":..., 6]] = None,\n disable_distortion: bool = False,\n ) -> RayBundle:\n \"\"\"Generates rays for the given camera indices and coords where self isn't jagged\n\n This is a fairly complex function, so let's break this down slowly.\n\n Shapes involved:\n - num_rays: This is your output raybundle shape. 
It dictates the number and shape of the rays generated\n - num_cameras_batch_dims: This is the number of dimensions of our camera\n\n Args:\n camera_indices: Camera indices of the flattened cameras object to generate rays for.\n The shape of this is such that indexing into camera_indices[\"num_rays\":...] will return the\n index into each batch dimension of the camera in order to get the correct camera specified by\n \"num_rays\".\n Example:\n >>> cameras = Cameras(...)\n >>> cameras.shape\n (2, 3, 4)\n >>> camera_indices = torch.tensor([0, 0, 0]) # We need an axis of length 3 since cameras.ndim == 3\n >>> camera_indices.shape\n (3,)\n >>> coords = torch.tensor([1,1])\n >>> coords.shape\n (2,)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at image coordinates (1,1), so out_rays.shape == ()\n >>> out_rays.shape\n ()\n >>> camera_indices = torch.tensor([[0,0,0]])\n >>> camera_indices.shape\n (1, 3)\n >>> coords = torch.tensor([[1,1]])\n >>> coords.shape\n (1, 2)\n >>> out_rays = cameras.generate_rays(camera_indices=camera_indices, coords = coords)\n # This will generate a RayBundle with a single ray for the\n # camera at cameras[0,0,0] at point (1,1), so out_rays.shape == (1,)\n # since we added an extra dimension in front of camera_indices\n >>> out_rays.shape\n (1,)\n\n If you want more examples, check tests/cameras/test_cameras and the function check_generate_rays_shape\n\n The bottom line is that for camera_indices: (num_rays:..., num_cameras_batch_dims), num_rays is the\n output shape and if you index into the output RayBundle with some indices [i:...], if you index into\n camera_indices with camera_indices[i:...] as well, you will get a 1D tensor containing the batch\n indices into the original cameras object corresponding to that ray (ie: you will get the camera\n from our batched cameras corresponding to the ray at RayBundle[i:...]).\n\n coords: Coordinates of the pixels to generate rays for. If None, the full image will be rendered, meaning\n height and width get prepended to the num_rays dimensions. Indexing into coords with [i:...] will\n get you the image coordinates [x, y] of that specific ray located at output RayBundle[i:...].\n\n camera_opt_to_camera: Optional transform for the camera to world matrices.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 2D camera to world transform matrix for the camera optimization at RayBundle[i:...].\n\n distortion_params_delta: Optional delta for the distortion parameters.\n In terms of shape, it follows the same rules as coords, but indexing into it with [i:...] gets you\n the 1D tensor with the 6 distortion parameters for the camera optimization at RayBundle[i:...].\n\n disable_distortion: If True, disables distortion.\n\n Returns:\n Rays for the given camera indices and coords. 
RayBundle.shape == num_rays\n \"\"\"\n # Make sure we're on the right devices\n camera_indices = camera_indices.to(self.device)\n coords = coords.to(self.device)\n\n # Checking to make sure everything is of the right shape and type\n num_rays_shape = camera_indices.shape[:-1]\n assert camera_indices.shape == num_rays_shape + (self.ndim,)\n assert coords.shape == num_rays_shape + (2,)\n assert coords.shape[-1] == 2\n assert camera_opt_to_camera is None or camera_opt_to_camera.shape == num_rays_shape + (3, 4)\n assert distortion_params_delta is None or distortion_params_delta.shape == num_rays_shape + (6,)\n\n # Here, we've broken our indices down along the num_cameras_batch_dims dimension allowing us to index by all\n # of our output rays at each dimension of our cameras object\n true_indices = [camera_indices[..., i] for i in range(camera_indices.shape[-1])]\n\n # Get all our focal lengths, principal points and make sure they are the right shapes\n y = coords[..., 0] # (num_rays,) get rid of the last dimension\n x = coords[..., 1] # (num_rays,) get rid of the last dimension\n fx, fy = self.fx[true_indices].squeeze(-1), self.fy[true_indices].squeeze(-1) # (num_rays,)\n cx, cy = self.cx[true_indices].squeeze(-1), self.cy[true_indices].squeeze(-1) # (num_rays,)\n assert (\n y.shape == num_rays_shape\n and x.shape == num_rays_shape\n and fx.shape == num_rays_shape\n and fy.shape == num_rays_shape\n and cx.shape == num_rays_shape\n and cy.shape == num_rays_shape\n ), (\n str(num_rays_shape)\n + str(y.shape)\n + str(x.shape)\n + str(fx.shape)\n + str(fy.shape)\n + str(cx.shape)\n + str(cy.shape)\n )\n\n # Get our image coordinates and image coordinates offset by 1 (offsets used for dx, dy calculations)\n # Also make sure the shapes are correct\n coord = torch.stack([(x - cx) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_x_offset = torch.stack([(x - cx + 1) / fx, -(y - cy) / fy], -1) # (num_rays, 2)\n coord_y_offset = torch.stack([(x - cx) / fx, -(y - cy + 1) / fy], -1) # (num_rays, 2)\n assert (\n coord.shape == num_rays_shape + (2,)\n and coord_x_offset.shape == num_rays_shape + (2,)\n and coord_y_offset.shape == num_rays_shape + (2,)\n )\n\n # Stack image coordinates and image coordinates offset by 1, check shapes too\n coord_stack = torch.stack([coord, coord_x_offset, coord_y_offset], dim=0) # (3, num_rays, 2)\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Undistorts our images according to our distortion parameters\n if not disable_distortion:\n distortion_params = None\n if self.distortion_params is not None:\n distortion_params = self.distortion_params[true_indices]\n if distortion_params_delta is not None:\n distortion_params = distortion_params + distortion_params_delta\n elif distortion_params_delta is not None:\n distortion_params = distortion_params_delta\n\n # Do not apply distortion for equirectangular images\n if distortion_params is not None:\n mask = (self.camera_type[true_indices] != CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n coord_mask = torch.stack([mask, mask, mask], dim=0)\n if mask.any():\n coord_stack[coord_mask, :] = camera_utils.radial_and_tangential_undistort(\n coord_stack[coord_mask, :].reshape(3, -1, 2),\n distortion_params[mask, :],\n ).reshape(-1, 2)\n\n # Make sure after we have undistorted our images, the shapes are still correct\n assert coord_stack.shape == (3,) + num_rays_shape + (2,)\n\n # Gets our directions for all our rays in camera coordinates and checks shapes at the end\n # Here, directions_stack is of shape (3, 
num_rays, 3)\n # directions_stack[0] is the direction for ray in camera coordinates\n # directions_stack[1] is the direction for ray in camera coordinates offset by 1 in x\n # directions_stack[2] is the direction for ray in camera coordinates offset by 1 in y\n cam_types = torch.unique(self.camera_type, sorted=False)\n directions_stack = torch.empty((3,) + num_rays_shape + (3,), device=self.device)\n if CameraType.PERSPECTIVE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.PERSPECTIVE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0], mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1], mask).float()\n directions_stack[..., 2][mask] = -1.0\n\n if CameraType.FISHEYE.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.FISHEYE.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n theta = torch.sqrt(torch.sum(coord_stack**2, dim=-1))\n theta = torch.clip(theta, 0.0, math.pi)\n\n sin_theta = torch.sin(theta)\n\n directions_stack[..., 0][mask] = torch.masked_select(coord_stack[..., 0] * sin_theta / theta, mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(coord_stack[..., 1] * sin_theta / theta, mask).float()\n directions_stack[..., 2][mask] = -torch.masked_select(torch.cos(theta), mask)\n\n if CameraType.EQUIRECTANGULAR.value in cam_types:\n mask = (self.camera_type[true_indices] == CameraType.EQUIRECTANGULAR.value).squeeze(-1) # (num_rays)\n mask = torch.stack([mask, mask, mask], dim=0)\n\n # For equirect, fx = fy = height = width/2\n # Then coord[..., 0] goes from -1 to 1 and coord[..., 1] goes from -1/2 to 1/2\n theta = -torch.pi * coord_stack[..., 0] # minus sign for right-handed\n phi = torch.pi * (0.5 - coord_stack[..., 1])\n # use spherical in local camera coordinates (+y up, x=0 and z<0 is theta=0)\n directions_stack[..., 0][mask] = torch.masked_select(-torch.sin(theta) * torch.sin(phi), mask).float()\n directions_stack[..., 1][mask] = torch.masked_select(torch.cos(phi), mask).float()\n directions_stack[..., 2][mask] = torch.masked_select(-torch.cos(theta) * torch.sin(phi), mask).float()\n\n for value in cam_types:\n if value not in [CameraType.PERSPECTIVE.value, CameraType.FISHEYE.value, CameraType.EQUIRECTANGULAR.value]:\n raise ValueError(f\"Camera type {value} not supported.\")\n\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n c2w = self.camera_to_worlds[true_indices]\n assert c2w.shape == num_rays_shape + (3, 4)\n\n if camera_opt_to_camera is not None:\n c2w = pose_utils.multiply(c2w, camera_opt_to_camera)\n rotation = c2w[..., :3, :3] # (..., 3, 3)\n assert rotation.shape == num_rays_shape + (3, 3)\n\n directions_stack = torch.sum(\n directions_stack[..., None, :] * rotation, dim=-1\n ) # (..., 1, 3) * (..., 3, 3) -> (..., 3)\n\n directions_norm = torch.norm(directions_stack, dim=-1, keepdim=True)\n directions_norm = directions_norm[0]\n\n directions_stack = normalize(directions_stack, dim=-1)\n assert directions_stack.shape == (3,) + num_rays_shape + (3,)\n\n origins = c2w[..., :3, 3] # (..., 3)\n assert origins.shape == num_rays_shape + (3,)\n\n directions = directions_stack[0]\n assert directions.shape == num_rays_shape + (3,)\n\n # norms of the vector going between adjacent coords, giving us dx and dy per output ray\n dx = torch.sqrt(torch.sum((directions - directions_stack[1]) ** 2, dim=-1)) # (\"num_rays\":...,)\n dy = 
torch.sqrt(torch.sum((directions - directions_stack[2]) ** 2, dim=-1)) # (\"num_rays\":...,)\n assert dx.shape == num_rays_shape and dy.shape == num_rays_shape\n\n pixel_area = (dx * dy)[..., None] # (\"num_rays\":..., 1)\n assert pixel_area.shape == num_rays_shape + (1,)\n\n times = self.times[camera_indices, 0] if self.times is not None else None\n\n\n return RayBundle(\n origins=origins,\n directions=directions,\n pixel_area=pixel_area,\n camera_indices=camera_indices,\n directions_norm=directions_norm,\n times=times,\n probes=self.probe,\n )\n\n def to_json(\n self, camera_idx: int, image: Optional[TensorType[\"height\", \"width\", 2]] = None, max_size: Optional[int] = None\n ) -> Dict:\n \"\"\"Convert a camera to a json dictionary.\n\n Args:\n camera_idx: Index of the camera to convert.\n image: An image in range [0, 1] that is encoded to a base64 string.\n max_size: Max size to resize the image to if present.\n\n Returns:\n A JSON representation of the camera\n \"\"\"\n flattened = self.flatten()\n json_ = {\n \"type\": \"PinholeCamera\",\n \"cx\": flattened[camera_idx].cx.item(),\n \"cy\": flattened[camera_idx].cy.item(),\n \"fx\": flattened[camera_idx].fx.item(),\n \"fy\": flattened[camera_idx].fy.item(),\n \"camera_to_world\": self.camera_to_worlds[camera_idx].tolist(),\n \"camera_index\": camera_idx,\n \"times\": flattened[camera_idx].times.item() if self.times is not None else None,\n }\n if image is not None:\n image_uint8 = (image * 255).detach().type(torch.uint8)\n if max_size is not None:\n image_uint8 = image_uint8.permute(2, 0, 1)\n image_uint8 = torchvision.transforms.functional.resize(image_uint8, max_size) # type: ignore\n image_uint8 = image_uint8.permute(1, 2, 0)\n image_uint8 = image_uint8.cpu().numpy()\n data = cv2.imencode(\".jpg\", image_uint8)[1].tobytes()\n json_[\"image\"] = str(\"data:image/jpeg;base64,\" + base64.b64encode(data).decode(\"ascii\"))\n return json_\n\n def get_intrinsics_matrices(self) -> TensorType[\"num_cameras\":..., 3, 3]:\n \"\"\"Returns the intrinsic matrices for each camera.\n\n Returns:\n Pinhole camera intrinsics matrices\n \"\"\"\n K = torch.zeros((*self.shape, 3, 3), dtype=torch.float32)\n K[..., 0, 0] = self.fx.squeeze(-1)\n K[..., 1, 1] = self.fy.squeeze(-1)\n K[..., 0, 2] = self.cx.squeeze(-1)\n K[..., 1, 2] = self.cy.squeeze(-1)\n K[..., 2, 2] = 1.0\n return K\n\n def rescale_output_resolution(\n self,\n scaling_factor: Union[TensorType[\"num_cameras\":...], TensorType[\"num_cameras\":..., 1], float, int],\n round_hw=False,\n ) -> None:\n \"\"\"Rescale the output resolution of the cameras.\n\n Args:\n scaling_factor: Scaling factor to apply to the output resolution.\n round_hw: Whether to round the height and width to the nearest integer.\n \"\"\"\n if isinstance(scaling_factor, (float, int)):\n scaling_factor = torch.tensor([scaling_factor]).to(self.device).broadcast_to((self.cx.shape))\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == self.shape:\n scaling_factor = scaling_factor.unsqueeze(-1)\n elif isinstance(scaling_factor, torch.Tensor) and scaling_factor.shape == (*self.shape, 1):\n pass\n else:\n raise ValueError(\n f\"Scaling factor must be a float, int, or a tensor of shape {self.shape} or {(*self.shape, 1)}.\"\n )\n\n self.fx = self.fx * scaling_factor\n self.fy = self.fy * scaling_factor\n self.cx = self.cx * scaling_factor\n self.cy = self.cy * scaling_factor\n if not round_hw:\n self.height = (self.height * scaling_factor).to(torch.int64)\n self.width = (self.width * 
scaling_factor).to(torch.int64)\n else:\n self.height = torch.floor(self.height * scaling_factor + 0.5).to(torch.int64)\n self.width = torch.floor(self.width * scaling_factor + 0.5).to(torch.int64)\n\n def get_plotly(self, camera_group):\n\n # define local necssary coordinates for plotting\n num_cameras = self.camera_to_worlds.shape[0]\n _cam_center_c = np.array([[.0, .0, .0]]).repeat(num_cameras, axis=0)\n _cam_forward_c = np.array([[.0, .0, -1.0]]).repeat(num_cameras, axis=0)\n _cam_up_c = np.array([[.0, 1.0, .0]]).repeat(num_cameras, axis=0)\n _cam_right_c = np.array([[1.0, .0, .0]]).repeat(num_cameras, axis=0)\n\n _pyramid_width = self.width.cpu().numpy() / self.fx.cpu().numpy()\n _pyramid_height = self.height.cpu().numpy() / self.fy.cpu().numpy()\n\n _cam_pyramid_ur = np.concatenate([_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dr = np.concatenate([_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_ul = np.concatenate([-_pyramid_width/2, _pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n _cam_pyramid_dl = np.concatenate([-_pyramid_width/2, -_pyramid_height/2, -np.ones_like(_pyramid_width)], axis=-1)\n\n _local_coordinates = {\n 'center': _cam_center_c, \n 'forward': _cam_forward_c, \n 'up': _cam_up_c, \n 'right': _cam_right_c, \n 'pyramid_ur': _cam_pyramid_ur, \n 'pyramid_dr': _cam_pyramid_dr, \n 'pyramid_ul': _cam_pyramid_ul, \n 'pyramid_dl': _cam_pyramid_dl, \n }\n\n # transform it into world coordinates\n data = {}\n for k in _local_coordinates.keys():\n _local_coor_homo = np.concatenate([_local_coordinates[k].reshape(-1, 3) * plotly_camera_scale, np.ones((num_cameras, 1))], axis=-1) # num_cam, 4\n _cw = self.camera_to_worlds.cpu().numpy() # num_cam, 3, 4\n\n _homo = np.einsum('ijk,ik->ij', _cw, _local_coor_homo) # num_cam, 3\n data[k] = _homo[:, :3]\n\n plot_data = plot_camera_components(data, image_list=self.image_filenames, camera_group=camera_group)\n \n if isinstance(plot_data, list):\n return plot_data\n else:\n return [plot_data]" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n directions_norm: Optional[TensorType[..., 1]] = None\n \"\"\"Norm of ray direction vector before normalization\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n probes: Optional[Probes] = None\n \"\"\"Probe Cameras Object. This object doesn't follow the same shape pattern as the other fields. \n Lazy broadcasting is used for preventing CUDA memory overflow. 
\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self):\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indicies.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., \"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin. (in Euclidean space)\n bin_ends: Distance from origin to end of bin. (in Euclidean space)\n spacing_starts: start point in normalized space. [0, 1]\n spacing_ends: end point in normalized space. 
[0, 1]\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n probes=self.probes, # special class, not following the same shape pattern\n )\n\n return ray_samples" }, { "identifier": "InputDataset", "path": "nerfstudio/data/datasets/base_dataset.py", "snippet": "class InputDataset(Dataset):\n \"\"\"Dataset that returns images.\n\n Args:\n dataparser_outputs: description of where and how to read input images.\n scale_factor: The scaling factor for the dataparser outputs\n \"\"\"\n\n def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):\n super().__init__()\n self._dataparser_outputs = dataparser_outputs\n self.has_masks = dataparser_outputs.mask_filenames is not None\n self.scale_factor = scale_factor\n self.scene_box = deepcopy(dataparser_outputs.scene_box)\n self.metadata = deepcopy(dataparser_outputs.metadata)\n self.cameras = deepcopy(dataparser_outputs.cameras)\n self.cameras.rescale_output_resolution(scaling_factor=scale_factor)\n self.image_cache = {}\n\n def __len__(self):\n return len(self._dataparser_outputs.image_filenames)\n\n def get_numpy_image(self, image_idx: int) -> npt.NDArray[np.uint8]:\n \"\"\"Returns the image of shape (H, W, 3 or 4).\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image_filename = self._dataparser_outputs.image_filenames[image_idx]\n pil_image = Image.open(image_filename)\n if self.scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * self.scale_factor), int(height * self.scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.BILINEAR)\n image = np.array(pil_image, dtype=\"uint8\") # shape is (h, w, 3 or 4)\n # mask_filename = str(image_filename).replace(\"dense/images\", \"masks\").replace(\".jpg\", \".npy\")\n # mask = np.load(mask_filename)\n # image = image * mask[..., None]\n\n assert len(image.shape) == 3\n assert image.dtype == np.uint8\n assert image.shape[2] in [3, 4], f\"Image shape of {image.shape} is in correct.\"\n return image\n\n def get_image(self, image_idx: int) -> TensorType[\"image_height\", \"image_width\", \"num_channels\"]:\n \"\"\"Returns a 3 channel image.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image = torch.from_numpy(self.get_numpy_image(image_idx).astype(\"float32\") / 255.0)\n if self._dataparser_outputs.alpha_color is not None and image.shape[-1] == 4:\n assert image.shape[-1] == 4\n image = image[:, :, :3] * image[:, :, -1:] + self._dataparser_outputs.alpha_color * (1.0 - image[:, :, -1:])\n else:\n image = image[:, :, :3]\n return image\n\n def get_data(self, image_idx: int) -> Dict:\n \"\"\"Returns the 
ImageDataset data as a dictionary.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n if image_idx in self.image_cache:\n image = self.image_cache[image_idx]\n else:\n image = self.get_image(image_idx)\n self.image_cache[image_idx] = image\n\n data = {\"image_idx\": image_idx, 'image_filename': self._dataparser_outputs.image_filenames[image_idx].name}\n data[\"image\"] = image\n for _, data_func_dict in self._dataparser_outputs.additional_inputs.items():\n assert \"func\" in data_func_dict, \"Missing function to process data: specify `func` in `additional_inputs`\"\n func = data_func_dict[\"func\"]\n assert \"kwargs\" in data_func_dict, \"No data to process: specify `kwargs` in `additional_inputs`\"\n data.update(func(image_idx, **data_func_dict[\"kwargs\"]))\n if self.has_masks:\n mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]\n data[\"mask\"] = get_image_mask_tensor_from_path(filepath=mask_filepath, scale_factor=self.scale_factor)\n metadata = self.get_metadata(data)\n data.update(metadata)\n return data\n\n # pylint: disable=no-self-use\n def get_metadata(self, data: Dict) -> Dict:\n \"\"\"Method that can be used to process any additional metadata that may be part of the model inputs.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n del data\n return {}\n\n def __getitem__(self, image_idx: int) -> Dict:\n data = self.get_data(image_idx)\n return data" }, { "identifier": "nerfstudio_collate", "path": "nerfstudio/data/utils/nerfstudio_collate.py", "snippet": "def nerfstudio_collate(\n batch, extra_mappings: Union[Dict[type, Callable], None] = None\n): # pylint: disable=too-many-return-statements\n r\"\"\"\n This is the default pytorch collate function, but with support for nerfstudio types. All documentation\n below is copied straight over from pytorch's default_collate function, python version 3.8.13,\n pytorch version '1.12.1+cu113'. Custom nerfstudio types are accounted for at the end, and extra\n mappings can be passed in to handle custom types. These mappings are from types: callable (types\n being like int or float or the return value of type(3.), etc). The only code before we parse for custom types that\n was changed from default pytorch was the addition of the extra_mappings argument, a find and replace operation\n from default_collate to nerfstudio_collate, and the addition of the nerfstudio_collate_err_msg_format variable.\n\n\n Function that takes in a batch of data and puts the elements within the batch\n into a tensor with an additional outer dimension - batch size. 
The exact output type can be\n a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a\n Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.\n This is used as the default function for collation when\n `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.\n\n Here is the general input type (based on the type of the element within the batch) to output type mapping:\n\n * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)\n * NumPy Arrays -> :class:`torch.Tensor`\n * `float` -> :class:`torch.Tensor`\n * `int` -> :class:`torch.Tensor`\n * `str` -> `str` (unchanged)\n * `bytes` -> `bytes` (unchanged)\n * `Mapping[K, V_i]` -> `Mapping[K, nerfstudio_collate([V_1, V_2, ...])]`\n * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[nerfstudio_collate([V1_1, V1_2, ...]),\n nerfstudio_collate([V2_1, V2_2, ...]), ...]`\n * `Sequence[V1_i, V2_i, ...]` -> `Sequence[nerfstudio_collate([V1_1, V1_2, ...]),\n nerfstudio_collate([V2_1, V2_2, ...]), ...]`\n\n Args:\n batch: a single batch to be collated\n\n Examples:\n >>> # Example with a batch of `int`s:\n >>> nerfstudio_collate([0, 1, 2, 3])\n tensor([0, 1, 2, 3])\n >>> # Example with a batch of `str`s:\n >>> nerfstudio_collate(['a', 'b', 'c'])\n ['a', 'b', 'c']\n >>> # Example with `Map` inside the batch:\n >>> nerfstudio_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])\n {'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}\n >>> # Example with `NamedTuple` inside the batch:\n >>> Point = namedtuple('Point', ['x', 'y'])\n >>> nerfstudio_collate([Point(0, 0), Point(1, 1)])\n Point(x=tensor([0, 1]), y=tensor([0, 1]))\n >>> # Example with `Tuple` inside the batch:\n >>> nerfstudio_collate([(0, 1), (2, 3)])\n [tensor([0, 2]), tensor([1, 3])]\n >>> # Example with `List` inside the batch:\n >>> nerfstudio_collate([[0, 1], [2, 3]])\n [tensor([0, 2]), tensor([1, 3])]\n \"\"\"\n if extra_mappings is None:\n extra_mappings = {}\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor): # pylint: disable=no-else-return\n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum(x.numel() for x in batch)\n storage = elem.storage()._new_shared(numel, device=elem.device) # pylint: disable=protected-access\n out = elem.new(storage).resize_(len(batch), *list(elem.size()))\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == \"numpy\" and elem_type.__name__ != \"str_\" and elem_type.__name__ != \"string_\":\n # pylint: disable=no-else-return, consider-using-in\n if elem_type.__name__ == \"ndarray\" or elem_type.__name__ == \"memmap\":\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem.dtype))\n\n return nerfstudio_collate([torch.as_tensor(b) for b in batch], extra_mappings=extra_mappings)\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, collections.abc.Mapping):\n try:\n return elem_type(\n {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}\n )\n except TypeError:\n # The mapping type may not support 
`__init__(iterable)`.\n return {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"): # namedtuple\n return elem_type(*(nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in zip(*batch)))\n elif isinstance(elem, collections.abc.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError(\"each element in list of batch should be of equal size\")\n transposed = list(zip(*batch)) # It may be accessed twice, so we use a list.\n\n if isinstance(elem, tuple):\n return [\n nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed\n ] # Backwards compatibility.\n else:\n try:\n return elem_type([nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed])\n except TypeError:\n # The sequence type may not support `__init__(iterable)` (e.g., `range`).\n return [nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed]\n\n # NerfStudio types supported below\n\n elif isinstance(elem, Cameras):\n # If a camera, just concatenate along the batch dimension. In the future, this may change to stacking\n assert all((isinstance(cam, Cameras) for cam in batch))\n assert all((cam.distortion_params is None for cam in batch)) or all(\n (cam.distortion_params is not None for cam in batch)\n ), \"All cameras must have distortion parameters or none of them should have distortion parameters.\\\n Generalized batching will be supported in the future.\"\n\n # If no batch dimension exists, then we need to stack everything and create a batch dimension on 0th dim\n if elem.shape == ():\n op = torch.stack\n # If batch dimension exists, then we need to concatenate along the 0th dimension\n else:\n op = torch.cat\n\n return Cameras(\n op([cameras.camera_to_worlds for cameras in batch], dim=0),\n op([cameras.fx for cameras in batch], dim=0),\n op([cameras.fy for cameras in batch], dim=0),\n op([cameras.cx for cameras in batch], dim=0),\n op([cameras.cy for cameras in batch], dim=0),\n height=op([cameras.height for cameras in batch], dim=0),\n width=op([cameras.width for cameras in batch], dim=0),\n distortion_params=op(\n [\n cameras.distortion_params\n if cameras.distortion_params is not None\n else torch.zeros_like(cameras.distortion_params)\n for cameras in batch\n ],\n dim=0,\n ),\n camera_type=op([cameras.camera_type for cameras in batch], dim=0),\n times=torch.stack(\n [cameras.times if cameras.times is not None else -torch.ones_like(cameras.times) for cameras in batch],\n dim=0,\n ),\n )\n\n elif isinstance(elem, BasicImages):\n assert all((isinstance(elem, BasicImages) for elem in batch))\n all_images = []\n for images in batch:\n all_images.extend(images.images)\n return BasicImages(all_images)\n\n for type_key in extra_mappings:\n if isinstance(elem, type_key):\n return extra_mappings[type_key](batch)\n\n raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem_type))" }, { "identifier": "get_dict_to_torch", "path": "nerfstudio/utils/misc.py", "snippet": "def get_dict_to_torch(stuff: Any, device: Union[torch.device, str] = \"cpu\", exclude: Optional[List[str]] = None):\n \"\"\"Set everything in the dict to the specified torch device.\n\n Args:\n stuff: things to convert to torch\n device: machine to put the \"stuff\" on\n exclude: list of keys to skip over transferring 
to device\n \"\"\"\n if isinstance(stuff, dict):\n for k, v in stuff.items():\n if exclude and k in exclude:\n stuff[k] = v\n else:\n stuff[k] = get_dict_to_torch(v, device)\n return stuff\n if isinstance(stuff, torch.Tensor):\n return stuff.to(device)\n return stuff" } ]
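The last context snippet above defines `get_dict_to_torch`. Below is a minimal usage sketch, assuming only the signature shown in that snippet (`stuff`, `device`, `exclude`); the batch contents are made up for illustration.

```python
import torch

from nerfstudio.utils.misc import get_dict_to_torch  # module path as listed in the context entry above

# Illustrative batch: tensors plus a nested dict; "image" is deliberately excluded.
batch = {
    "image": torch.rand(4, 128, 128, 3),
    "indices": torch.randint(0, 100, (4,)),
    "meta": {"scale": torch.tensor(1.0)},  # nested dicts are walked recursively
}

device = "cuda" if torch.cuda.is_available() else "cpu"
batch = get_dict_to_torch(batch, device=device, exclude=["image"])

print(batch["image"].device)          # stays on cpu: excluded keys are left untouched
print(batch["indices"].device)        # moved to `device`
print(batch["meta"]["scale"].device)  # nested tensors are moved as well
```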
import concurrent.futures
import multiprocessing
import random
import torch
from abc import abstractmethod
from typing import Dict, Optional, Tuple, Union
from rich.progress import Console, track
from torch.utils.data import Dataset
from torch.utils.data.dataloader import DataLoader
from nerfstudio.cameras.cameras import Cameras
from nerfstudio.cameras.rays import RayBundle
from nerfstudio.data.datasets.base_dataset import InputDataset
from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate
from nerfstudio.utils.misc import get_dict_to_torch
token_num: 16,976
self.first_time = True self.cached_collated_batch = None if self.cache_all_images: CONSOLE.print(f"Caching all {len(self.dataset)} images.") if len(self.dataset) > 500: CONSOLE.print( "[bold yellow]Warning: If you run out of memory, try reducing the number of images to sample from." ) self.cached_collated_batch = self._get_collated_batch() elif self.num_times_to_repeat_images == -1: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, without resampling." ) else: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, " f"resampling every {self.num_times_to_repeat_images} iters." ) def __getitem__(self, idx): return self.dataset.__getitem__(idx) def _get_batch_list(self): """Returns a list of batches from the dataset attribute.""" # todo: to enable local image caching, the samples indices should be consecutive rather than random. # each time when we switches the batch size, we should print out the image name lists. (in a sorting manner) # indices = random.sample(range(len(self.dataset)), k=self.num_images_to_sample_from) # consecutive sampling start_indices = random.sample(range(len(self.dataset)), 1) indices_circle_list = list(range(len(self.dataset))) + list(range(len(self.dataset))) indices = indices_circle_list[start_indices[0]:start_indices[0]+self.num_images_to_sample_from] random.shuffle(indices) # start_or_end_indices = random.sample(range(len(self.dataset) - self.num_images_to_sample_from + 1), 1)[0] # indices_list = list(range(len(self.dataset))) # indices = indices_list[start_or_end_indices:start_or_end_indices+self.num_images_to_sample_from] # # random.shuffle(indices) batch_list = [] results = [] num_threads = int(self.num_workers) * 4 num_threads = min(num_threads, multiprocessing.cpu_count() - 1) num_threads = max(num_threads, 1) with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor: for idx in indices: res = executor.submit(self.dataset.__getitem__, idx) results.append(res) for res in track( results, description="Loading data batch", transient=True, disable=(self.num_images_to_sample_from == 1) ): batch_list.append(res.result()) # print out filenames cached_image_filenames = [batch["image_filename"] for batch in batch_list] sorted_cached_image_filenames = sorted(cached_image_filenames) CONSOLE.print(f"New Loaded Image filenames: {sorted_cached_image_filenames}") return batch_list def _get_collated_batch(self): """Returns a collated batch.""" batch_list = self._get_batch_list() collated_batch = self.collate_fn(batch_list) collated_batch = get_dict_to_torch(collated_batch, device=self.device, exclude=["image"]) return collated_batch def __iter__(self): while True: if self.cache_all_images: collated_batch = self.cached_collated_batch elif self.first_time or ( self.num_times_to_repeat_images != -1 and self.num_repeated >= self.num_times_to_repeat_images ): # trigger a reset self.num_repeated = 0 collated_batch = self._get_collated_batch() # possibly save a cached item self.cached_collated_batch = collated_batch if self.num_times_to_repeat_images != 0 else None self.first_time = False else: collated_batch = self.cached_collated_batch self.num_repeated += 1 yield collated_batch class EvalDataloader(DataLoader): """Evaluation dataloader base class Args: input_dataset: InputDataset to load data from device: Device to load data to """ def __init__( self, input_dataset: InputDataset, device: Union[torch.device, str] = "cpu", **kwargs, ): self.input_dataset = input_dataset 
self.cameras = input_dataset.cameras.to(device) self.device = device self.kwargs = kwargs super().__init__(dataset=input_dataset) @abstractmethod def __iter__(self): """Iterates over the dataset""" return self @abstractmethod def __next__(self) -> Tuple[RayBundle, Dict]: """Returns the next batch of data"""
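The code above centres on `CacheDataloader.__iter__`, which reuses one collated batch for `num_times_to_repeat_images` iterations before loading a fresh one (with -1 meaning never reload). The following is a simplified, standalone sketch of that cache-and-repeat pattern, not the library class itself; the toy `load_batch` loader and function name are illustrative.

```python
import random
from typing import Callable, Iterator, List


def cached_batches(load_batch: Callable[[], List[int]],
                   num_times_to_repeat: int) -> Iterator[List[int]]:
    """Yield a cached batch, reloading it after `num_times_to_repeat` repeats.

    num_times_to_repeat == -1 mirrors the "never pick new images" convention
    in the code above; 0 reloads on every iteration.
    """
    cached = load_batch()
    num_repeated = 0
    while True:
        if num_times_to_repeat != -1 and num_repeated >= num_times_to_repeat:
            cached = load_batch()   # trigger a reset, like _get_collated_batch()
            num_repeated = 0
        else:
            num_repeated += 1
        yield cached


# Toy usage: each "batch" is just a list of sampled indices.
loader = cached_batches(lambda: random.sample(range(100), k=8), num_times_to_repeat=4)
batches = [next(loader) for _ in range(10)]
```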
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Code for sampling images from a dataset of images. """ # for multithreading CONSOLE = Console(width=120) class CacheDataloader(DataLoader): """Collated image dataset that implements caching of default-pytorch-collatable data. Creates batches of the InputDataset return type. Args: dataset: Dataset to sample from. num_samples_to_collate: How many images to sample rays for each batch. -1 for all images. num_times_to_repeat_images: How often to collate new images. -1 to never pick new images. device: Device to perform computation. collate_fn: The function we will use to collate our training data """ def __init__( self, dataset: Dataset, num_images_to_sample_from: int = -1, num_times_to_repeat_images: int = -1, device: Union[torch.device, str] = "cpu", collate_fn=nerfstudio_collate, **kwargs, ): self.dataset = dataset super().__init__(dataset=dataset, **kwargs) # This will set self.dataset self.num_times_to_repeat_images = num_times_to_repeat_images self.cache_all_images = (num_images_to_sample_from == -1) or (num_images_to_sample_from >= len(self.dataset)) self.num_images_to_sample_from = len(self.dataset) if self.cache_all_images else num_images_to_sample_from self.device = device self.collate_fn = collate_fn self.num_workers = kwargs.get("num_workers", 0) self.num_repeated = self.num_times_to_repeat_images # starting value self.first_time = True self.cached_collated_batch = None if self.cache_all_images: CONSOLE.print(f"Caching all {len(self.dataset)} images.") if len(self.dataset) > 500: CONSOLE.print( "[bold yellow]Warning: If you run out of memory, try reducing the number of images to sample from." ) self.cached_collated_batch = self._get_collated_batch() elif self.num_times_to_repeat_images == -1: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, without resampling." ) else: CONSOLE.print( f"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, " f"resampling every {self.num_times_to_repeat_images} iters." ) def __getitem__(self, idx): return self.dataset.__getitem__(idx) def _get_batch_list(self): """Returns a list of batches from the dataset attribute.""" # todo: to enable local image caching, the samples indices should be consecutive rather than random. # each time when we switches the batch size, we should print out the image name lists. 
(in a sorting manner) # indices = random.sample(range(len(self.dataset)), k=self.num_images_to_sample_from) # consecutive sampling start_indices = random.sample(range(len(self.dataset)), 1) indices_circle_list = list(range(len(self.dataset))) + list(range(len(self.dataset))) indices = indices_circle_list[start_indices[0]:start_indices[0]+self.num_images_to_sample_from] random.shuffle(indices) # start_or_end_indices = random.sample(range(len(self.dataset) - self.num_images_to_sample_from + 1), 1)[0] # indices_list = list(range(len(self.dataset))) # indices = indices_list[start_or_end_indices:start_or_end_indices+self.num_images_to_sample_from] # # random.shuffle(indices) batch_list = [] results = [] num_threads = int(self.num_workers) * 4 num_threads = min(num_threads, multiprocessing.cpu_count() - 1) num_threads = max(num_threads, 1) with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor: for idx in indices: res = executor.submit(self.dataset.__getitem__, idx) results.append(res) for res in track( results, description="Loading data batch", transient=True, disable=(self.num_images_to_sample_from == 1) ): batch_list.append(res.result()) # print out filenames cached_image_filenames = [batch["image_filename"] for batch in batch_list] sorted_cached_image_filenames = sorted(cached_image_filenames) CONSOLE.print(f"New Loaded Image filenames: {sorted_cached_image_filenames}") return batch_list def _get_collated_batch(self): """Returns a collated batch.""" batch_list = self._get_batch_list() collated_batch = self.collate_fn(batch_list) collated_batch = get_dict_to_torch(collated_batch, device=self.device, exclude=["image"]) return collated_batch def __iter__(self): while True: if self.cache_all_images: collated_batch = self.cached_collated_batch elif self.first_time or ( self.num_times_to_repeat_images != -1 and self.num_repeated >= self.num_times_to_repeat_images ): # trigger a reset self.num_repeated = 0 collated_batch = self._get_collated_batch() # possibly save a cached item self.cached_collated_batch = collated_batch if self.num_times_to_repeat_images != 0 else None self.first_time = False else: collated_batch = self.cached_collated_batch self.num_repeated += 1 yield collated_batch class EvalDataloader(DataLoader): """Evaluation dataloader base class Args: input_dataset: InputDataset to load data from device: Device to load data to """ def __init__( self, input_dataset: InputDataset, device: Union[torch.device, str] = "cpu", **kwargs, ): self.input_dataset = input_dataset self.cameras = input_dataset.cameras.to(device) self.device = device self.kwargs = kwargs super().__init__(dataset=input_dataset) @abstractmethod def __iter__(self): """Iterates over the dataset""" return self @abstractmethod def __next__(self) -> Tuple[RayBundle, Dict]: """Returns the next batch of data"""
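`_get_batch_list` above draws a consecutive window of image indices from a doubled (wrap-around) index list and then shuffles that window, rather than sampling indices independently. A small standalone sketch of just that sampling step; the function name is illustrative.

```python
import random
from typing import List


def sample_consecutive_indices(num_images: int, num_to_sample: int) -> List[int]:
    """Pick a random start, take a consecutive wrap-around window, then shuffle it."""
    start = random.randrange(num_images)
    circle = list(range(num_images)) * 2            # doubled list provides the wrap-around
    window = circle[start:start + num_to_sample]
    random.shuffle(window)
    return window


# e.g. sample_consecutive_indices(10, 4) might return a shuffled window such as [8, 9, 0, 7]
```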
next_line: def get_camera(self, image_idx: int = 0) -> Cameras:
gold_snippet_index: 0
created_at: 2023-12-15 20:07:22+00:00
level: 24k
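Taken together, a record like the one above pairs repository context with a cropped code prefix, the ground-truth next_line, and a gold_snippet_index into the context list. Below is a minimal sketch of how such a record could be consumed for next-line evaluation, assuming the field layout shown above and that gold_snippet_index indexes into the context list; the model call is hypothetical.

```python
from typing import Dict


def build_prompt(record: Dict) -> str:
    """Concatenate the gold context snippet, the imports, and the cropped code."""
    gold = record["context"][record["gold_snippet_index"]]
    header = f"# context from {gold['path']} ({gold['identifier']})\n{gold['snippet']}\n\n"
    return header + record["import_statement"] + "\n\n" + record["cropped_code"]


def exact_match(prediction: str, record: Dict) -> bool:
    """Score a generated line against the reference next_line by exact match."""
    return prediction.strip() == record["next_line"].strip()


# Illustrative use with one dataset row loaded as a dict:
# prompt = build_prompt(record)
# prediction = my_model.generate(prompt)   # hypothetical model interface
# print(exact_match(prediction, record))
```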
repo_name: amazon-science/c2f-seg
file_path: data/dataloader_transformer.py
[ { "identifier": "FishBowl", "path": "data/dataloader_Fishbowl.py", "snippet": "class FishBowl(object):\n def __init__(self, config, mode, subtest=None):\n self.datatype = mode\n data_dir = config.root_path\n\n self.img_path = os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_frames\")\n self.mode = mode\n self.dtype = torch.float32\n self.test_set = subtest\n \n self.data_summary = pickle.load(open(os.path.join(data_dir, self.datatype+\"_data\", self.datatype+\"_data.pkl\"), \"rb\"))\n self.obj_lists = list(self.data_summary.keys())\n self.device = \"cpu\"\n\n self.seq_len = 32 if self.mode == \"test\" else config.train_seq_len\n\n self.cur_vid = None\n self.video_frames = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def decode2binarymask(self, masks):\n mask = mask_utils.decode(masks)\n binary_masks = mask.astype('bool') # (Image_W,Image_H,128)\n binary_masks = binary_masks.transpose(2,0,1) #(128, Image_W, Image_H)\n return binary_masks\n\n def __len__(self):\n return len(self.obj_lists)\n\n def __getitem__(self, idx):\n v_id, obj_id = self.obj_lists[idx].split(\"_\")\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n fm_crop = []\n fm_no_crop = []\n vm_crop = []\n vm_no_crop = []\n img_crop = []\n \n obj_position = []\n\n counts = []\n loss_mask_weight = []\n\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n obj_dict = self.data_summary[self.obj_lists[idx]]\n timesteps = list(obj_dict.keys())\n assert np.all(np.diff(sorted(timesteps))==1)\n start_t, end_t = min(timesteps), max(timesteps)\n # print(start_t, end_t)\n if self.mode != \"test\" and end_t - start_t > self.seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.seq_len-2))\n end_t = start_t + self.seq_len - 1\n\n if self.mode == \"test\":\n if start_t + self.seq_len-1<=end_t:\n end_t = start_t + self.seq_len-1\n\n for t_step in range(start_t, end_t):\n image_path = os.path.join(self.img_path, v_id, str(t_step).zfill(5)+'.png')\n img = cv2.imread(image_path)[:,:,::-1]\n # get visible mask and full mask\n vm = self.decode2binarymask(obj_dict[t_step][\"VM\"])[0]\n fm = self.decode2binarymask(obj_dict[t_step][\"FM\"])[0] # 320, 480\n vx_min, vx_max, vy_min, vy_max = obj_dict[t_step][\"VM_bx\"]\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(320, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(480, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1])\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1])\n img_crop.append(img[vx_min:vx_max+1, vy_min:vy_max+1])\n\n vm_no_crop.append(vm)\n fm_no_crop.append(fm)\n # get loss mask\n loss_mask_weight.append(self.decode2binarymask(obj_dict[t_step][\"loss_mask_weight\"])[0])\n\n # for evaluation\n video_ids.append(int(v_id))\n object_ids.append(int(obj_id))\n frame_ids.append(t_step)\n counts.append(1)\n \n if True:\n num_pad = self.seq_len - (end_t - start_t)\n for _ in range(num_pad):\n obj_position.append(copy.deepcopy(obj_position[-1]))\n\n fm_crop.append(copy.deepcopy(fm_crop[-1]))\n fm_no_crop.append(copy.deepcopy(fm_no_crop[-1]))\n vm_crop.append(copy.deepcopy(vm_crop[-1]))\n vm_no_crop.append(copy.deepcopy(vm_no_crop[-1]))\n 
img_crop.append(copy.deepcopy(img_crop[-1]))\n\n loss_mask_weight.append(copy.deepcopy(loss_mask_weight[-1]))\n \n video_ids.append(video_ids[-1])\n object_ids.append(object_ids[-1])\n frame_ids.append(frame_ids[-1] + 1)\n counts.append(0)\n \n vm_crop, vm_crop_gt, fm_crop, img_crop, vm_pad, vm_scale = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_crop_gt = np.stack(vm_crop_gt, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(np.array(vm_crop_gt)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"img_crop\": img_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n \"counts\": counts,\n \"loss_mask\": loss_mask_weight, \n \"obj_position\": obj_position,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop_vm=None, img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_scale = []\n vm_crop_gt = []\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n vm_crop_gt.append(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n\n for i, m in enumerate(fm_crop_vm):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop_vm[i] = m\n\n for i, img_ in enumerate(img_crop):\n img_ = transform.rescale(img_, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n 
img_crop[i] = img_\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, vm_crop_gt, fm_crop_vm, img_crop, vm_pad, vm_scale\n \n def getImg(self, v_id):\n imgs = []\n imgs_list = os.listdir(os.path.join(self.img_path, v_id))\n imgs_list.sort()\n for sub_path in imgs_list:\n img_path = os.path.join(self.img_path, v_id, sub_path)\n img_tmp = plt.imread(img_path)\n imgs.append(img_tmp)\n assert len(imgs) == 128\n return imgs\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n for item in sample_loader:\n yield item\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "MOViD_A", "path": "data/dataloader_MOViD_A.py", "snippet": "class MOViD_A(object):\n def __init__(self, config, mode):\n super(MOViD_A, self).__init__()\n self.mode = mode\n self.dtype = torch.float32\n self.device = \"cpu\"\n root_path = config.root_path\n self.data_dir = os.path.join(root_path, mode)\n \n self.instance_list = np.genfromtxt(\n os.path.join(root_path, \"{}_instance.txt\".format(mode)),\n dtype=np.str,\n encoding='utf-8'\n )\n\n self.train_seq_len = 24\n self.cur_vid = None\n self.patch_h = config.patch_H\n self.patch_w = config.patch_W\n self.enlarge_coef = config.enlarge_coef\n\n def __len__(self):\n return len(self.instance_list)\n\n def __getitem__(self, idx, specified_V_O_id=None):\n # whether choose a specific instance to load\n if specified_V_O_id is None:\n v_id, obj_id, value = self.instance_list[idx].split(\"_\")\n else:\n v_id, obj_id, value = specified_V_O_id.split(\"_\")\n v_id, obj_id, value = int(v_id), int(obj_id), int(value)\n if v_id != self.cur_vid:\n self.cur_vid = v_id\n self.video_path = os.path.join(self.data_dir, str(v_id))\n metadata = self.read_json(os.path.join(self.video_path, 'metadata.json'))\n\n self.num_frames = metadata[\"metadata\"][\"num_frames\"]\n self.height = metadata['metadata']['height']\n self.width = metadata['metadata']['width']\n self.instances = [self.format_instance_information(obj) for obj in metadata[\"instances\"]]\n\n vis_mask_paths = [os.path.join(self.video_path, \"segmentation_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n vis_mask = [np.array(Image.open(frame_path)) for frame_path in vis_mask_paths] #[t,h,w]\n\n full_mask_paths = [os.path.join(self.video_path, \"segmentation_{}_{}.png\".format(obj_id, str(f).zfill(5))) for f in range(self.num_frames)]\n full_mask = [np.array(Image.open(frame_path)) for frame_path in full_mask_paths] #[t,h,w]\n \n rgb_img_path = [os.path.join(self.video_path, 
\"rgba_full_{}.png\".format(str(f).zfill(5))) for f in range(self.num_frames)]\n rgb_img = [np.array(Image.open(frame_path))[...,:3] for frame_path in rgb_img_path]\n \n counts = []\n obj_position = []\n\n vm_crop = []\n vm_no_crop = []\n fm_crop = []\n fm_no_crop = []\n loss_mask_weight = []\n img_crop = []\n # for evaluation \n video_ids = []\n object_ids = []\n frame_ids = []\n\n timesteps = self.instances[obj_id]['bbox_frames']\n start_t, end_t = 0, 23\n if self.mode != \"test\" and end_t - start_t > self.train_seq_len - 1:\n start_t = np.random.randint(start_t, end_t-(self.train_seq_len-2))\n end_t = start_t + self.train_seq_len - 1\n\n for t_step in range(start_t, end_t+1):\n Image_H, Image_W = self.height, self.width\n # some objects will move out the field of view in some frames\n if t_step in timesteps:\n index = self.instances[obj_id][\"bbox_frames\"].index(t_step)\n xmin, ymin, xmax, ymax = self.instances[obj_id][\"bboxes\"][index]\n vx_min, vy_min, vx_max, vy_max = int(Image_H*xmin), int(Image_W*ymin), int(Image_H*xmax), int(Image_W*ymax)\n counts.append(1)\n else:\n bboxs = mask_find_bboxs(full_mask[t_step].astype(np.uint8))\n \n if bboxs.size==0:\n vx_min, vy_min, vx_max, vy_max = 0, 0, 256, 256\n else:\n b = bboxs[-1][:4]\n vx_min, vy_min, vx_max, vy_max = b[1], b[0], b[1]+b[3], b[0]+b[2]\n counts.append(0)\n\n # enlarge the bbox\n x_center = (vx_min + vx_max) // 2\n y_center = (vy_min + vy_max) // 2\n x_len = int((vx_max - vx_min) * self.enlarge_coef)\n y_len = int((vy_max - vy_min) * self.enlarge_coef)\n vx_min = max(0, x_center - x_len // 2)\n vx_max = min(Image_H, x_center + x_len // 2)\n vy_min = max(0, y_center - y_len // 2)\n vy_max = min(Image_W, y_center + y_len // 2)\n\n obj_position.append([vx_min, vx_max, vy_min, vy_max])\n\n # get mask\n vm = vis_mask[t_step]\n vm_crop.append(vm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n vm_no_crop.append(vm==value)\n\n fm = full_mask[t_step]\n fm_crop.append(fm[vx_min:vx_max+1, vy_min:vy_max+1]==value)\n fm_no_crop.append(fm==value)\n \n # get image\n image = rgb_img[t_step]\n img_crop.append(image[vx_min:vx_max+1, vy_min:vy_max+1])\n\n # get loss mask\n fore_ground = vm == 0\n obj_ground = vm==value\n loss_mask = np.logical_or(fore_ground, obj_ground)\n\n loss_mask_weight.append(loss_mask)\n\n # for evaluation\n video_ids.append(v_id)\n object_ids.append(obj_id)\n frame_ids.append(t_step)\n\n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n \n vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt, img_crop = self.crop_and_rescale(vm_crop, fm_crop, img_crop)\n\n vm_crop = np.stack(vm_crop, axis=0) # Seq_len * h * w\n vm_no_crop = np.stack(vm_no_crop, axis=0) # Seq_len * H * W\n # fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_crop = np.stack(fm_crop, axis=0) # Seq_len * h * w\n fm_no_crop = np.stack(fm_no_crop, axis=0) # Seq_len * H * W\n img_crop = np.stack(img_crop, axis=0) # Sqe_len * H * W\n\n vm_crop = torch.from_numpy(np.array(vm_crop)).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n fm_crop = torch.from_numpy(np.array(fm_crop)).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n vm_pad = torch.from_numpy(np.array(vm_pad)).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(np.array(vm_scale)).to(self.dtype).to(self.device)\n\n video_ids = 
torch.from_numpy(np.array(video_ids)).to(self.dtype).to(self.device)\n object_ids = torch.from_numpy(np.array(object_ids)).to(self.dtype).to(self.device)\n frame_ids = torch.from_numpy(np.array(frame_ids)).to(self.dtype).to(self.device)\n counts = torch.from_numpy(np.array(counts)).to(self.dtype).to(self.device)\n loss_mask_weight = torch.from_numpy(np.array(loss_mask_weight)).to(self.dtype).to(self.device) \n obj_position = torch.from_numpy(np.array(obj_position)).to(self.dtype).to(self.device)\n\n obj_data = {\n \"vm_crop\": vm_crop,\n \"vm_no_crop\": vm_no_crop,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n\n \"img_crop\": img_crop,\n \n \"fm_crop\": fm_crop,\n \"fm_no_crop\": fm_no_crop,\n\n \"obj_position\": obj_position, \n \"loss_mask\": loss_mask_weight, \n \"counts\": counts,\n \"video_ids\": video_ids,\n \"object_ids\": object_ids,\n \"frame_ids\": frame_ids,\n }\n\n return obj_data\n\n def crop_and_rescale(self, vm_crop, fm_crop=None,img_crop=None):\n h, w = np.array([m.shape for m in vm_crop]).max(axis=0)\n vm_pad = []\n vm_crop_gt = []\n vm_scale = []\n for i, img in enumerate(img_crop):\n img = transform.rescale(img, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img = np.pad(img, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop[i] = img\n\n for i, m in enumerate(vm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n if self.mode==\"train\":\n vm_crop[i] = self.data_augmentation(m)\n else:\n vm_crop[i] = m\n vm_crop_gt.append(m)\n vm_pad.append(np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)]))\n vm_scale.append(np.array([self.patch_h/h, self.patch_w/w]))\n\n for i, m in enumerate(fm_crop):\n m = transform.rescale(m, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n fm_crop[i] = m\n\n vm_pad = np.stack(vm_pad)\n vm_scale = np.stack(vm_scale)\n return vm_crop, fm_crop, vm_pad, vm_scale, vm_crop_gt,img_crop\n \n def read_json(self,dir_):\n with open(dir_) as f:\n data = json.load(f)\n return data\n\n def format_instance_information(self, obj):\n return {\n \"bboxes\": obj[\"bboxes\"],\n \"bbox_frames\": obj[\"bbox_frames\"],\n }\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n return res\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.1 and rdv < 0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.5:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.5 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 
1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)" }, { "identifier": "Kins_Fusion_dataset", "path": "data/dataloader_KINS.py", "snippet": "class Kins_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(Kins_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode),\"image_2\")\n \n # Load the GT of AISFormer\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n \n # Load the GT of vanilla KINS\n self.base_img_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.base_ann_path= os.path.join(self.root_path, \"update_{}_2020.json\".format(mode))\n annotations = cvb.load(self.base_ann_path)\n imgs_info = annotations['images']\n anns_info = annotations[\"annotations\"]\n self.imgs_dict, self.anns_dict = self.make_json_dict(imgs_info, anns_info)\n\n # dataloader setting\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # load aisformer predicted visible masks\n if \"aisformer\" in self.label_info[index]:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = np.array(Image.open(img_path))\n instances = self.data_info['{}_{}'.format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n fm_no_crop = fm_no_crop[..., np.newaxis]\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, 
y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n \n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(np.array(vm_no_crop_gt)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": 
vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n }\n return meta\n else:\n img_id, anno_id, category_id = self.label_info[index].split(\"_\")\n img_id, anno_id, category_id = int(img_id), int(anno_id), int(category_id)\n\n img_name = self.imgs_dict[img_id]\n img_path = os.path.join(self.base_img_path, img_name)\n \n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n \n ann = self.anns_dict[img_id][anno_id]\n fm_no_crop = self.polys_to_mask(ann[\"a_segm\"], height, width)\n vm_no_crop = self.polys_to_mask(ann[\"i_segm\"], height, width)\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"i_bbox\"]\n\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n # vm_crop here is the GT\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = 
torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n return meta\n\n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.6:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.55:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.55 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.1:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" }, { "identifier": "KINS_Aisformer_VRSP_Intersection", "path": "data/dataloader_KINS.py", "snippet": "class KINS_Aisformer_VRSP_Intersection(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(KINS_Aisformer_VRSP_Intersection, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Intersection dataset\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"kins_intersection.pkl\"), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"kins_intersection_list.txt\"), dtype=np.str, encoding='utf-8')\n if mode==\"train\":\n aisformer_gt = cvb.load(os.path.join(self.root_path, \"instances_train.json\"))\n else:\n aisformer_gt = 
cvb.load(os.path.join(self.root_path, \"instances_val_upate.json\"))\n annotations = aisformer_gt[\"annotations\"]\n images = aisformer_gt[\"images\"]\n self.images, self.annotations = self.make_json_dict(images, annotations)\n self.img_root_path = os.path.join(self.root_path, \"{}ing\".format(mode), \"image_2\")\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def mask_find_bboxs(self, mask):\n retval, labels, stats, centroids = cv2.connectedComponentsWithStats(mask, connectivity=8)\n stats = stats[stats[:,4].argsort()]\n return stats\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n instances = self.data_info[image_id][anno_id]\n\n segmentation = instances[\"pred_visible_mask\"]\n height, width = segmentation[\"size\"]\n # add image information\n img_name = self.images[image_id]\n img_path = os.path.join(self.img_root_path, img_name)\n # img_path = os.path.join(self.img_root_path, str(image_id).zfill(6)+ \".png\")\n img = Image.open(img_path)\n img = img.resize((width,height), Image.ANTIALIAS)\n img = np.array(img)\n \n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n # fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n rles = mask_utils.frPyObjects(instances[\"gt_full_mask\"], height, width)\n fm_no_crop = mask_utils.decode(mask_utils.merge(rles)).astype(bool)\n \n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n\n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n \n fm_no_crop = fm_no_crop[..., np.newaxis]\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n \n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, 
max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n refine_loss_mask = 1 - (vm_crop_gt==vm_crop).astype(bool)\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n # import pdb;pdb.set_trace()\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop_gt = torch.from_numpy(vm_no_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n refine_loss_mask = torch.from_numpy(np.array(refine_loss_mask)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n }\n # elif self.mode==\"test\":\n # meta = {\n # # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # # \"vm_no_crop_gt\": vm_no_crop_gt,\n # # \"refine_loss_mask\": refine_loss_mask,\n # # \"fm_no_crop\": fm_no_crop,\n # \"fm_crop\": fm_crop,\n # \"img_crop\": img_crop,\n # # \"loss_mask\": loss_mask,\n # # \"obj_position\": obj_position,\n # # \"vm_pad\": vm_pad,\n # # \"vm_scale\": vm_scale,\n # # \"counts\":counts,\n # # \"img_id\": image_id,\n # # \"anno_id\": 
anno_id,\n # # # for vq\n # # # \"mask_crop\": fm_crop\n # # # \"img\":img,\n # }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img\":img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_Fusion_dataset", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_Fusion_dataset(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_Fusion_dataset, self).__init__()\n self.config = config\n self.mode = mode\n self.root_path = config.root_path\n \n # Load Fusion dataset \n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if mode==\"train\":\n train_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_train2014_with_classes.json\"))\n self.anns_dict = train_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif mode==\"test\":\n val_label = cvb.load(os.path.join(self.root_path, \"COCO_amodal_val2014_with_classes.json\"))\n self.anns_dict = val_label[\"annotations\"]\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n \n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def load_item(self, index):\n # predicted vm\n if len(self.label_info[index].split(\",\"))==3:\n dataset_name, image_id, anno_id = self.label_info[index].split(\",\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, 
np.newaxis], 3, axis=2)\n instances = self.data_info[\"{}_{}\".format(dataset_name, image_id)][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n # occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n # import pdb;pdb.set_trace()\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n # if self.mode==\"test\":\n # loss_mask = mask_utils.decode([instances[\"loss_mask\"]]).astype(bool)[...,0]\n # else:\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = 
torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n\n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n # \"vm_crop\": vm_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop_gt,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"img_crop\": img_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n # \"occlude_rate\":occlude_rate\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img,\n }\n return meta\n # gt vm\n elif len(self.label_info[index].split(\",\"))==2:\n anno_id, img_path = self.label_info[index].split(\",\")\n anno_id = int(anno_id)\n img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n height, width, _ = img.shape\n\n ann = self.anns_dict[anno_id]\n img_id = ann[\"image_id\"]\n # category_id = ann[\"category_id\"]\n\n full_mask = ann[\"segmentation\"]\n fm_no_crop = mask_utils.decode(full_mask)[...,np.newaxis]\n\n visible_mask = ann[\"visible_mask\"]\n vm_no_crop = mask_utils.decode(visible_mask)[...,np.newaxis]\n\n if np.sum(vm_no_crop)==0:\n counts = np.array([0])\n else:\n counts = np.array([1])\n y_min, x_min, w, h = ann[\"bbox\"]\n y_max, x_max = y_min + w, x_min + h\n y_min, x_min, y_max, x_max = int(y_min), int(x_min), int(y_max), int(x_max) \n\n x_center = (x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(width, y_center + y_len // 2)\n \n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, 
max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n\n # full_pad = ((0, max(375-height, 0)), (0, max(1242-width, 0)))\n # vm_no_crop = np.pad(vm_no_crop, full_pad)[:375, :1242]\n # fm_no_crop = np.pad(fm_no_crop, full_pad)[:375, :1242]\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n loss_mask = fm_no_crop-vm_no_crop\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n # data augmentation\n vm_crop_aug = self.data_augmentation(vm_crop[0])[np.newaxis, ...]\n \n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_aug = torch.from_numpy(vm_crop_aug).to(self.dtype).to(self.device)\n img_crop = torch.from_numpy(img_crop).to(self.dtype).to(self.device)\n img = torch.from_numpy(img).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n \n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n img_id = torch.from_numpy(np.array(img_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n # category_id = torch.from_numpy(np.array(category_id)).to(self.dtype).to(self.device)\n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop_aug,\n \"vm_crop_gt\": vm_crop,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\": counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n # \"img_no_crop\": img\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": img_id,\n \"anno_id\": anno_id,\n # \"category_id\": category_id,\n # for vq\n # \"mask_crop\": fm_crop\n \"img_no_crop\": img,\n }\n return meta\n \n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n 
dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask\n\n # def data_augmentation(self, mask):\n # return mask\n \n def data_augmentation(self, mask):\n mask = mask.astype(np.float)\n rdv = random.random()\n n_repeat = random.randint(1, 4)\n if rdv <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n elif rdv > 0.2 and rdv <0.9:\n rdv_1 = random.random()\n rdv_2 = random.random()\n for i in range(n_repeat):\n w = random.randint(5, 13)\n h = random.randint(5, 13)\n kernel = np.ones((w, h), dtype=np.uint8)\n if rdv_1 <= 0.6:\n mask = cv2.dilate(mask, kernel, 1)\n elif rdv_1 > 0.6 and rdv_1 <= 1.0:\n mask = cv2.erode(mask, kernel, 1)\n if rdv_2 <= 0.2:\n mask = cv2.GaussianBlur(mask, (35,35), 11)\n else:\n mask = mask\n return (mask>0.5)\n \n def make_json_dict(self, imgs, anns):\n imgs_dict = {}\n anns_dict = {}\n for ann in anns:\n image_id = ann[\"image_id\"]\n if not image_id in anns_dict:\n anns_dict[image_id] = []\n anns_dict[image_id].append(ann)\n else:\n anns_dict[image_id].append(ann)\n \n for img in imgs:\n image_id = img['id']\n imgs_dict[image_id] = img['file_name']\n\n return imgs_dict, anns_dict" }, { "identifier": "COCOA_VRSP", "path": "data/dataloader_COCOA.py", "snippet": "class COCOA_VRSP(torch.utils.data.Dataset):\n def __init__(self, config, mode):\n super(COCOA_VRSP, self).__init__()\n self.config = config\n self.mode = mode\n self.data_info = pickle.load(open(os.path.join(self.root_path, \"fusion_{}.pkl\".format(self.mode)), \"rb\"))\n self.label_info = np.genfromtxt(os.path.join(self.root_path, \"c2f_seg_{}_list.txt\".format(self.mode)), dtype=np.str, encoding='utf-8')\n \n if self.mode==\"train\":\n self.img_root_path = os.path.join(self.root_path, \"train2014\")\n elif self.mode==\"test\":\n self.img_root_path = os.path.join(self.root_path, \"val2014\")\n\n self.dtype = torch.float32\n self.enlarge_coef = 2\n self.patch_h = 256\n self.patch_w = 256\n self.device = \"cpu\"\n\n \n def __len__(self):\n return self.label_info.shape[0]\n\n def __getitem__(self, index):\n return self.load_item(index)\n \n def generate_heatmap(self, mask, kernel, sigma):\n heatmap = cv2.GaussianBlur(mask, kernel, sigma)\n am = np.amax(heatmap)\n heatmap /= am / 1\n return heatmap\n \n def load_item(self, index):\n image_id, anno_id = self.label_info[index].split(\"_\")\n image_id, anno_id = int(image_id), int(anno_id)\n if self.mode==\"train\":\n img_path = os.path.join(self.img_root_path, \"COCO_{}2014_{}.jpg\".format(self.mode, str(image_id).zfill(12)))\n elif self.mode==\"test\":\n img_path = os.path.join(self.img_root_path, \"COCO_val2014_{}.jpg\".format(str(image_id).zfill(12)))\n img = np.array(Image.open(img_path))\n if len(img.shape)==2:\n img = np.repeat(img[:, :, np.newaxis], 3, axis=2)\n instances = self.data_info[image_id][anno_id]\n segmentation = instances[\"pred_visible_mask\"]\n height, weight = segmentation[\"size\"]\n occlude_rate = instances[\"occlude_rate\"]\n vm_no_crop = mask_utils.decode([segmentation]).astype(bool)\n fm_no_crop = mask_utils.decode([instances[\"gt_full_mask\"]]).astype(bool)\n vm_no_crop_gt = mask_utils.decode([instances[\"gt_visible_mask\"]]).astype(bool)\n\n bbox = instances[\"pred_visible_mask_bbox\"]\n y_min, x_min, w, h = bbox\n y_max, x_max = y_min + w, x_min + h\n x_center = 
(x_min + x_max) // 2\n y_center = (y_min + y_max) // 2\n x_len = int((x_max - x_min) * self.enlarge_coef)\n y_len = int((y_max - y_min) * self.enlarge_coef)\n x_min = max(0, x_center - x_len // 2)\n x_max = min(height, x_center + x_len // 2)\n y_min = max(0, y_center - y_len // 2)\n y_max = min(weight, y_center + y_len // 2)\n x_min, x_max, y_min, y_max = int(x_min), int(x_max), int(y_min), int(y_max)\n \n x_center_crop = x_center - x_min\n y_center_crop = y_center - y_min\n\n vm_crop = vm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n fm_crop = fm_no_crop[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n img_crop = img[x_min:x_max+1, y_min:y_max+1]\n vm_crop_gt = vm_no_crop_gt[x_min:x_max+1, y_min:y_max+1, 0].astype(bool)\n\n h, w = vm_crop.shape[:2]\n m = transform.rescale(vm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop = m[np.newaxis, ...]\n\n center_crop = np.zeros_like(vm_crop[0])\n x_center_crop = int(x_center_crop*self.patch_h/h)\n y_center_crop = int(y_center_crop*self.patch_w/w)\n center_crop[x_center_crop: x_center_crop+1, y_center_crop: y_center_crop+1]=1\n center_crop = self.generate_heatmap(center_crop.astype(np.float), (35, 35), 9)\n center_crop = center_crop[np.newaxis, ...]\n\n img_ = transform.rescale(img_crop, (self.patch_h/h, self.patch_w/w, 1))\n cur_h, cur_w = img_.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)), (0, 0))\n img_ = np.pad(img_, to_pad)[:self.patch_h, :self.patch_w, :3]\n img_crop = img_\n\n h, w = vm_crop_gt.shape[:2]\n m = transform.rescale(vm_crop_gt, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w]\n vm_crop_gt = m[np.newaxis, ...]\n\n m = transform.rescale(fm_crop, (self.patch_h/h, self.patch_w/w))\n cur_h, cur_w = m.shape[:2]\n to_pad = ((0, max(self.patch_h-cur_h, 0)), (0, max(self.patch_w-cur_w, 0)))\n m = np.pad(m, to_pad)[:self.patch_h, :self.patch_w] \n fm_crop = m[np.newaxis, ...]\n\n loss_mask = fm_no_crop.astype(int)-vm_no_crop_gt.astype(int)\n loss_mask[loss_mask==255]=0\n loss_mask = 1-loss_mask.astype(bool)\n\n vm_no_crop = vm_no_crop[np.newaxis, ...]\n fm_no_crop = fm_no_crop[np.newaxis, ...]\n\n obj_position = np.array([x_min, x_max, y_min, y_max])\n vm_pad = np.array([max(self.patch_h-cur_h, 0), max(self.patch_w-cur_w, 0)])\n vm_scale = np.array([self.patch_h/h, self.patch_w/w])\n counts = np.array([1])\n\n counts = torch.from_numpy(counts).to(self.dtype).to(self.device)\n\n obj_position = torch.from_numpy(obj_position).to(self.dtype).to(self.device)\n vm_pad = torch.from_numpy(vm_pad).to(self.dtype).to(self.device)\n vm_scale = torch.from_numpy(vm_scale).to(self.dtype).to(self.device)\n\n fm_crop = torch.from_numpy(fm_crop).to(self.dtype).to(self.device)\n fm_no_crop = torch.from_numpy(np.array(fm_no_crop)).to(self.dtype).to(self.device)\n vm_crop = torch.from_numpy(vm_crop).to(self.dtype).to(self.device)\n vm_crop_gt = torch.from_numpy(vm_crop_gt).to(self.dtype).to(self.device)\n vm_no_crop = torch.from_numpy(np.array(vm_no_crop)).to(self.dtype).to(self.device)\n center_crop = torch.from_numpy(np.array(center_crop)).to(self.dtype).to(self.device)\n \n img_crop = torch.from_numpy(np.array(img_crop)).to(self.dtype).to(self.device)\n img = 
torch.from_numpy(np.array(img)).to(self.dtype).to(self.device)\n\n loss_mask = torch.from_numpy(np.array(loss_mask)).to(self.dtype).to(self.device)\n \n image_id = torch.from_numpy(np.array(image_id)).to(self.dtype).to(self.device)\n anno_id = torch.from_numpy(np.array(anno_id)).to(self.dtype).to(self.device)\n occlude_rate = torch.from_numpy(np.array(occlude_rate)).to(self.dtype).to(self.device)\n \n if self.mode==\"train\":\n meta = {\n # \"vm_no_crop\": vm_no_crop,\n \"vm_crop\": vm_crop,\n # \"vm_crop_gt\": vm_crop_gt,\n # \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n # \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"img_no_crop\": img,\n }\n elif self.mode==\"test\":\n meta = {\n \"vm_no_crop\": vm_no_crop,\n \"vm_no_crop_gt\": vm_no_crop_gt,\n \"vm_crop\": vm_crop,\n \"vm_crop_gt\": vm_crop_gt,\n \"fm_no_crop\": fm_no_crop,\n \"fm_crop\": fm_crop,\n \"img_crop\": img_crop,\n \"center_crop\": center_crop,\n \"loss_mask\": loss_mask,\n \"obj_position\": obj_position,\n \"vm_pad\": vm_pad,\n \"vm_scale\": vm_scale,\n \"counts\":counts,\n \"img_id\": image_id,\n \"anno_id\": anno_id,\n \"occlude_rate\":occlude_rate,\n # # for vq\n # \"mask_crop\": fm_crop,\n \"img\": img,\n }\n return meta\n\n @staticmethod\n def collate_fn(batch):\n keys = batch[0].keys()\n res = {}\n for k in keys:\n temp_ = []\n for b in batch:\n if b[k] is not None:\n temp_.append(b[k])\n if len(temp_) > 0:\n res[k] = default_collate(temp_)\n else:\n res[k] = None\n\n return res\n\n def create_iterator(self, batch_size):\n while True:\n sample_loader = DataLoader(\n dataset=self,\n batch_size=batch_size,\n drop_last=True,\n collate_fn=self.collate_fn\n )\n\n for item in sample_loader:\n yield item\n\n def polys_to_mask(self, polygons, height, width):\n rles = mask_utils.frPyObjects(polygons, height, width)\n rle = mask_utils.merge(rles)\n mask = mask_utils.decode(rle)\n return mask" } ]
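The dataloader snippets in the context list above all share one patch-preparation recipe: crop an instance by an enlarged bounding box, rescale the crop to a 256x256 patch, and zero-pad any shortfall. Below is a minimal, self-contained sketch of that step, assuming numpy and scikit-image; the helper name prepare_patch is hypothetical and not part of the source.

import numpy as np
from skimage import transform

def prepare_patch(mask, patch_h=256, patch_w=256):
    # Hypothetical helper mirroring the recurring recipe in the snippets above:
    # rescale the cropped (boolean) mask to the target patch size, then zero-pad any shortfall.
    h, w = mask.shape[:2]
    m = transform.rescale(mask.astype(float), (patch_h / h, patch_w / w))
    cur_h, cur_w = m.shape[:2]
    to_pad = ((0, max(patch_h - cur_h, 0)), (0, max(patch_w - cur_w, 0)))
    m = np.pad(m, to_pad)[:patch_h, :patch_w]
    return m[np.newaxis, ...]  # leading channel axis, as the loaders add with np.newaxis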
from data.dataloader_Fishbowl import FishBowl
from data.dataloader_MOViD_A import MOViD_A
from data.dataloader_KINS import Kins_Fusion_dataset, KINS_Aisformer_VRSP_Intersection
from data.dataloader_COCOA import COCOA_Fusion_dataset, COCOA_VRSP
21312
def load_dataset(config, args, mode):
    if mode=="train":
        if args.dataset=="KINS":
            train_dataset = Kins_Fusion_dataset(config, mode='train')
            test_dataset = Kins_Fusion_dataset(config, mode='test')
        elif args.dataset=="COCOA":
            train_dataset = COCOA_Fusion_dataset(config, mode='train')
            test_dataset = COCOA_Fusion_dataset(config, mode='test')
        elif args.dataset=="Fishbowl":
            train_dataset = FishBowl(config, mode='train')
            test_dataset = FishBowl(config, mode='test')
        elif args.dataset=="MOViD_A":
            train_dataset = MOViD_A(config, mode='train')
            test_dataset = MOViD_A(config, mode='test')
        return train_dataset, test_dataset
    else:
        if args.dataset=="KINS":
def load_dataset(config, args, mode):
    if mode=="train":
        if args.dataset=="KINS":
            train_dataset = Kins_Fusion_dataset(config, mode='train')
            test_dataset = Kins_Fusion_dataset(config, mode='test')
        elif args.dataset=="COCOA":
            train_dataset = COCOA_Fusion_dataset(config, mode='train')
            test_dataset = COCOA_Fusion_dataset(config, mode='test')
        elif args.dataset=="Fishbowl":
            train_dataset = FishBowl(config, mode='train')
            test_dataset = FishBowl(config, mode='test')
        elif args.dataset=="MOViD_A":
            train_dataset = MOViD_A(config, mode='train')
            test_dataset = MOViD_A(config, mode='test')
        return train_dataset, test_dataset
    else:
        if args.dataset=="KINS":
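The cropped_code/all_code above stop inside the else-branch; the sketch below shows how this dispatcher and the static collate_fn defined in the context snippets might be wired together. The config and args objects here are hypothetical stand-ins, not the project's real configuration.

import argparse
from types import SimpleNamespace
from torch.utils.data import DataLoader

# Hypothetical stand-ins; the real project presumably builds these from its own config files/CLI.
config = SimpleNamespace(root_path="/path/to/KINS")
args = argparse.Namespace(dataset="KINS")

train_dataset, test_dataset = load_dataset(config, args, mode="train")
train_loader = DataLoader(
    train_dataset,
    batch_size=4,
    shuffle=True,
    drop_last=True,
    collate_fn=train_dataset.collate_fn,  # the static collate_fn drops keys whose value is None
)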
test_dataset = KINS_Aisformer_VRSP_Intersection(config, mode='test')
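The next_line above instantiates the test split; each dataset class in the context also exposes a create_iterator helper built on its collate_fn. A hedged sketch of consuming it, assuming config is the same object used above; the shape comment follows from the 256x256 patch size.

# Hypothetical consumption of the test split via create_iterator from the snippet.
test_dataset = KINS_Aisformer_VRSP_Intersection(config, mode='test')
test_iter = test_dataset.create_iterator(batch_size=1)
batch = next(test_iter)
print(batch["vm_crop"].shape)  # expected torch.Size([1, 1, 256, 256]) given the 256x256 patches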
3
2023-12-21 04:25:47+00:00
24k
alipay/PainlessInferenceAcceleration
pia/lookahead/models/llama/modeling_llama_batch.py
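The context list that follows embeds LookaheadPreTrainedModel, whose _get_generation_mode switches to lookahead decoding only when decoding_kwargs carries use_lookahead, a decoding_length > 1, and a branch_length > 0. Below is a hedged sketch of such a call; loading of model and input_ids is assumed and not shown, and the key values are the illustrative defaults read by the snippet's .get() calls.

from transformers import GenerationConfig

# Assumptions: `model` is an already-loaded LookaheadPreTrainedModel subclass (e.g. the Llama
# variant this record points at) and `input_ids` is a left-padded batch of prompt token ids.
gen_cfg = GenerationConfig(do_sample=False, num_beams=1, use_cache=True, max_new_tokens=128)
# _get_generation_mode in the snippet below selects LOOKAHEAD_GENERATION when these keys are set.
gen_cfg.decoding_kwargs = {"use_lookahead": True, "decoding_length": 64, "branch_length": 12}

output_ids = model.generate(input_ids, generation_config=gen_cfg)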
[ { "identifier": "LookaheadPreTrainedModel", "path": "pia/lookahead/common/pretrained_model_batch.py", "snippet": "class LookaheadPreTrainedModel(PreTrainedModel):\n _batch_generation = True\n _stream_generation = False\n\n def __init__(self, config):\n super().__init__(config=config)\n\n def _get_generation_mode(\n self, generation_config: GenerationConfig, assistant_model: Optional[\"PreTrainedModel\"]\n ) -> GenerationMode:\n \"\"\"\n Returns the generation mode triggered by a [`GenerationConfig`] instance.\n \"\"\"\n if generation_config.constraints is not None or generation_config.force_words_ids is not None:\n generation_mode = GenerationMode.CONSTRAINED_BEAM_SEARCH\n elif generation_config.num_beams == 1:\n if generation_config.do_sample is False:\n if (\n generation_config.top_k is not None\n and generation_config.top_k > 1\n and generation_config.penalty_alpha is not None\n and generation_config.penalty_alpha > 0\n ):\n generation_mode = GenerationMode.CONTRASTIVE_SEARCH\n elif generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.GREEDY_SEARCH\n else:\n if generation_config.use_cache \\\n and hasattr(generation_config, 'decoding_kwargs') \\\n and generation_config.decoding_kwargs.get('use_lookahead', False) \\\n and generation_config.decoding_kwargs.get('decoding_length', 64) > 1 \\\n and generation_config.decoding_kwargs.get('branch_length', 12) > 0:\n generation_mode = GenerationMode.LOOKAHEAD_GENERATION\n else:\n generation_mode = GenerationMode.SAMPLE\n else:\n if generation_config.num_beam_groups > 1:\n generation_mode = GenerationMode.GROUP_BEAM_SEARCH\n elif generation_config.do_sample is True:\n generation_mode = GenerationMode.BEAM_SAMPLE\n else:\n generation_mode = GenerationMode.BEAM_SEARCH\n\n # Assisted generation may extend some generation modes\n if assistant_model is not None:\n if generation_mode in (\"greedy_search\", \"sample\"):\n generation_mode = GenerationMode.ASSISTED_GENERATION\n else:\n raise ValueError(\n \"You've set `assistant_model`, which triggers assisted generate. Currently, assisted generate \"\n \"is only supported with Greedy Search and Sample.\"\n )\n return generation_mode\n\n @torch.no_grad()\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n r\"\"\"\n\n Generates sequences of token ids for models with a language modeling head.\n\n <Tip warning={true}>\n\n Most generation-controlling parameters are set in `generation_config` which, if not passed, will be set to the\n model's default generation configuration. You can override any `generation_config` by passing the corresponding\n parameters to generate(), e.g. 
`.generate(inputs, num_beams=4, do_sample=True)`.\n\n For an overview of generation strategies and code examples, check out the [following\n guide](../generation_strategies).\n\n </Tip>\n\n Parameters:\n inputs (`torch.Tensor` of varying shape depending on the modality, *optional*):\n The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the\n method initializes it with `bos_token_id` and a batch size of 1. For decoder-only models `inputs`\n should of in the format of `input_ids`. For encoder-decoder models *inputs* can represent any of\n `input_ids`, `input_values`, `input_features`, or `pixel_values`.\n generation_config (`~generation.GenerationConfig`, *optional*):\n The generation configuration to be used as base parametrization for the generation call. `**kwargs`\n passed to generate matching the attributes of `generation_config` will override them. If\n `generation_config` is not provided, the default will be used, which had the following loading\n priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model\n configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s\n default values, whose documentation should be checked to parameterize generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n Custom logits processors that complement the default logits processors built from arguments and\n generation config. If a logit processor is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n Custom stopping criteria that complement the default stopping criteria built from arguments and a\n generation config. If a stopping criteria is passed that is already created with the arguments or a\n generation config an error is thrown. This feature is intended for advanced users.\n prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):\n If provided, this function constraints the beam search to allowed tokens only at each step. If not\n provided no constraint is applied. This function takes 2 arguments: the batch ID `batch_id` and\n `input_ids`. It has to return a list with the allowed tokens for the next generation step conditioned\n on the batch ID `batch_id` and the previously generated tokens `inputs_ids`. This argument is useful\n for constrained generation conditioned on the prefix, as described in [Autoregressive Entity\n Retrieval](https://arxiv.org/abs/2010.00904).\n synced_gpus (`bool`, *optional*):\n Whether to continue running the while loop until max_length. Unless overridden this flag will be set to\n `True` under DeepSpeed ZeRO Stage 3 multiple GPUs environment to avoid hanging if one GPU finished\n generating before other GPUs. Otherwise it'll be set to `False`.\n assistant_model (`PreTrainedModel`, *optional*):\n An assistant model that can be used to accelerate generation. The assistant model must have the exact\n same tokenizer. The acceleration is achieved when forecasting candidate tokens with the assistent model\n is much faster than running generation with the model you're calling generate from. As such, the\n assistant model should be much smaller.\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. 
Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n kwargs (`Dict[str, Any]`, *optional*):\n Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be\n forwarded to the `forward` function of the model. If the model is an encoder-decoder model, encoder\n specific kwargs should not be prefixed and decoder specific kwargs should be prefixed with *decoder_*.\n\n Return:\n [`~utils.ModelOutput`] or `torch.LongTensor`: A [`~utils.ModelOutput`] (if `return_dict_in_generate=True`\n or when `config.return_dict_in_generate=True`) or a `torch.FloatTensor`.\n\n If the model is *not* an encoder-decoder model (`model.config.is_encoder_decoder=False`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchDecoderOnlyOutput`],\n - [`~generation.SampleDecoderOnlyOutput`],\n - [`~generation.BeamSearchDecoderOnlyOutput`],\n - [`~generation.BeamSampleDecoderOnlyOutput`]\n\n If the model is an encoder-decoder model (`model.config.is_encoder_decoder=True`), the possible\n [`~utils.ModelOutput`] types are:\n\n - [`~generation.GreedySearchEncoderDecoderOutput`],\n - [`~generation.SampleEncoderDecoderOutput`],\n - [`~generation.BeamSearchEncoderDecoderOutput`],\n - [`~generation.BeamSampleEncoderDecoderOutput`]\n \"\"\"\n\n if synced_gpus is None:\n # if is_deepspeed_zero3_enabled() and dist.get_world_size() > 1:\n # synced_gpus = True\n # else:\n # synced_gpus = False\n synced_gpus = False\n\n # 1. Handle `generation_config` and kwargs that might update it, and validate the `.generate()` call\n self._validate_model_class()\n\n # priority: `generation_config` argument > `model.generation_config` (the default generation config)\n if generation_config is None:\n # legacy: users may modify the model configuration to control generation -- update the generation config\n # model attribute accordingly, if it was created from the model config\n if self.generation_config._from_model_config:\n new_generation_config = GenerationConfig.from_model_config(self.config)\n if new_generation_config != self.generation_config:\n # warnings.warn(\n # \"You have modified the pretrained model configuration to control generation. This is a\"\n # \" deprecated strategy to control generation and will be removed soon, in a future version.\"\n # \" Please use a generation configuration file (see\"\n # \" https://huggingface.co/docs/transformers/main_classes/text_generation )\"\n # )\n self.generation_config = new_generation_config\n generation_config = self.generation_config\n\n generation_config = copy.deepcopy(generation_config)\n model_kwargs = generation_config.update(**kwargs) # All unused kwargs must be model kwargs\n generation_config.validate()\n self._validate_model_kwargs(model_kwargs.copy())\n if not hasattr(generation_config, 'decoding_kwargs'):\n generation_config.decoding_kwargs = model_kwargs.get('decoding_kwargs', {})\n\n # 2. Set generation parameters if not already defined\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n\n if generation_config.pad_token_id is None and generation_config.eos_token_id is not None:\n if model_kwargs.get(\"attention_mask\", None) is None:\n logger.warning(\n \"The attention mask and the pad token id were not set. As a consequence, you may observe \"\n \"unexpected behavior. 
Please pass your input's `attention_mask` to obtain reliable results.\"\n )\n eos_token_id = generation_config.eos_token_id\n if isinstance(eos_token_id, list):\n eos_token_id = eos_token_id[0]\n logger.warning(f\"Setting `pad_token_id` to `eos_token_id`:{eos_token_id} for open-end generation.\")\n generation_config.pad_token_id = eos_token_id\n\n # 3. Define model inputs\n # inputs_tensor has to be defined\n # model_input_name is defined if model-specific keyword input is passed\n # otherwise model_input_name is None\n # all model-specific keyword inputs are removed from `model_kwargs`\n inputs_tensor, model_input_name, model_kwargs = self._prepare_model_inputs(\n inputs, generation_config.bos_token_id, model_kwargs\n )\n batch_size = inputs_tensor.shape[0]\n\n # 4. Define other model kwargs\n model_kwargs[\"output_attentions\"] = generation_config.output_attentions\n model_kwargs[\"output_hidden_states\"] = generation_config.output_hidden_states\n # decoder-only models with inputs_embeds forwarding must use caching (otherwise we can't detect whether we are\n # generating the first new token or not, and we only want to use the embeddings for the first new token)\n if not self.config.is_encoder_decoder and model_input_name == \"inputs_embeds\":\n model_kwargs[\"use_cache\"] = True\n else:\n model_kwargs[\"use_cache\"] = generation_config.use_cache\n\n accepts_attention_mask = \"attention_mask\" in set(inspect.signature(self.forward).parameters.keys())\n requires_attention_mask = \"encoder_outputs\" not in model_kwargs\n\n if model_kwargs.get(\"attention_mask\", None) is None and requires_attention_mask and accepts_attention_mask:\n model_kwargs[\"attention_mask\"] = self._prepare_attention_mask_for_generation(\n inputs_tensor, generation_config.pad_token_id, generation_config.eos_token_id\n )\n\n # decoder-only models should use left-padding for generation\n if not self.config.is_encoder_decoder:\n # If `input_ids` was given, check if the last id in any sequence is `pad_token_id`\n # Note: If using, `inputs_embeds` this check does not work, because we want to be more hands-off.\n if (\n generation_config.pad_token_id is not None\n and len(inputs_tensor.shape) == 2\n and torch.sum(inputs_tensor[:, -1] == generation_config.pad_token_id) > 0\n ):\n logger.warning(\n \"A decoder-only architecture is being used, but right-padding was detected! For correct \"\n \"generation results, please set `padding_side='left'` when initializing the tokenizer.\"\n )\n\n if self.config.is_encoder_decoder and \"encoder_outputs\" not in model_kwargs:\n # if model is encoder decoder encoder_outputs are created\n # and added to `model_kwargs`\n model_kwargs = self._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, model_kwargs, model_input_name\n )\n\n # 5. Prepare `input_ids` which will be used for auto-regressive generation\n if self.config.is_encoder_decoder:\n input_ids, model_kwargs = self._prepare_decoder_input_ids_for_generation(\n batch_size=batch_size,\n model_input_name=model_input_name,\n model_kwargs=model_kwargs,\n decoder_start_token_id=generation_config.decoder_start_token_id,\n bos_token_id=generation_config.bos_token_id,\n device=inputs_tensor.device,\n )\n else:\n input_ids = inputs_tensor if model_input_name == \"input_ids\" else model_kwargs.pop(\"input_ids\")\n\n if streamer is not None:\n streamer.put(input_ids.cpu())\n\n # 6. 
Prepare `max_length` depending on other stopping criteria.\n input_ids_length = input_ids.shape[-1]\n has_default_max_length = kwargs.get(\"max_length\") is None and generation_config.max_length is not None\n if generation_config.max_new_tokens is not None:\n if not has_default_max_length:\n logger.warning(\n f\"Both `max_new_tokens` (={generation_config.max_new_tokens}) and `max_length`(=\"\n f\"{generation_config.max_length}) seem to have been set. `max_new_tokens` will take precedence. \"\n \"Please refer to the documentation for more information. \"\n \"(https://huggingface.co/docs/transformers/main/en/main_classes/text_generation)\"\n )\n generation_config.max_length = generation_config.max_new_tokens + input_ids_length\n\n # 7. determine generation mode\n generation_mode = self._get_generation_mode(generation_config, assistant_model)\n\n if streamer is not None and (generation_config.num_beams > 1):\n raise ValueError(\n \"`streamer` cannot be used with beam search (yet!). Make sure that `num_beams` is set to 1.\"\n )\n\n if self.device.type != input_ids.device.type:\n warnings.warn(\n \"You are calling .generate() with the `input_ids` being on a device type different\"\n f\" than your model's device. `input_ids` is on {input_ids.device.type}, whereas the model\"\n f\" is on {self.device.type}. You may experience unexpected behaviors or slower generation.\"\n \" Please make sure that you have put `input_ids` to the\"\n f\" correct device by calling for example input_ids = input_ids.to('{self.device.type}') before\"\n \" running `.generate()`.\",\n UserWarning,\n )\n\n # 8. prepare distribution pre_processing samplers\n logits_processor = self._get_logits_processor(\n generation_config=generation_config,\n input_ids_seq_length=input_ids_length,\n encoder_input_ids=inputs_tensor,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n logits_processor=logits_processor,\n )\n\n # 9. prepare stopping criteria\n stopping_criteria = self._get_stopping_criteria(\n generation_config=generation_config, stopping_criteria=stopping_criteria\n )\n\n decoding_kwargs = generation_config.decoding_kwargs if hasattr(generation_config, 'decoding_kwargs') else {}\n decoding_kwargs['generation_mode'] = generation_mode\n decoding_kwargs['do_sample'] = generation_config.do_sample\n decoding_kwargs['inputs_embeds_position'] = generation_config.inputs_embeds_position if hasattr(generation_config, 'inputs_embeds_position') else 0\n decoding_kwargs['max_length'] = generation_config.max_length\n if generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n decoding_kwargs['decoding_max_length'] = generation_config.max_length + decoding_length + 1\n else:\n decoding_kwargs['decoding_max_length'] = generation_config.max_length\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n\n # 10. go into different generation modes\n if generation_mode == GenerationMode.ASSISTED_GENERATION:\n if generation_config.num_return_sequences > 1:\n raise ValueError(\n \"num_return_sequences has to be 1 when doing assisted generate, \"\n f\"but is {generation_config.num_return_sequences}.\"\n )\n if batch_size > 1:\n raise ValueError(\"assisted generate is only supported for batch_size = 1\")\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"assisted generate requires `use_cache=True`\")\n\n # 11. 
If the assistant model is an encoder-decoder, prepare its encoder outputs\n if assistant_model.config.is_encoder_decoder:\n assistant_model_kwargs = copy.deepcopy(model_kwargs)\n inputs_tensor, model_input_name, assistant_model_kwargs = assistant_model._prepare_model_inputs(\n inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_model_kwargs\n )\n assistant_model_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(\n inputs_tensor, assistant_model_kwargs, model_input_name\n )\n model_kwargs[\"assistant_encoder_outputs\"] = assistant_model_kwargs[\"encoder_outputs\"]\n\n # 12. run assisted generate\n return self.assisted_decoding(\n input_ids,\n assistant_model=assistant_model,\n do_sample=generation_config.do_sample,\n logits_processor=logits_processor,\n logits_warper=self._get_logits_warper(generation_config) if generation_config.do_sample else None,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n if generation_mode == GenerationMode.GREEDY_SEARCH:\n # 11. run greedy search\n return self.greedy_search(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.LOOKAHEAD_GENERATION:\n # 11. run greedy search\n return self.lookahead_generation(\n input_ids,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONTRASTIVE_SEARCH:\n if not model_kwargs[\"use_cache\"]:\n raise ValueError(\"Contrastive search requires `use_cache=True`\")\n\n return self.contrastive_search(\n input_ids,\n top_k=generation_config.top_k,\n penalty_alpha=generation_config.penalty_alpha,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n sequential=generation_config.low_memory,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. expand input_ids with `num_return_sequences` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_return_sequences,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 13. 
run sample\n return self.sample(\n input_ids,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n streamer=streamer,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.BEAM_SAMPLE:\n # 11. prepare logits warper\n logits_warper = self._get_logits_warper(generation_config)\n\n # 12. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n\n # 13. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n\n # 14. run beam sample\n return self.beam_sample(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n logits_warper=logits_warper,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.GROUP_BEAM_SEARCH:\n # 11. prepare beam search scorer\n beam_scorer = BeamSearchScorer(\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n num_beam_groups=generation_config.num_beam_groups,\n max_length=generation_config.max_length,\n )\n # 12. 
interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. run beam search\n return self.group_beam_search(\n input_ids,\n beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n elif generation_mode == GenerationMode.CONSTRAINED_BEAM_SEARCH:\n final_constraints = []\n if generation_config.constraints is not None:\n final_constraints = generation_config.constraints\n\n if generation_config.force_words_ids is not None:\n\n def typeerror():\n raise ValueError(\n \"`force_words_ids` has to either be a `List[List[List[int]]]` or `List[List[int]]`\"\n f\"of positive integers, but is {generation_config.force_words_ids}.\"\n )\n\n if (\n not isinstance(generation_config.force_words_ids, list)\n or len(generation_config.force_words_ids) == 0\n ):\n typeerror()\n\n for word_ids in generation_config.force_words_ids:\n if isinstance(word_ids[0], list):\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any(not isinstance(token_ids, list) for token_ids in word_ids):\n typeerror()\n if any(\n any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)\n for token_ids in word_ids\n ):\n typeerror()\n\n constraint = DisjunctiveConstraint(word_ids)\n else:\n if not isinstance(word_ids, list) or len(word_ids) == 0:\n typeerror()\n if any((not isinstance(token_id, int) or token_id < 0) for token_id in word_ids):\n typeerror()\n\n constraint = PhrasalConstraint(word_ids)\n final_constraints.append(constraint)\n\n # 11. prepare beam search scorer\n constrained_beam_scorer = ConstrainedBeamSearchScorer(\n constraints=final_constraints,\n batch_size=batch_size,\n num_beams=generation_config.num_beams,\n device=inputs_tensor.device,\n length_penalty=generation_config.length_penalty,\n do_early_stopping=generation_config.early_stopping,\n num_beam_hyps_to_keep=generation_config.num_return_sequences,\n max_length=generation_config.max_length,\n )\n # 12. interleave input_ids with `num_beams` additional sequences per batch\n input_ids, model_kwargs = self._expand_inputs_for_generation(\n input_ids=input_ids,\n expand_size=generation_config.num_beams,\n is_encoder_decoder=self.config.is_encoder_decoder,\n **model_kwargs,\n )\n # 13. 
run beam search\n return self.constrained_beam_search(\n input_ids,\n constrained_beam_scorer=constrained_beam_scorer,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n pad_token_id=generation_config.pad_token_id,\n eos_token_id=generation_config.eos_token_id,\n output_scores=generation_config.output_scores,\n return_dict_in_generate=generation_config.return_dict_in_generate,\n synced_gpus=synced_gpus,\n **model_kwargs,\n )\n\n def lookahead_prepare_inputs_for_generation(self,\n input_ids,\n past_key_values=None,\n attention_mask=None,\n inputs_embeds=None,\n **kwargs):\n position_ids = kwargs.get(\"position_ids\", None)\n\n decoding_kwargs = kwargs.get('decoding_kwargs', {})\n decoding_length = decoding_kwargs.get('decoding_length', 64)\n branch_length = decoding_kwargs.get('branch_length', 12)\n decoding_mode = decoding_kwargs.get('decoding_mode', 'hier')\n max_length = decoding_kwargs.get('max_length', 2048)\n batch_indices = decoding_kwargs.get('batch_indices', None)\n decoding_cursors = decoding_kwargs.get('decoding_cursors', None)\n device = input_ids.device\n\n if past_key_values is None:\n if inputs_embeds is not None and input_ids is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds, \"input_ids\": input_ids}\n length = input_ids.size(1)\n elif input_ids is not None:\n model_inputs = {\"input_ids\": input_ids}\n length = input_ids.size(1)\n elif inputs_embeds is not None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n length = input_ids.size(1)\n else:\n raise ValueError('either input_ids or inputs_embeds is not None')\n update_attention_mask = attention_mask[:, :, :length, :length]\n\n model_inputs.update(\n {\"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": update_attention_mask,\n \"decoding_kwargs\": decoding_kwargs\n })\n\n if position_ids is not None:\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, encoding=True, length=length)\n\n else:\n\n cs = torch.tensor([[x - 1, x] for x in decoding_cursors], device=input_ids.device)\n qids = torch.gather(input_ids, 1, cs)\n decoding_qids = qids.tolist()\n\n if decoding_mode in ('hier', 'par', 'one'):\n decoding_mode = decoding_mode + '_mix'\n fmt, mode = decoding_mode.split('_')\n sub_decoding_length = max(decoding_length // len(decoding_qids), 1)\n decoding_ids, decoding_masks, decoding_lengths = self.lookahead_cache.bat_get(decoding_qids,\n decoding_length=sub_decoding_length,\n branch_length=branch_length,\n decoding_cursors=decoding_cursors,\n mode=mode,\n indices=batch_indices,\n decoding_mode=fmt)\n sizes = list(set([len(x) for x in decoding_ids]))\n assert len(sizes) == 1\n decodinged = True if sizes[0] > 1 else False\n input_id_slice = torch.tensor(decoding_ids, device=input_ids.device)\n\n min_cur = min(decoding_cursors)\n decoding_mask_tensor = torch.from_numpy(decoding_masks[:, None]).to(dtype=torch.long,\n device=input_ids.device)\n decoding_attention_mask = torch.cat(\n [attention_mask[:, :, min_cur: min_cur + sizes[0], :min_cur], decoding_mask_tensor], dim=-1)\n\n decoding_kwargs.update({'decoding_qids': decoding_qids,\n 'decoding_ids': decoding_ids,\n 'decoding_masks': decoding_masks,\n 'decoding_lengths': decoding_lengths,\n 'decoding_qids': decoding_qids,\n 'decoding_cursors': decoding_cursors,\n 'decoding_cursors_tensor': torch.tensor(decoding_cursors, \n dtype=torch.int32,\n device=device),\n 'batch_indices': batch_indices,\n })\n\n model_inputs = {'decoding_kwargs': decoding_kwargs}\n\n 
model_inputs.update(\n {\n \"input_ids\": input_id_slice,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": decoding_attention_mask\n }\n )\n if position_ids is not None:\n indices = torch.sum(decoding_attention_mask, dim=3).squeeze(1)[0]\n model_inputs[\"position_ids\"] = self._get_position_ids(position_ids, indices=indices, encoding=False)\n\n return model_inputs\n\n def _get_position_ids(self, full_position_ids, indices=None, length=None, encoding=True):\n if encoding:\n return full_position_ids[..., :length]\n else:\n return full_position_ids[..., indices]\n\n def _lookahead_update_model_kwargs_for_generation(\n self,\n outputs: ModelOutput,\n model_kwargs: Dict[str, Any],\n is_encoder_decoder: bool = False,\n standardize_cache_format: bool = False,\n logits_processor: Optional[LogitsProcessorList] = None,\n input_ids: Optional[torch.Tensor] = None,\n ) -> Dict[str, Any]:\n bs, input_length = input_ids.shape\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_ids = decoding_kwargs.get('decoding_ids', [])\n eos = decoding_kwargs.get('eos', 2)\n max_length = decoding_kwargs.get('max_length', 1024)\n device = outputs.logits.device\n dtype = outputs.logits.dtype\n\n encoding = model_kwargs.get(\"past_key_values\", None) is None\n dls = []\n edls = []\n if encoding:\n # encoding stage\n past_key_values = outputs.past_key_values\n _, n_head, _, head_dim = past_key_values[0][0].size()\n\n _, nt, nv = outputs.logits.shape\n next_tokens_scores = logits_processor(input_ids, outputs.logits[:, -1]).view(bs, 1, nv)\n\n max_new_tokens = max_length - input_length\n input_ids = torch.cat([input_ids, torch.ones((bs, max_new_tokens), dtype=torch.long, device=device) * eos],\n dim=1)\n decoding_cursors = [input_length] * bs\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n bs, nt, nv = probs.shape\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n\n input_ids[:, input_length:input_length + 1] = next_tokens\n\n model_kwargs[\"past_key_values\"] = past_key_values\n model_kwargs['next_tokens'] = next_tokens\n model_kwargs['next_token_list'] = next_tokens.tolist()\n model_kwargs['next_tokens_scores'] = next_tokens_scores\n dls.extend([1] * bs)\n edls.extend([1] * bs)\n\n else:\n decoding_cursors = decoding_kwargs.get('decoding_cursors', None)\n min_cur = min(decoding_cursors)\n max_cur = max(decoding_cursors)\n decoding_masks = decoding_kwargs['decoding_masks']\n decoding_lengths = decoding_kwargs['decoding_lengths']\n\n # TODO: accurate logit_processor\n # next_tokens_scores = logits_processor(input_ids[:,:max_cur], outputs.logits)\n bs, nt, nv = outputs.logits.shape\n next_tokens_scores = logits_processor(input_ids[:, :max_cur].repeat(1, nt).view(bs * nt, -1),\n outputs.logits.view(bs * nt, -1)).view(bs, nt, -1)\n\n if decoding_kwargs.get('do_sample', False):\n probs = nn.functional.softmax(next_tokens_scores, dim=-1)\n next_tokens = torch.multinomial(probs.view(bs * nt, nv), num_samples=1).view(bs, nt)\n else:\n next_tokens = torch.argmax(next_tokens_scores, dim=-1, keepdim=False).long()\n\n next_token_list = next_tokens.tolist()\n update_next_token_list = [[] for _ in range(len(next_token_list))]\n for ib in range(bs):\n max_match_index = -1\n max_match_count = 0\n max_decoding_ids_slice = None\n max_next_token_slice = None\n\n decoding_ids_ = 
decoding_ids[ib][1:]\n org_branch_length = len(decoding_ids_)\n\n cur = decoding_cursors[ib]\n for i in range(len(decoding_ids_)):\n\n mask_indices, = np.nonzero(decoding_masks[ib, i + 1, cur - min_cur + 1:])\n if mask_indices.size == 0:\n continue\n decoding_ids_slice = [decoding_ids_[j] for j in mask_indices]\n # next in logic rather than next in position\n next_token_slice = [next_token_list[ib][0]] + [next_token_list[ib][j + 1] for j in mask_indices]\n\n c = len(decoding_ids_slice)\n for j, p in enumerate(decoding_ids_slice):\n if next_token_slice[j] != p:\n c = j\n break\n\n if c > max_match_count:\n max_match_count = c\n max_match_index = i\n if c >= max_match_count:\n max_decoding_ids_slice = decoding_ids_slice\n max_next_token_slice = next_token_slice\n\n dls.append(org_branch_length + 1)\n edls.append(max_match_count + 1)\n prefix_length = cur + 1\n if cur + max_match_count + 2 > input_length:\n max_match_count = max(input_length - cur - 2, 0)\n if max_match_count > 0:\n match_idx = np.nonzero(decoding_masks[ib, max_match_index + 1, cur - min_cur + 1:])[0][\n : max_match_count]\n\n if len(decoding_ids_) != max_match_count and max_match_index + 1 != max_match_count:\n kv_idx = match_idx + prefix_length\n kv_idx_tensor = torch.from_numpy(kv_idx).to(device)\n self._update_cache(model_kwargs[\"past_key_values\"],\n ib,\n kv_idx,\n prefix_and_next_count=prefix_length,\n max_match_count=max_match_count,\n max_match_index=max_match_index)\n next_token_list_ = next_token_list[ib][0: 1] + [next_token_list[ib][x + 1] for x in match_idx]\n update_next_token_list[ib] = next_token_list_ + (\n org_branch_length - len(next_token_list_) + 1) * [-1]\n next_tokens = torch.tensor(next_token_list_, device=device)\n input_ids[ib, cur + 1: cur + max_match_count + 2] = next_tokens\n else:\n # max_match_count = 0\n next_token_list_ = next_token_list[ib][:1]\n update_next_token_list[ib] = next_token_list_ + org_branch_length * [-1]\n input_ids[ib, cur + 1] = next_token_list_[0]\n\n decoding_cursors[ib] += max_match_count + 1\n\n if decoding_kwargs.get('debug_lookahead', False):\n lengths = np.sum(decoding_masks[ib, :, cur - min_cur:], axis=1) - 1\n larr = np.concatenate([lengths[:-1][(lengths[1:] - lengths[:-1]) <= 0], lengths[-1:]], axis=0)\n ls = ','.join(larr.astype(np.int32).astype(np.str_))\n decoding_qids = decoding_kwargs['decoding_qids'][ib]\n size_str = ','.join([str(x) for x in decoding_lengths[ib]])\n print(\n f'batch_index:{ib}/{bs} decoding_length:{len(decoding_ids_)} accept_length:{max_match_count} '\n f'query:{decoding_qids} source:{size_str} lengths:{ls} index:{max_match_index} '\n f'branch_token:{max_decoding_ids_slice} next_token:{max_next_token_slice}')\n model_kwargs['next_tokens'] = torch.tensor(update_next_token_list, device=device)\n model_kwargs['next_token_list'] = update_next_token_list\n model_kwargs['next_tokens_scores'] = []\n model_kwargs['input_ids'] = input_ids\n decoding_kwargs['decoding_cursors'] = decoding_cursors\n decoding_kwargs['dls'].extend(dls)\n decoding_kwargs['edls'].extend(edls)\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n return model_kwargs\n\n def _early_stop(self,\n unfinished_sequences,\n output_ids,\n batch_indices,\n model_kwargs):\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n input_ids = model_kwargs['input_ids']\n\n unfinished_sequence_list = unfinished_sequences.tolist()\n unfinished_index_list = []\n for i, (seq,) in enumerate(unfinished_sequence_list):\n if seq == 0:\n idx = batch_indices[i]\n output_ids[idx, :input_ids.size(-1)] 
= input_ids[i]\n else:\n unfinished_index_list.append(i)\n\n output_batch_indices = [batch_indices[i] for i in unfinished_index_list]\n\n bs = input_ids.size(0)\n finished_count = bs - len(unfinished_index_list)\n\n if finished_count > 0 and bs > 1 and finished_count != bs:\n unfinished_indices = torch.tensor(unfinished_index_list, device=unfinished_sequences.device)\n unfinished_sequences = unfinished_sequences[unfinished_indices]\n\n model_kwargs['input_ids'] = input_ids[unfinished_indices]\n position_ids = model_kwargs.get('position_ids', None)\n if position_ids is not None:\n position_ids = position_ids[unfinished_indices]\n model_kwargs['position_ids'] = position_ids\n model_kwargs['attention_mask'] = model_kwargs['attention_mask'][unfinished_indices]\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_cursors = decoding_kwargs['decoding_cursors']\n decoding_kwargs['decoding_cursors'] = [decoding_cursors[i] for i in unfinished_index_list]\n batch_indices = decoding_kwargs['batch_indices']\n decoding_kwargs['batch_indices'] = [batch_indices[i] for i in unfinished_index_list]\n\n past_key_values = []\n for kv in model_kwargs['past_key_values']:\n k, v = kv\n k = k[unfinished_indices]\n v = v[unfinished_indices]\n past_key_values.append((k, v))\n model_kwargs['past_key_values'] = tuple(past_key_values)\n\n return unfinished_sequences, output_ids, output_batch_indices, model_kwargs\n\n def _update_cache(self, past_key_values, batch_idx, kv_idx, prefix_and_next_count=None, max_match_count=None,\n max_match_index=None):\n for k, v in past_key_values:\n k[batch_idx, :, prefix_and_next_count:prefix_and_next_count + max_match_count] = k[batch_idx, :, kv_idx]\n v[batch_idx, :, prefix_and_next_count:prefix_and_next_count + max_match_count] = v[batch_idx, :, kv_idx]\n\n def lookahead_generation(\n self,\n input_ids: torch.LongTensor,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n max_length: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[Union[int, List[int]]] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n output_scores: Optional[bool] = None,\n return_dict_in_generate: Optional[bool] = None,\n synced_gpus: bool = False,\n streamer: Optional[\"BaseStreamer\"] = None,\n **model_kwargs,\n ) -> Union[GreedySearchOutput, torch.LongTensor]:\n r\"\"\"\n Generates sequences of token ids for models with a language modeling head using **greedy decoding** and can be\n used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.\n\n <Tip warning={true}>\n\n In most cases, you do not need to call [`~generation.GenerationMixin.greedy_search`] directly. Use generate()\n instead. For an overview of generation strategies and code examples, check the [following\n guide](../generation_strategies).\n\n </Tip>\n\n\n Parameters:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n The sequence used as a prompt for the generation.\n logits_processor (`LogitsProcessorList`, *optional*):\n An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]\n used to modify the prediction scores of the language modeling head applied at each generation step.\n stopping_criteria (`StoppingCriteriaList`, *optional*):\n An instance of [`StoppingCriteriaList`]. 
List of instances of class derived from [`StoppingCriteria`]\n used to tell if the generation loop should stop.\n\n max_length (`int`, *optional*, defaults to 20):\n **DEPRECATED**. Use `logits_processor` or `stopping_criteria` directly to cap the number of generated\n tokens. The maximum length of the sequence to be generated.\n pad_token_id (`int`, *optional*):\n The id of the *padding* token.\n eos_token_id (`Union[int, List[int]]`, *optional*):\n The id of the *end-of-sequence* token. Optionally, use a list to set multiple *end-of-sequence* tokens.\n output_attentions (`bool`, *optional*, defaults to `False`):\n Whether or not to return the attentions tensors of all attention layers. See `attentions` under\n returned tensors for more details.\n output_hidden_states (`bool`, *optional*, defaults to `False`):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors\n for more details.\n output_scores (`bool`, *optional*, defaults to `False`):\n Whether or not to return the prediction scores. See `scores` under returned tensors for more details.\n return_dict_in_generate (`bool`, *optional*, defaults to `False`):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n synced_gpus (`bool`, *optional*, defaults to `False`):\n Whether to continue running the while loop until max_length (needed for ZeRO stage 3)\n streamer (`BaseStreamer`, *optional*):\n Streamer object that will be used to stream the generated sequences. Generated tokens are passed\n through `streamer.put(token_ids)` and the streamer is responsible for any further processing.\n model_kwargs:\n Additional model specific keyword arguments will be forwarded to the `forward` function of the model.\n If model is an encoder-decoder model the kwargs should include `encoder_outputs`.\n\n Return:\n [`~generation.GreedySearchDecoderOnlyOutput`], [`~generation.GreedySearchEncoderDecoderOutput`] or\n `torch.LongTensor`: A `torch.LongTensor` containing the generated tokens (default behaviour) or a\n [`~generation.GreedySearchDecoderOnlyOutput`] if `model.config.is_encoder_decoder=False` and\n `return_dict_in_generate=True` or a [`~generation.GreedySearchEncoderDecoderOutput`] if\n `model.config.is_encoder_decoder=True`.\n\n Examples:\n\n ```python\n >>> from transformers import (\n ... AutoTokenizer,\n ... AutoModelForCausalLM,\n ... LogitsProcessorList,\n ... MinLengthLogitsProcessor,\n ... StoppingCriteriaList,\n ... MaxLengthCriteria,\n ... )\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2\")\n\n >>> # set pad_token_id to eos_token_id because GPT2 does not have a PAD token\n >>> model.generation_config.pad_token_id = model.generation_config.eos_token_id\n\n >>> input_prompt = \"It might be possible to\"\n >>> input_ids = tokenizer(input_prompt, return_tensors=\"pt\").input_ids\n\n >>> # instantiate logits processors\n >>> logits_processor = LogitsProcessorList(\n ... [\n ... MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id),\n ... ]\n ... )\n >>> stopping_criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])\n\n >>> outputs = model.greedy_search(\n ... input_ids, logits_processor=logits_processor, stopping_criteria=stopping_criteria\n ... 
)\n\n >>> tokenizer.batch_decode(outputs, skip_special_tokens=True)\n [\"It might be possible to get a better understanding of the nature of the problem, but it's not\"]\n ```\"\"\"\n # init values\n\n if not hasattr(self, 'lookahead_cache'):\n self.lookahead_cache = LookaheadCache()\n\n logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()\n stopping_criteria = stopping_criteria if stopping_criteria is not None else StoppingCriteriaList()\n if max_length is not None:\n warnings.warn(\n \"`max_length` is deprecated in this function, use\"\n \" `stopping_criteria=StoppingCriteriaList([MaxLengthCriteria(max_length=max_length)])` instead.\",\n UserWarning,\n )\n stopping_criteria = validate_stopping_criteria(stopping_criteria, max_length)\n pad_token_id = pad_token_id if pad_token_id is not None else self.generation_config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.generation_config.eos_token_id\n if isinstance(eos_token_id, int):\n eos_token_id = [eos_token_id]\n eos_token_id_tensor = torch.tensor(eos_token_id, device=input_ids.device) if eos_token_id is not None else None\n output_scores = output_scores if output_scores is not None else self.generation_config.output_scores\n output_attentions = (\n output_attentions if output_attentions is not None else self.generation_config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.generation_config.output_hidden_states\n )\n return_dict_in_generate = (\n return_dict_in_generate\n if return_dict_in_generate is not None\n else self.generation_config.return_dict_in_generate\n )\n\n # init attention / hidden states / scores tuples\n scores = () if (return_dict_in_generate and output_scores) else None\n decoder_attentions = () if (return_dict_in_generate and output_attentions) else None\n cross_attentions = () if (return_dict_in_generate and output_attentions) else None\n decoder_hidden_states = () if (return_dict_in_generate and output_hidden_states) else None\n\n # if model is an encoder-decoder, retrieve encoder attention weights and hidden states\n if return_dict_in_generate and self.config.is_encoder_decoder:\n encoder_attentions = model_kwargs[\"encoder_outputs\"].get(\"attentions\") if output_attentions else None\n encoder_hidden_states = (\n model_kwargs[\"encoder_outputs\"].get(\"hidden_states\") if output_hidden_states else None\n )\n\n decoding_kwargs = model_kwargs['decoding_kwargs']\n decoding_kwargs.update({\n 'eos': eos_token_id[0] if eos_token_id is not None else 2,\n 'edls': [],\n 'dls': [],\n 'fts': []\n })\n\n decoding_length = decoding_kwargs.get('decoding_length', 63)\n stop_max_length = stopping_criteria.max_length\n decoding_max_length = stop_max_length + decoding_length + 1\n attention_mask = model_kwargs.get('attention_mask', None)\n input_device = input_ids.device\n if attention_mask is None:\n bs = input_ids.size(0)\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long, device=input_device),\n 0)\n elif len(attention_mask.shape) == 2:\n # from [bs, src_len] to [bs,1,max_len,max_len]\n bs, src_len = attention_mask.shape\n pad_len = decoding_max_length - src_len\n attention_mask = attention_mask.long()\n if pad_len > 0:\n pad_mask = torch.ones((bs, pad_len), dtype=torch.long, device=attention_mask.device)\n attention_mask = torch.cat([attention_mask, pad_mask], 1)\n full_attention_mask = torch.tril(attention_mask[:, 
None, None].expand(-1, -1, decoding_max_length, -1), 0)\n elif len(attention_mask.shape) == 4:\n bs, _, src_len, tgt_len = attention_mask.shape\n attention_mask = attention_mask.long()\n if src_len < decoding_max_length or tgt_len < decoding_max_length:\n full_attention_mask = torch.tril(\n torch.ones((bs, 1, decoding_max_length, decoding_max_length), dtype=torch.long,\n device=input_device),\n 0)\n full_attention_mask[:, :, :src_len, :tgt_len] = attention_mask\n else:\n full_attention_mask = attention_mask\n else:\n raise ValueError(f'unsupport attention_mask.shape:{attention_mask.shape}')\n model_kwargs['attention_mask'] = full_attention_mask\n decoding_kwargs['max_length'] = stop_max_length\n decoding_kwargs['decoding_max_length'] = decoding_max_length\n\n # keep track of which sequences are already finished\n unfinished_sequences = input_ids.new_ones((input_ids.shape[0], 1)) # ones([bs, 1])\n\n # import time\n # pts = time.time()\n branch_length = decoding_kwargs.get('branch_length', 8)\n decoding_mode = decoding_kwargs.get('decoding_mode', 'hier')\n\n input_id_list = input_ids.tolist()\n for i, ids in enumerate(input_id_list):\n ids = ids[1:-1]\n self.lookahead_cache.put(ids, branch_length=branch_length + 1, mode='input', idx=i)\n # pitv = time.time()-pts\n # print(f'decoding_1:{round(pitv*1000,3)}ms')\n\n input_bs, input_length = input_ids.shape\n eos = decoding_kwargs.get('eos', 2)\n output_ids = torch.cat(\n [input_ids,\n eos * torch.ones((input_bs, stop_max_length - input_length), dtype=torch.long, device=input_ids.device)],\n dim=1)\n batch_indices = [i for i in range(input_bs)]\n decoding_kwargs['batch_indices'] = batch_indices\n model_kwargs['input_ids'] = input_ids\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n ts = time.time()\n\n # if use early stop func when batch size > 1\n # if use decoding, use_early_stop must be true, or it will exceed max length and cause error\n while True:\n if synced_gpus:\n # Under synced_gpus the `forward` call must continue until all gpus complete their sequence.\n # The following logic allows an early break if all peers finished generating their sequence\n this_peer_finished_flag = torch.tensor(0.0 if this_peer_finished else 1.0, device=input_ids.device)\n # send 0.0 if we finished, 1.0 otherwise\n dist.all_reduce(this_peer_finished_flag, op=dist.ReduceOp.SUM)\n # did all peers finish? 
the reduced sum will be 0.0 then\n if this_peer_finished_flag.item() == 0.0:\n break\n\n # prepare model inputs\n input_ids = model_kwargs.pop('input_ids', None)\n model_inputs = self.lookahead_prepare_inputs_for_generation(input_ids, **model_kwargs)\n decoding_kwargs = model_inputs.pop('decoding_kwargs', {})\n\n # forward pass to get next token\n outputs = self(\n **model_inputs,\n return_dict=True,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n decoding_kwargs=decoding_kwargs\n )\n\n if synced_gpus and this_peer_finished:\n continue # don't waste resources running the code we don't need\n\n model_kwargs['decoding_kwargs'] = decoding_kwargs\n\n model_kwargs = self._lookahead_update_model_kwargs_for_generation(\n outputs,\n model_kwargs,\n is_encoder_decoder=self.config.is_encoder_decoder,\n input_ids=input_ids,\n logits_processor=logits_processor\n )\n\n next_tokens = model_kwargs['next_tokens']\n next_tokens_scores = model_kwargs['next_tokens_scores']\n next_token_list = model_kwargs['next_token_list']\n\n # finished sentences should have their next token be a padding token\n if eos_token_id is not None:\n if pad_token_id is None:\n raise ValueError(\"If `eos_token_id` is defined, make sure that `pad_token_id` is defined.\")\n next_tokens = next_tokens * unfinished_sequences + pad_token_id * (1 - unfinished_sequences)\n\n # update generated ids, model inputs, and length for next step\n # input_ids = torch.cat([input_ids, next_tokens], dim=-1)\n if streamer is not None:\n streamer.put(next_tokens.cpu())\n\n batch_indices = model_kwargs['decoding_kwargs']['batch_indices']\n for k, tids in enumerate(next_token_list):\n tids = [x for x in tids if x != -1]\n batch_index = batch_indices[k]\n self.lookahead_cache.stream_put(tids, branch_length=branch_length + 1, final=False, mode='output',\n idx=batch_index)\n\n # Store scores, attentions and hidden_states when required\n if return_dict_in_generate:\n if output_scores:\n scores += (next_tokens_scores,)\n if output_attentions:\n decoder_attentions += (\n (outputs.decoder_attentions,) if self.config.is_encoder_decoder else (outputs.attentions,)\n )\n if self.config.is_encoder_decoder:\n cross_attentions += (outputs.cross_attentions,)\n\n if output_hidden_states:\n decoder_hidden_states += (\n (outputs.decoder_hidden_states,)\n if self.config.is_encoder_decoder\n else (outputs.hidden_states,)\n )\n\n # if eos_token was found in one sentence, set sentence to finished\n if eos_token_id_tensor is not None:\n # unfinished_sequences = unfinished_sequences.mul(\n # next_tokens.tile(eos_token_id_tensor.shape[0], 1).ne(eos_token_id_tensor.unsqueeze(1)).prod(dim=0)\n # )\n unfinished_sequences = unfinished_sequences.mul(\n next_tokens[:, :, None].ne(eos_token_id_tensor).prod(dim=2).prod(dim=1, keepdim=True))\n\n # stop when each sentence is finished\n if unfinished_sequences.max() == 0:\n this_peer_finished = True\n\n # stop if we exceed the maximum length\n decoding_cursors = decoding_kwargs['decoding_cursors']\n for i in range(input_ids.size(0)):\n cur = decoding_cursors[i]\n if stopping_criteria(input_ids[i:i + 1, :cur + 1], None):\n unfinished_sequences[i] = 0\n\n unfinished_sequences, output_ids, batch_indices, model_kwargs = self._early_stop(\n unfinished_sequences, output_ids, batch_indices, model_kwargs\n )\n te = time.time()\n decoding_kwargs['fts'].append(te - ts)\n ts = te\n if len(batch_indices) == 0:\n for i in range(input_bs):\n self.lookahead_cache.stream_put([], branch_length=branch_length + 1, 
final=True, mode='output',\n idx=i)\n max_cur = max(decoding_cursors)\n input_ids = output_ids[:, :max_cur + 1]\n break\n\n if streamer is not None:\n streamer.end()\n\n if return_dict_in_generate:\n if self.config.is_encoder_decoder:\n return GreedySearchEncoderDecoderOutput(\n sequences=input_ids,\n scores=scores,\n encoder_attentions=encoder_attentions,\n encoder_hidden_states=encoder_hidden_states,\n decoder_attentions=decoder_attentions,\n cross_attentions=cross_attentions,\n decoder_hidden_states=decoder_hidden_states,\n )\n else:\n kwargs = {'dls': model_kwargs['decoding_kwargs']['dls'],\n 'edls': model_kwargs['decoding_kwargs']['edls'],\n 'fts': model_kwargs['decoding_kwargs']['fts']}\n return LookaheadDecoderOnlyOutput(\n sequences=input_ids,\n scores=scores,\n attentions=decoder_attentions,\n hidden_states=decoder_hidden_states,\n kwargs=kwargs\n )\n else:\n return input_ids\n\n def _validate_model_kwargs(self, model_kwargs: Dict[str, Any]):\n \"\"\"Validates model kwargs for generation. Generate argument typos will also be caught here.\"\"\"\n # Excludes arguments that are handled before calling any model function\n if self.config.is_encoder_decoder:\n for key in [\"decoder_input_ids\"]:\n model_kwargs.pop(key, None)\n\n unused_model_args = []\n model_args = set(inspect.signature(self.prepare_inputs_for_generation).parameters)\n # `kwargs`/`model_kwargs` is often used to handle optional forward pass inputs like `attention_mask`. If\n # `prepare_inputs_for_generation` doesn't accept them, then a stricter check can be made ;)\n if \"kwargs\" in model_args or \"model_kwargs\" in model_args:\n model_args |= set(inspect.signature(self.forward).parameters)\n\n # Encoder-Decoder models may also need Encoder arguments from `model_kwargs`\n if self.config.is_encoder_decoder:\n base_model = getattr(self, self.base_model_prefix, None)\n\n # allow encoder kwargs\n encoder = getattr(self, \"encoder\", None)\n # `MusicgenForConditionalGeneration` has `text_encoder` and `audio_encoder`.\n # Also, it has `base_model_prefix = \"encoder_decoder\"` but there is no `self.encoder_decoder`\n # TODO: A better way to handle this.\n if encoder is None and base_model is not None:\n encoder = getattr(base_model, \"encoder\", None)\n\n if encoder is not None:\n encoder_model_args = set(inspect.signature(encoder.forward).parameters)\n model_args |= encoder_model_args\n\n # allow decoder kwargs\n decoder = getattr(self, \"decoder\", None)\n if decoder is None and base_model is not None:\n decoder = getattr(base_model, \"decoder\", None)\n\n if decoder is not None:\n decoder_model_args = set(inspect.signature(decoder.forward).parameters)\n model_args |= {f\"decoder_{x}\" for x in decoder_model_args}\n\n decoding_kwargs = ['decoding_kwargs']\n for key, value in model_kwargs.items():\n if value is not None and key not in model_args and key not in decoding_kwargs:\n unused_model_args.append(key)\n\n if unused_model_args:\n raise ValueError(\n f\"The following `model_kwargs` are not used by the model: {unused_model_args} (note: typos in the\"\n \" generate arguments will also show up in this list)\"\n )" }, { "identifier": "rmsnorm_wrapper", "path": "pia/lookahead/csrc/triton/rms_norm.py", "snippet": "def rmsnorm_wrapper(x, rms_weights, eps=1e-6):\n batch, M, K = x.shape\n out = torch.empty_like(x)\n rmsnorm_triton[(batch, M,)](x, rms_weights, out,\n *x.stride(),\n *rms_weights.stride(), # 1\n *out.stride(),\n N_SIZE=K, eps=eps, BLOCK_N_SIZE=4096,\n num_warps=16\n )\n return out" } ]
import math import os import torch import torch.nn.functional as F import torch.utils.checkpoint from typing import List, Optional, Tuple, Union from torch import nn from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers.activations import ACT2FN from transformers.modeling_outputs import BaseModelOutputWithPast, CausalLMOutputWithPast, \ SequenceClassifierOutputWithPast from transformers.models.llama.configuration_llama import LlamaConfig from transformers.utils import add_start_docstrings, add_start_docstrings_to_model_forward, logging, \ replace_return_docstrings from pia.lookahead.common.pretrained_model_batch import LookaheadPreTrainedModel from pia.lookahead.csrc.triton.rms_norm import rmsnorm_wrapper
16207
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LLaMA model.""" # from transformers.modeling_utils import PreTrainedModel os.environ['TOKENIZERS_PARALLELISM'] = 'false' logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states):
# coding=utf-8 # Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved. # # This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX # and OPT implementations in this library. It has been modified from its # original forms to accommodate minor architectural differences compared # to GPT-NeoX and OPT used by the Meta AI team that trained the model. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch LLaMA model.""" # from transformers.modeling_utils import PreTrainedModel os.environ['TOKENIZERS_PARALLELISM'] = 'false' logger = logging.get_logger(__name__) _CONFIG_FOR_DOC = "LlamaConfig" # Copied from transformers.models.bart.modeling_bart._make_causal_mask def _make_causal_mask( input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0 ): """ Make causal mask used for bi-directional self-attention. """ bsz, tgt_len = input_ids_shape mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device) mask_cond = torch.arange(mask.size(-1), device=device) mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0) mask = mask.to(dtype) if past_key_values_length > 0: mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1) return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length) # Copied from transformers.models.bart.modeling_bart._expand_mask def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None): """ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`. """ bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class LlamaRMSNorm(nn.Module): def __init__(self, hidden_size, eps=1e-6): """ LlamaRMSNorm is equivalent to T5LayerNorm """ super().__init__() self.weight = nn.Parameter(torch.ones(hidden_size)) self.variance_epsilon = eps def forward(self, hidden_states):
return rmsnorm_wrapper(hidden_states, self.weight, eps=self.variance_epsilon)
1
2023-12-19 13:11:38+00:00
24k
MingtaoGuo/AnimateAnyone_unofficial
aldm/aldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "SpatialTransformerPlus", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformerPlus(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True, use_temporal_attention=False):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n self.spatial_attn = SpatialSelfAttention(in_channels)\n if use_temporal_attention:\n self.temporal_attn = TemporalTransformer(in_channels)\n\n def forward(self, x, context=None, ref=None):\n x = torch.cat([x, ref], dim=-1)\n x = self.spatial_attn(x)\n x = x[..., :ref.shape[-1]]\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w -> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "def convert_module_to_f16(x):\ndef convert_module_to_f32(x):\n def __init__(\n self,\n spacial_dim: int,\n embed_dim: int,\n num_heads_channels: int,\n output_dim: int = None,\n ):\n def forward(self, x):\n def forward(self, x, emb):\n def forward(self, x, emb, context=None):\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1):\n def forward(self, x):\n def __init__(self, channels, out_channels=None, ks=5):\n def forward(self,x):\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n def forward(self, x):\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, 
x, emb):\n def _forward(self, x, emb):\n def __init__(\n self,\n channels,\n dropout,\n out_channels=None,\n use_conv=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n def forward(self, x):\n def _forward(self, x):\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n def forward(self, x):\n def _forward(self, x):\ndef count_flops_attn(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(self, n_heads):\n def forward(self, qkv):\n def count_flops(model, _x, y):\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n def convert_to_fp16(self):\n def convert_to_fp32(self):\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\nclass AttentionPool2d(nn.Module):\nclass TimestepBlock(nn.Module):\nclass TimestepEmbedSequential(nn.Sequential, TimestepBlock):\nclass Upsample(nn.Module):\nclass TransposedUpsample(nn.Module):\nclass Downsample(nn.Module):\nclass ResBlock(TimestepBlock):\nclass ResBlockNoTime(TimestepBlock):\nclass AttentionBlock(nn.Module):\nclass QKVAttentionLegacy(nn.Module):\nclass QKVAttention(nn.Module):\nclass UNetModel(nn.Module):" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = 
scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. 
/ z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n 
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and 
not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', 'txt', 'vision']:\n xc = batch[cond_key]\n xc = rearrange(xc, 'b h w c -> b c h w')\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n \n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support 
dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return 
self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = 
self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = {\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. 
* (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index 
= total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / 
a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
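The p_sample_ddim snippet above interleaves classifier-free guidance with the DDIM update rule. Condensed into a standalone helper, the core arithmetic looks roughly like the following sketch (the function name and argument names are mine; a_t, a_prev and sigma_t are assumed to be broadcastable tensors taken from the sampler's schedule, and only the eps-parameterization path without quantization or dynamic thresholding is shown):

import torch

def cfg_ddim_step(x, e_uncond, e_cond, a_t, a_prev, sigma_t, guidance_scale):
    # Classifier-free guidance: move the conditional noise prediction away from
    # the unconditional one, as in p_sample_ddim above.
    e_t = e_uncond + guidance_scale * (e_cond - e_uncond)
    # Current estimate of the clean latent x_0 (eps-parameterization).
    pred_x0 = (x - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()
    # Deterministic direction back towards x_{t-1}, plus a stochastic term when sigma_t > 0.
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t
    noise = sigma_t * torch.randn_like(x)
    return a_prev.sqrt() * pred_x0 + dir_xt + noise

With guidance_scale equal to 1 the guided prediction collapses to e_cond, which matches the unguided branch of the sampler.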
import einops import torch import torch as th import torch.nn as nn from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer, SpatialTransformerPlus from ldm.modules.diffusionmodules.openaimodel import ResBlock, TimestepEmbedSequential, Downsample, AttentionBlock, Upsample, normalization, checkpoint, convert_module_to_f16, convert_module_to_f32 from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig from omegaconf.listconfig import ListConfig
19184
use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. """ self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" refs = [] hs = []
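The decoder loop in the cropped context above builds each output block with ch + ich input channels, where ich is popped from input_block_chans, the stack of encoder widths recorded on the way down. A minimal, self-contained sketch of that bookkeeping with toy values (the numbers are illustrative and not taken from this sample):

channel_mult = (1, 2, 4, 8)        # toy values for illustration only
model_channels = 64
num_res_blocks = [2, 2, 2, 2]

# Encoder side: record the feature width after every residual block and after
# every downsample, exactly once per block.
input_block_chans = [model_channels]
ch = model_channels
for level, mult in enumerate(channel_mult):
    for _ in range(num_res_blocks[level]):
        ch = mult * model_channels
        input_block_chans.append(ch)
    if level != len(channel_mult) - 1:
        input_block_chans.append(ch)   # width is unchanged by the downsample

# Decoder side: each block concatenates one saved encoder feature, so its input
# width is the current width plus the popped encoder width.
decoder_in_widths = []
for level, mult in reversed(list(enumerate(channel_mult))):
    for _ in range(num_res_blocks[level] + 1):
        ich = input_block_chans.pop()
        decoder_in_widths.append(ch + ich)
        ch = model_channels * mult
assert not input_block_chans           # every saved width is consumed exactly once
print(decoder_in_widths)

The pops must mirror the pushes one-to-one; an off-by-one in the per-level block counts would leave the stack non-empty and break the concatenation widths.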
class ReferenceNet(nn.Module): """ The full UNet model with attention and timestep embedding. :param in_channels: channels in the input Tensor. :param model_channels: base channel count for the model. :param out_channels: channels in the output Tensor. :param num_res_blocks: number of residual blocks per downsample. :param attention_resolutions: a collection of downsample rates at which attention will take place. May be a set, list, or tuple. For example, if this contains 4, then at 4x downsampling, attention will be used. :param dropout: the dropout probability. :param channel_mult: channel multiplier for each level of the UNet. :param conv_resample: if True, use learned convolutions for upsampling and downsampling. :param dims: determines if the signal is 1D, 2D, or 3D. :param num_classes: if specified (as an int), then this model will be class-conditional with `num_classes` classes. :param use_checkpoint: use gradient checkpointing to reduce memory usage. :param num_heads: the number of attention heads in each attention layer. :param num_heads_channels: if specified, ignore num_heads and instead use a fixed channel width per attention head. :param num_heads_upsample: works with num_heads to set a different number of heads for upsampling. Deprecated. :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. :param resblock_updown: use residual blocks for up/downsampling. :param use_new_attention_order: use a different attention pattern for potentially increased efficiency. """ def __init__( self, image_size, in_channels, model_channels, num_res_blocks, attention_resolutions, dropout=0, channel_mult=(1, 2, 4, 8), conv_resample=True, dims=2, num_classes=None, use_checkpoint=False, use_fp16=False, num_heads=-1, num_head_channels=-1, num_heads_upsample=-1, use_scale_shift_norm=False, resblock_updown=False, use_new_attention_order=False, use_spatial_transformer=False, # custom transformer support transformer_depth=1, # custom transformer support context_dim=None, # custom transformer support n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model legacy=True, disable_self_attentions=None, num_attention_blocks=None, disable_middle_self_attn=False, use_linear_in_transformer=False, ): super().__init__() if use_spatial_transformer: assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' if context_dim is not None: assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
if type(context_dim) == ListConfig: context_dim = list(context_dim) if num_heads_upsample == -1: num_heads_upsample = num_heads if num_heads == -1: assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' if num_head_channels == -1: assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' self.image_size = image_size self.in_channels = in_channels self.model_channels = model_channels if isinstance(num_res_blocks, int): self.num_res_blocks = len(channel_mult) * [num_res_blocks] else: if len(num_res_blocks) != len(channel_mult): raise ValueError("provide num_res_blocks either as an int (globally constant) or " "as a list/tuple (per-level) with the same length as channel_mult") self.num_res_blocks = num_res_blocks if disable_self_attentions is not None: # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not assert len(disable_self_attentions) == len(channel_mult) if num_attention_blocks is not None: assert len(num_attention_blocks) == len(self.num_res_blocks) assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks)))) print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. " f"This option has LESS priority than attention_resolutions {attention_resolutions}, " f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, " f"attention will still not be set.") self.attention_resolutions = attention_resolutions self.dropout = dropout self.channel_mult = channel_mult self.conv_resample = conv_resample self.num_classes = num_classes self.use_checkpoint = use_checkpoint self.dtype = th.float16 if use_fp16 else th.float32 self.num_heads = num_heads self.num_head_channels = num_head_channels self.num_heads_upsample = num_heads_upsample self.predict_codebook_ids = n_embed is not None time_embed_dim = model_channels * 4 self.time_embed = nn.Sequential( linear(model_channels, time_embed_dim), nn.SiLU(), linear(time_embed_dim, time_embed_dim), ) self.input_blocks = nn.ModuleList( [ TimestepEmbedSequential( conv_nd(dims, in_channels, model_channels, 3, padding=1) ) ] ) self._feature_size = model_channels input_block_chans = [model_channels] ch = model_channels ds = 1 for level, mult in enumerate(channel_mult): for nr in range(self.num_res_blocks[level]): layers = [ ResBlock( ch, time_embed_dim, dropout, out_channels=mult * model_channels, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = mult * model_channels if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or nr < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) self.input_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch input_block_chans.append(ch) if level != 
len(channel_mult) - 1: out_ch = ch self.input_blocks.append( TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, down=True, ) if resblock_updown else Downsample( ch, conv_resample, dims=dims, out_channels=out_ch ) ) ) ch = out_ch input_block_chans.append(ch) ds *= 2 self._feature_size += ch if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels self.middle_block = TimestepEmbedSequential( ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ), ResBlock( ch, time_embed_dim, dropout, out_channels=ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ), ) self._feature_size += ch self.output_blocks = nn.ModuleList([]) for level, mult in list(enumerate(channel_mult))[::-1]: for i in range(self.num_res_blocks[level] + 1): ich = input_block_chans.pop() layers = [ ResBlock( ch + ich, time_embed_dim, dropout, out_channels=model_channels * mult, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, ) ] ch = model_channels * mult if ds in attention_resolutions: if num_head_channels == -1: dim_head = ch // num_heads else: num_heads = ch // num_head_channels dim_head = num_head_channels if legacy: #num_heads = 1 dim_head = ch // num_heads if use_spatial_transformer else num_head_channels if exists(disable_self_attentions): disabled_sa = disable_self_attentions[level] else: disabled_sa = False if not exists(num_attention_blocks) or i < num_attention_blocks[level]: layers.append( AttentionBlock( ch, use_checkpoint=use_checkpoint, num_heads=num_heads_upsample, num_head_channels=dim_head, use_new_attention_order=use_new_attention_order, ) if not use_spatial_transformer else SpatialTransformer( ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim, disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer, use_checkpoint=use_checkpoint ) ) if level and i == self.num_res_blocks[level]: out_ch = ch layers.append( ResBlock( ch, time_embed_dim, dropout, out_channels=out_ch, dims=dims, use_checkpoint=use_checkpoint, use_scale_shift_norm=use_scale_shift_norm, up=True, ) if resblock_updown else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) ) ds //= 2 self.output_blocks.append(TimestepEmbedSequential(*layers)) self._feature_size += ch def convert_to_fp16(self): """ Convert the torso of the model to float16. """ self.input_blocks.apply(convert_module_to_f16) self.middle_block.apply(convert_module_to_f16) self.output_blocks.apply(convert_module_to_f16) def convert_to_fp32(self): """ Convert the torso of the model to float32. 
""" self.input_blocks.apply(convert_module_to_f32) self.middle_block.apply(convert_module_to_f32) self.output_blocks.apply(convert_module_to_f32) def forward(self, x, timesteps=None, context=None, y=None,**kwargs): """ Apply the model to an input batch. :param x: an [N x C x ...] Tensor of inputs. :param timesteps: a 1-D batch of timesteps. :param context: conditioning plugged in via crossattn :param y: an [N] Tensor of labels, if class-conditional. :return: an [N x C x ...] Tensor of outputs. """ assert (y is not None) == ( self.num_classes is not None ), "must specify y if and only if the model is class-conditional" refs = [] hs = []
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
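The reference next line feeds the raw timesteps through timestep_embedding before self.time_embed projects them to time_embed_dim. The ldm utility of that name is conventionally the fixed sinusoidal embedding; a sketch of that computation, and of how the forward pass typically continues in this family of UNets, is given below. Both are illustrative reconstructions rather than code copied from the repository, and the ReferenceNet-specific use of refs, which is not visible in the excerpt, is omitted.

import math
import torch

def sinusoidal_timestep_embedding(timesteps, dim, max_period=10000):
    # Fixed (non-learned) sinusoidal embedding of integer diffusion timesteps.
    half = dim // 2
    freqs = torch.exp(
        -math.log(max_period) * torch.arange(half, dtype=torch.float32) / half
    ).to(timesteps.device)
    args = timesteps[:, None].float() * freqs[None]
    emb = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
    if dim % 2:                         # pad odd dimensions with a zero column
        emb = torch.cat([emb, torch.zeros_like(emb[:, :1])], dim=-1)
    return emb

Downstream, the canonical forward pass threads the resulting embedding through every block while maintaining the skip stack hs initialized above:

import torch as th
from ldm.modules.diffusionmodules.util import timestep_embedding

def unet_forward_skeleton(model, x, timesteps, context=None):
    # Mirrors the standard openaimodel UNetModel.forward; class-conditional input y
    # and the ReferenceNet's refs list are intentionally left out of this sketch.
    hs = []
    t_emb = timestep_embedding(timesteps, model.model_channels, repeat_only=False)
    emb = model.time_embed(t_emb)

    h = x.type(model.dtype)
    for module in model.input_blocks:
        h = module(h, emb, context)
        hs.append(h)                      # save encoder features for the skips
    h = model.middle_block(h, emb, context)
    for module in model.output_blocks:
        h = th.cat([h, hs.pop()], dim=1)  # concatenate the matching encoder feature
        h = module(h, emb, context)
    return h.type(x.dtype)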
3
2023-12-16 03:31:33+00:00
24k
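Each record in this dump pairs a truncated in-file context, which ends at refs = [] and hs = [] above, with the single reference line that follows it, here the t_emb = timestep_embedding(...) call. One plausible way to score a model's completion against such a record is exact match after whitespace normalization; the helper below is only a sketch of that idea, and the metric itself is an assumption rather than something specified by the dump:

def normalize(line: str) -> str:
    # Ignore indentation and internal spacing differences when comparing lines.
    return " ".join(line.strip().split())

def exact_match(predicted_line: str, gold_line: str) -> bool:
    return normalize(predicted_line) == normalize(gold_line)

# For the record above, gold_line would be:
# "t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)"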
yasserben/CLOUDS
train_net.py
[ { "identifier": "add_maskformer2_config", "path": "clouds/config.py", "snippet": "def add_maskformer2_config(cfg):\n \"\"\"\n Add config for MASK_FORMER.\n \"\"\"\n # NOTE: configs from original maskformer\n # data config\n # select the dataset mapper\n cfg.INPUT.DATASET_MAPPER_NAME = \"mask_former_semantic\"\n # Color augmentation\n cfg.INPUT.COLOR_AUG_SSD = False\n # We retry random cropping until no single category in semantic segmentation GT occupies more\n # than `SINGLE_CATEGORY_MAX_AREA` part of the crop.\n cfg.INPUT.CROP.SINGLE_CATEGORY_MAX_AREA = 1.0\n # Pad image and segmentation GT in dataset mapper.\n cfg.INPUT.SIZE_DIVISIBILITY = -1\n\n # solver config\n # weight decay on embedding\n cfg.SOLVER.WEIGHT_DECAY_EMBED = 0.0\n # optimizer\n cfg.SOLVER.OPTIMIZER = \"ADAMW\"\n cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1\n\n # mask_former model config\n cfg.MODEL.MASK_FORMER = CN()\n\n # loss\n cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION = True\n cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT = 0.1\n cfg.MODEL.MASK_FORMER.CLASS_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.DICE_WEIGHT = 1.0\n cfg.MODEL.MASK_FORMER.MASK_WEIGHT = 20.0\n\n # transformer config\n cfg.MODEL.MASK_FORMER.NHEADS = 8\n cfg.MODEL.MASK_FORMER.DROPOUT = 0.1\n cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD = 2048\n cfg.MODEL.MASK_FORMER.ENC_LAYERS = 0\n cfg.MODEL.MASK_FORMER.DEC_LAYERS = 6\n cfg.MODEL.MASK_FORMER.PRE_NORM = False\n\n cfg.MODEL.MASK_FORMER.HIDDEN_DIM = 256\n cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES = 100\n\n cfg.MODEL.MASK_FORMER.TRANSFORMER_IN_FEATURE = \"res5\"\n cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ = False\n\n # mask_former inference config\n cfg.MODEL.MASK_FORMER.TEST = CN()\n cfg.MODEL.MASK_FORMER.TEST.SEMANTIC_ON = True\n cfg.MODEL.MASK_FORMER.TEST.INSTANCE_ON = False\n cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON = False\n cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD = 0.0\n cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE = False\n\n # Sometimes `backbone.size_divisibility` is set to 0 for some backbone (e.g. 
ResNet)\n # you can use this config to override\n cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY = 32\n\n # pixel decoder config\n cfg.MODEL.SEM_SEG_HEAD.MASK_DIM = 256\n # adding transformer in pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.TRANSFORMER_ENC_LAYERS = 0\n # pixel decoder\n cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME = \"BasePixelDecoder\"\n\n # swin transformer backbone\n cfg.MODEL.SWIN = CN()\n cfg.MODEL.SWIN.PRETRAIN_IMG_SIZE = 224\n cfg.MODEL.SWIN.PATCH_SIZE = 4\n cfg.MODEL.SWIN.EMBED_DIM = 96\n cfg.MODEL.SWIN.DEPTHS = [2, 2, 6, 2]\n cfg.MODEL.SWIN.NUM_HEADS = [3, 6, 12, 24]\n cfg.MODEL.SWIN.WINDOW_SIZE = 7\n cfg.MODEL.SWIN.MLP_RATIO = 4.0\n cfg.MODEL.SWIN.QKV_BIAS = True\n cfg.MODEL.SWIN.QK_SCALE = None\n cfg.MODEL.SWIN.DROP_RATE = 0.0\n cfg.MODEL.SWIN.ATTN_DROP_RATE = 0.0\n cfg.MODEL.SWIN.DROP_PATH_RATE = 0.3\n cfg.MODEL.SWIN.APE = False\n cfg.MODEL.SWIN.PATCH_NORM = True\n cfg.MODEL.SWIN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n cfg.MODEL.SWIN.USE_CHECKPOINT = False\n\n # NOTE: maskformer2 extra configs\n # transformer module\n cfg.MODEL.MASK_FORMER.TRANSFORMER_DECODER_NAME = (\n \"MultiScaleMaskedTransformerDecoder\"\n )\n\n # LSJ aug\n cfg.INPUT.IMAGE_SIZE = 1024\n cfg.INPUT.MIN_SCALE = 0.1\n cfg.INPUT.MAX_SCALE = 2.0\n\n # MSDeformAttn encoder configs\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES = [\n \"res3\",\n \"res4\",\n \"res5\",\n ]\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_POINTS = 4\n cfg.MODEL.SEM_SEG_HEAD.DEFORMABLE_TRANSFORMER_ENCODER_N_HEADS = 8\n\n # point loss configs\n # Number of points sampled during training for a mask point head.\n cfg.MODEL.MASK_FORMER.TRAIN_NUM_POINTS = 112 * 112\n # Oversampling parameter for PointRend point sampling during training. Parameter `k` in the\n # original paper.\n cfg.MODEL.MASK_FORMER.OVERSAMPLE_RATIO = 3.0\n # Importance sampling parameter for PointRend point sampling during training. 
Parametr `beta` in\n # the original paper.\n cfg.MODEL.MASK_FORMER.IMPORTANCE_SAMPLE_RATIO = 0.75\n\n # Resizing disabled for Synthia\n cfg.INPUT.RESIZE = CN()\n cfg.INPUT.RESIZE.ENABLED = True\n cfg.INPUT.RESIZE.SIZE_TRAIN = (1280, 720)\n\n # Saving Pseudo Labels during test time\n cfg.MODEL.SAVE_PSEUDO_LABELS = False\n\n # for the Dataset repeat factor\n # cfg.DATASETS.TRAIN_REPEAT_FACTOR = [(\"sd_v99\",5.0), (\"cityscapes_train\",1.0)]" }, { "identifier": "add_clouds_config", "path": "clouds/config.py", "snippet": "def add_clouds_config(cfg):\n # CLOUDS model config\n cfg.MODEL.CLOUDS = CN()\n cfg.MODEL.CLOUDS.CLIP_MODEL_NAME = \"convnext_large_d_320\"\n cfg.MODEL.CLOUDS.CLIP_PRETRAINED_WEIGHTS = \"laion2b_s29b_b131k_ft_soup\"\n cfg.MODEL.CLOUDS.EMBED_DIM = 768\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_ALPHA = 0.4\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_BETA = 0.8\n cfg.MODEL.CLOUDS.ENSEMBLE_ON_VALID_MASK = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE = False\n cfg.MODEL.CLOUDS.GEOMETRIC_ENSEMBLE_EMA = False\n cfg.MODEL.CLOUDS.SAM = CN()\n cfg.MODEL.CLOUDS.SAM.ENABLED = False\n cfg.MODEL.CLOUDS.SAM.MOBILE = True\n cfg.MODEL.CLOUDS.SAM.MINIBATCH = False\n cfg.MODEL.CLOUDS.SAM.SIZE_THRESHOLD = 5000\n cfg.MODEL.CLOUDS.SAM.EROSION = False\n cfg.MODEL.CLOUDS.SAM.EROSION_SIZE = 3\n cfg.MODEL.CLOUDS.SAM.NUM_POINTS = 5\n cfg.MODEL.CLOUDS.SAM.SELECTION_MODE = \"random\"\n cfg.MODEL.CLOUDS.SAM.RM_INTERSECTION = True\n cfg.MODEL.CLOUDS.SAM.REFINEMENT = False\n cfg.MODEL.CLOUDS.SAM.ALPHA_EMA = 0.999\n cfg.MODEL.CLOUDS.OVERWRITING = True\n cfg.MODEL.CLOUDS.ITERATION_UPDATE = 100" }, { "identifier": "add_wandb_config", "path": "clouds/config.py", "snippet": "def add_wandb_config(cfg):\n # Wandb\n cfg.WANDB = CN()\n cfg.WANDB.PROJECT = \"clouds\"\n cfg.WANDB.NAME = None\n # use flash attention\n cfg.MODEL.FLASH = False" }, { "identifier": "add_prerocessing_training_set_config", "path": "clouds/config.py", "snippet": "def add_prerocessing_training_set_config(cfg):\n cfg.INPUT.FLIP = True\n cfg.INPUT.INITIAL_HEIGHT = 1052\n cfg.INPUT.INITIAL_WIDTH = 1914\n cfg.INPUT.RESIZE_HEIGHT = 720\n cfg.INPUT.RESIZE_WIDTH = 1280\n cfg.INPUT.PL_THRESHOLD = 0.0\n\n cfg.DATASETS.SOURCE_FACTOR = 1.0\n cfg.DATASETS.TARGET_FACTOR = 1.0" }, { "identifier": "add_repeat_factors", "path": "clouds/config.py", "snippet": "def add_repeat_factors(cfg):\n # for the Dataset repeat factor\n if (\n len(cfg.DATASETS.TRAIN) == 2\n and cfg.DATALOADER.SAMPLER_TRAIN == \"WeightedTrainingSampler\"\n ):\n if \"sd\" in cfg.DATASETS.TRAIN[0]:\n target_dataset = cfg.DATASETS.TRAIN[0]\n source_dataset = cfg.DATASETS.TRAIN[1]\n else:\n target_dataset = cfg.DATASETS.TRAIN[1]\n source_dataset = cfg.DATASETS.TRAIN[0]\n\n TRAIN_REPEAT_FACTOR = [\n (target_dataset, cfg.DATASETS.TARGET_FACTOR),\n (source_dataset, cfg.DATASETS.SOURCE_FACTOR),\n ]\n cfg.DATASETS.TRAIN_REPEAT_FACTOR = TRAIN_REPEAT_FACTOR\n return cfg\n else:\n return cfg" }, { "identifier": "MapperTrain", "path": "clouds/data/dataset_mappers/mapper_train.py", "snippet": "class MapperTrain:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by MaskFormer for semantic segmentation.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies geometric transforms to the image and annotation\n 3. Find and applies suitable cropping to the image and annotation\n 4. 
Prepare image and annotation to Tensors\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train=True,\n *,\n augmentations_src,\n augmentations_sd,\n augmentations_photo,\n image_format,\n ignore_label,\n size_divisibility,\n ):\n \"\"\"\n NOTE: this interface is experimental.\n Args:\n is_train: for training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n ignore_label: the label that is ignored to evaluation\n size_divisibility: pad image size to be divisible by this value\n \"\"\"\n self.is_train = is_train\n self.tfm_gens_src = augmentations_src\n self.tfm_gens_sd = augmentations_sd\n self.tfm_gens_photometric = augmentations_photo\n self.img_format = image_format\n self.ignore_label = ignore_label\n self.size_divisibility = size_divisibility\n\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(\n f\"[{self.__class__.__name__}] Augmentations used in {mode}: {augmentations_src}\"\n )\n\n @classmethod\n def from_config(cls, cfg, is_train=True):\n augs_src = []\n augs_sd = []\n augs_photometric = []\n # Build augmentation\n if cfg.INPUT.RESIZE.ENABLED:\n augs_src.append(\n T.ResizeScale(\n min_scale=0.5,\n max_scale=2.0,\n target_height=cfg.INPUT.INITIAL_HEIGHT,\n target_width=cfg.INPUT.INITIAL_WIDTH,\n interp=Image.BILINEAR,\n )\n )\n if cfg.INPUT.CROP.ENABLED:\n augs_src.append(\n T.FixedSizeCrop(\n (768, 768),\n pad=True,\n seg_pad_value=255,\n pad_value=0,\n )\n )\n if cfg.INPUT.COLOR_AUG_SSD:\n augs_src.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n augs_photometric.append(ColorAugSSDTransform(img_format=cfg.INPUT.FORMAT))\n if cfg.INPUT.FLIP:\n augs_src.append(T.RandomFlip())\n augs_sd.append(T.RandomFlip())\n\n # Assume always applies to the training set.\n dataset_names = cfg.DATASETS.TRAIN\n meta = MetadataCatalog.get(dataset_names[0])\n ignore_label = meta.ignore_label\n\n ret = {\n \"is_train\": is_train,\n \"augmentations_src\": augs_src,\n \"augmentations_sd\": augs_sd,\n \"augmentations_photo\": augs_photometric,\n \"image_format\": cfg.INPUT.FORMAT,\n \"ignore_label\": ignore_label,\n \"size_divisibility\": cfg.INPUT.SIZE_DIVISIBILITY,\n }\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n assert (\n self.is_train\n ), \"MaskFormerSemanticDatasetMapper should only be used for training!\"\n\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.img_format)\n utils.check_image_size(dataset_dict, image)\n\n if \"sem_seg_file_name\" in dataset_dict:\n # PyTorch transformation not implemented for uint16, so converting it to double first\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\")).astype(\n \"double\"\n )\n else:\n sem_seg_gt = np.full(\n (dataset_dict[\"height\"], dataset_dict[\"width\"]), self.ignore_label\n ).astype(\"double\")\n\n if sem_seg_gt is None:\n raise ValueError(\n \"Cannot find 'sem_seg_file_name' for semantic segmentation dataset {}.\".format(\n dataset_dict[\"file_name\"]\n )\n )\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n if not (\"generated\" in str(dataset_dict[\"image_id\"])):\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_src, aug_input)\n 
image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n else:\n aug_input, transforms = T.apply_transform_gens(self.tfm_gens_sd, aug_input)\n image = aug_input.image\n sem_seg_gt = aug_input.sem_seg\n aug_input_photo, transforms = T.apply_transform_gens(\n self.tfm_gens_photometric, aug_input\n )\n image_aug = aug_input_photo.image\n\n # Pad image and segmentation label here!\n image = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = torch.as_tensor(\n np.ascontiguousarray(image_aug.transpose(2, 0, 1))\n )\n if sem_seg_gt is not None:\n sem_seg_gt = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n if self.size_divisibility > 0:\n image_size = (image.shape[-2], image.shape[-1])\n padding_size = [\n 0,\n self.size_divisibility - image_size[1],\n 0,\n self.size_divisibility - image_size[0],\n ]\n image = F.pad(image, padding_size, value=128).contiguous()\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n image_aug = F.pad(image_aug, padding_size, value=128).contiguous()\n if sem_seg_gt is not None:\n sem_seg_gt = F.pad(\n sem_seg_gt, padding_size, value=self.ignore_label\n ).contiguous()\n\n image_shape = (image.shape[-2], image.shape[-1]) # h, w\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = image\n if \"generated\" in str(dataset_dict[\"image_id\"]):\n dataset_dict[\"image_aug\"] = image_aug\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = sem_seg_gt.long()\n\n if \"annotations\" in dataset_dict:\n raise ValueError(\n \"Semantic segmentation dataset should not have 'annotations'.\"\n )\n\n # Prepare per-category binary masks\n if sem_seg_gt is not None:\n sem_seg_gt = sem_seg_gt.numpy()\n instances = Instances(image_shape)\n classes = np.unique(sem_seg_gt)\n # remove ignored region\n classes = classes[classes != self.ignore_label]\n instances.gt_classes = torch.tensor(classes, dtype=torch.int64)\n\n masks = []\n for class_id in classes:\n masks.append(sem_seg_gt == class_id)\n\n if len(masks) == 0:\n # Some image does not have annotation (all ignored)\n instances.gt_masks = torch.zeros(\n (0, sem_seg_gt.shape[-2], sem_seg_gt.shape[-1])\n )\n else:\n masks = BitMasks(\n torch.stack(\n [\n torch.from_numpy(np.ascontiguousarray(x.copy()))\n for x in masks\n ]\n )\n )\n instances.gt_masks = masks.tensor\n\n dataset_dict[\"instances\"] = instances\n\n return dataset_dict" }, { "identifier": "MapperTest", "path": "clouds/data/dataset_mappers/mapper_test.py", "snippet": "class MapperTest:\n \"\"\"\n A callable which takes a dataset dict in Detectron2 Dataset format,\n and map it into a format used by the model.\n\n This is the default callable to be used to map your dataset dict into training data.\n You may need to follow it to implement your own one for customized logic,\n such as a different way to read or transform images.\n See :doc:`/tutorials/data_loading` for details.\n\n The callable currently does the following:\n\n 1. Read the image from \"file_name\"\n 2. Applies cropping/geometric transforms to the image and annotations\n 3. 
Prepare data and annotations to Tensor and :class:`Instances`\n \"\"\"\n\n @configurable\n def __init__(\n self,\n is_train: bool,\n *,\n augmentations: List[Union[T.Augmentation, T.Transform]],\n image_format: str,\n\n ):\n \"\"\"\n NOTE: this interface is experimental.\n\n Args:\n is_train: whether it's used in training or inference\n augmentations: a list of augmentations or deterministic transforms to apply\n image_format: an image format supported by :func:`detection_utils.read_image`.\n \"\"\"\n # if recompute_boxes:\n # assert use_instance_mask, \"recompute_boxes requires instance masks\"\n # fmt: off\n self.is_train = is_train\n self.augmentations = augmentations\n self.image_format = image_format\n logger = logging.getLogger(__name__)\n mode = \"training\" if is_train else \"inference\"\n logger.info(f\"[DatasetMapper] Augmentations used in {mode}: {augmentations}\")\n\n @classmethod\n def from_config(cls, cfg, is_train: bool = True):\n augs = [T.ResizeShortestEdge(short_edge_length=[1024], sample_style=\"choice\")]\n\n ret = {\n \"is_train\": is_train,\n \"augmentations\": augs,\n \"image_format\": cfg.INPUT.FORMAT,\n }\n\n\n return ret\n\n def __call__(self, dataset_dict):\n \"\"\"\n Args:\n dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.\n\n Returns:\n dict: a format that builtin models in detectron2 accept\n \"\"\"\n dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below\n # USER: Write your own image loading if it's not from a file\n image = utils.read_image(dataset_dict[\"file_name\"], format=self.image_format)\n utils.check_image_size(dataset_dict, image)\n\n # USER: Remove if you don't do semantic/panoptic segmentation.\n if \"sem_seg_file_name\" in dataset_dict:\n sem_seg_gt = utils.read_image(dataset_dict.pop(\"sem_seg_file_name\"), \"L\").squeeze(2)\n else:\n sem_seg_gt = None\n\n aug_input = T.AugInput(image, sem_seg=sem_seg_gt)\n aug_input, transformation = T.apply_transform_gens(self.augmentations, aug_input)\n image, sem_seg_gt = aug_input.image, aug_input.sem_seg\n\n # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,\n # but not efficient on large generic data structures due to the use of pickle & mp.Queue.\n # Therefore it's important to use torch.Tensor.\n dataset_dict[\"image\"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))\n\n if sem_seg_gt is not None:\n dataset_dict[\"sem_seg\"] = torch.as_tensor(sem_seg_gt.astype(\"long\"))\n\n dataset_dict['height'] = dataset_dict[\"image\"].shape[1]\n dataset_dict['width'] = dataset_dict[\"image\"].shape[2]\n if not self.is_train:\n # USER: Modify this if you want to keep them for some reason.\n dataset_dict.pop(\"sem_seg_file_name\", None)\n return dataset_dict\n\n return dataset_dict" }, { "identifier": "CityscapesSemSegEvaluator", "path": "clouds/evaluation/cityscapes_evaluation.py", "snippet": "class CityscapesSemSegEvaluator(CityscapesEvaluator):\n \"\"\"\n Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.\n\n Note:\n * It does not work in multi-machine distributed training.\n * It contains a synchronization, therefore has to be used on all ranks.\n * Only the main process runs evaluation.\n \"\"\"\n\n def process(self, inputs, outputs):\n from cityscapesscripts.helpers.labels import trainId2label\n for input, output in zip(inputs, outputs):\n file_name = input[\"file_name\"]\n basename = os.path.splitext(os.path.basename(file_name))[0]\n pred_filename = os.path.join(self._temp_dir, basename + 
\"_pred.png\")\n\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device).numpy()\n pred = 255 * np.ones(output.shape, dtype=np.uint8)\n for train_id, label in trainId2label.items():\n if label.ignoreInEval:\n continue\n pred[output == train_id] = label.id\n Image.fromarray(pred).save(pred_filename)\n\n\n def evaluate(self):\n comm.synchronize()\n if comm.get_rank() > 0:\n return\n # Load the Cityscapes eval script *after* setting the required env var,\n # since the script reads CITYSCAPES_DATASET into global variables at load time.\n import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval\n\n self._logger.info(\"Evaluating results under {} ...\".format(self._temp_dir))\n\n # set some global states in cityscapes evaluation API, before evaluating\n cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)\n cityscapes_eval.args.predictionWalk = None\n cityscapes_eval.args.JSONOutput = False\n cityscapes_eval.args.colorized = False\n\n # These lines are adopted from\n # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa\n gt_dir = PathManager.get_local_path(self._metadata.gt_dir)\n groundTruthImgList = glob.glob(\n os.path.join(gt_dir, \"*\", \"*_gtFine_labelIds.png\")\n )\n assert len(\n groundTruthImgList\n ), \"Cannot find any ground truth images to use for evaluation. Searched for: {}\".format(\n cityscapes_eval.args.groundTruthSearch\n )\n predictionImgList = []\n for gt in groundTruthImgList:\n predictionImgList.append(\n cityscapes_eval.getPrediction(cityscapes_eval.args, gt)\n )\n results = cityscapes_eval.evaluateImgLists(\n predictionImgList, groundTruthImgList, cityscapes_eval.args\n )\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": 100.0 * results[\"averageScoreClasses\"],\n \"IoU.road\": 100.0 * results[\"classScores\"][\"road\"],\n \"IoU.sidewalk\": 100.0 * results[\"classScores\"][\"sidewalk\"],\n \"IoU.building\": 100.0 * results[\"classScores\"][\"building\"],\n \"IoU.wall\": 100.0 * results[\"classScores\"][\"wall\"],\n \"IoU.fence\": 100.0 * results[\"classScores\"][\"fence\"],\n \"IoU.pole\": 100.0 * results[\"classScores\"][\"pole\"],\n \"IoU.traffic light\": 100.0 * results[\"classScores\"][\"traffic light\"],\n \"IoU.traffic sign\": 100.0 * results[\"classScores\"][\"traffic sign\"],\n \"IoU.vegetation\": 100.0 * results[\"classScores\"][\"vegetation\"],\n \"IoU.terrain\": 100.0 * results[\"classScores\"][\"terrain\"],\n \"IoU.sky\": 100.0 * results[\"classScores\"][\"sky\"],\n \"IoU.person\": 100.0 * results[\"classScores\"][\"person\"],\n \"IoU.rider\": 100.0 * results[\"classScores\"][\"rider\"],\n \"IoU.car\": 100.0 * results[\"classScores\"][\"car\"],\n \"IoU.truck\": 100.0 * results[\"classScores\"][\"truck\"],\n \"IoU.bus\": 100.0 * results[\"classScores\"][\"bus\"],\n \"IoU.train\": 100.0 * results[\"classScores\"][\"train\"],\n \"IoU.motorcycle\": 100.0 * results[\"classScores\"][\"motorcycle\"],\n \"IoU.bicycle\": 100.0 * results[\"classScores\"][\"bicycle\"],\n }\n if not self._save_pl:\n self._working_dir.cleanup()\n return ret" }, { "identifier": "ClassicalSemSegEvaluator", "path": "clouds/evaluation/semantic_evaluation.py", "snippet": "class ClassicalSemSegEvaluator(DatasetEvaluator):\n \"\"\"\n Evaluate semantic segmentation metrics.\n \"\"\"\n\n def __init__(\n self,\n dataset_name,\n distributed=True,\n output_dir=None,\n *,\n sem_seg_loading_fn=load_image_into_numpy_array,\n num_classes=None,\n ignore_label=None,\n 
save_pl=False,\n ):\n \"\"\"\n Args:\n dataset_name (str): name of the dataset to be evaluated.\n distributed (bool): if True, will collect results from all ranks for evaluation.\n Otherwise, will evaluate the results in the current process.\n output_dir (str): an output directory to dump results.\n sem_seg_loading_fn: function to read sem seg file and load into numpy array.\n Default provided, but projects can customize.\n num_classes, ignore_label: deprecated argument\n \"\"\"\n self._logger = logging.getLogger(__name__)\n if num_classes is not None:\n self._logger.warn(\n \"SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata.\"\n )\n if ignore_label is not None:\n self._logger.warn(\n \"SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata.\"\n )\n self._dataset_name = dataset_name\n self._distributed = distributed\n self._output_dir = output_dir\n\n self._cpu_device = torch.device(\"cpu\")\n\n self.input_file_to_gt_file = {\n dataset_record[\"file_name\"]: dataset_record[\"sem_seg_file_name\"]\n for dataset_record in DatasetCatalog.get(dataset_name)\n }\n\n meta = MetadataCatalog.get(dataset_name)\n # Dict that maps contiguous training ids to COCO category ids\n try:\n c2d = meta.stuff_dataset_id_to_contiguous_id\n self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}\n except AttributeError:\n self._contiguous_id_to_dataset_id = None\n self._class_names = meta.stuff_classes\n self.sem_seg_loading_fn = sem_seg_loading_fn\n self._num_classes = len(meta.stuff_classes)\n if num_classes is not None:\n assert (\n self._num_classes == num_classes\n ), f\"{self._num_classes} != {num_classes}\"\n self._ignore_label = (\n ignore_label if ignore_label is not None else meta.ignore_label\n )\n\n # This is because cv2.erode did not work for int datatype. Only works for uint8.\n self._compute_boundary_iou = True\n if not _CV2_IMPORTED:\n self._compute_boundary_iou = False\n self._logger.warn(\n \"\"\"Boundary IoU calculation requires OpenCV. B-IoU metrics are\n not going to be computed because OpenCV is not available to import.\"\"\"\n )\n if self._num_classes >= np.iinfo(np.uint8).max:\n self._compute_boundary_iou = False\n self._logger.warn(\n f\"\"\"SemSegEvaluator(num_classes) is more than supported value for Boundary IoU calculation!\n B-IoU metrics are not going to be computed. Max allowed value (exclusive)\n for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.\n The number of classes of dataset {self._dataset_name} is {self._num_classes}\"\"\"\n )\n self._save_pl = save_pl\n\n def reset(self):\n self._conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._b_conf_matrix = np.zeros(\n (self._num_classes + 1, self._num_classes + 1), dtype=np.int64\n )\n self._predictions = []\n\n def process(self, inputs, outputs):\n \"\"\"\n Args:\n inputs: the inputs to a model.\n It is a list of dicts. Each dict corresponds to an image and\n contains keys like \"height\", \"width\", \"file_name\".\n outputs: the outputs of a model. 
It is either list of semantic segmentation predictions\n (Tensor [H, W]) or list of dicts with key \"sem_seg\" that contains semantic\n segmentation prediction in the same format.\n \"\"\"\n for input, output in zip(inputs, outputs):\n output = output[\"sem_seg\"].argmax(dim=0).to(self._cpu_device)\n pred = np.array(output, dtype=int)\n gt = input[\"sem_seg\"].numpy()\n\n gt[gt == self._ignore_label] = self._num_classes\n\n self._conf_matrix += np.bincount(\n (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._compute_boundary_iou:\n b_gt = self._mask_to_boundary(gt.astype(np.uint8))\n b_pred = self._mask_to_boundary(pred.astype(np.uint8))\n\n self._b_conf_matrix += np.bincount(\n (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),\n minlength=self._conf_matrix.size,\n ).reshape(self._conf_matrix.shape)\n\n if self._save_pl:\n self._predictions.extend(\n [dict(file_name=input[\"file_name\"], pred=pred)]\n )\n else:\n self._predictions.extend(\n self.encode_json_sem_seg(pred, input[\"file_name\"])\n )\n\n def evaluate(self):\n \"\"\"\n Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):\n\n * Mean intersection-over-union averaged across classes (mIoU)\n * Frequency Weighted IoU (fwIoU)\n * Mean pixel accuracy averaged across classes (mACC)\n * Pixel Accuracy (pACC)\n \"\"\"\n if self._distributed:\n synchronize()\n conf_matrix_list = all_gather(self._conf_matrix)\n b_conf_matrix_list = all_gather(self._b_conf_matrix)\n self._predictions = all_gather(self._predictions)\n self._predictions = list(itertools.chain(*self._predictions))\n if not is_main_process():\n return\n\n self._conf_matrix = np.zeros_like(self._conf_matrix)\n for conf_matrix in conf_matrix_list:\n self._conf_matrix += conf_matrix\n\n self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)\n for b_conf_matrix in b_conf_matrix_list:\n self._b_conf_matrix += b_conf_matrix\n\n if self._output_dir:\n first_elem = self._predictions[0]\n if \"bdd\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"bdd_eval_pl\")\n elif \"mapillary\" in first_elem[\"file_name\"]:\n self._output_dir = os.path.join(self._output_dir, \"mapillary_eval_pl\")\n PathManager.mkdirs(self._output_dir)\n if self._save_pl:\n # A function that will iterate over the list of dictionnaries and write the corresponding image\n # in the output directory\n def write_image_from_dict(dict):\n filename = os.path.join(\n self._output_dir,\n dict[\"file_name\"].split(\"/\")[-1].split(\".\")[0] + \"_pred.png\",\n )\n pred = dict[\"pred\"]\n pred = get_rgb_from_semantic_map_maxed(pred)\n # pred = Image.fromarray(pred)\n pred.save(filename)\n\n # We apply the function to the list of dictionnaries\n list(map(write_image_from_dict, self._predictions))\n\n else:\n file_path = os.path.join(self._output_dir, \"sem_seg_predictions.json\")\n with PathManager.open(file_path, \"w\") as f:\n f.write(json.dumps(self._predictions))\n\n acc = np.full(self._num_classes, np.nan, dtype=float)\n iou = np.full(self._num_classes, np.nan, dtype=float)\n tp = self._conf_matrix.diagonal()[:-1].astype(float)\n pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)\n class_weights = pos_gt / np.sum(pos_gt)\n pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)\n acc_valid = pos_gt > 0\n acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]\n union = pos_gt + pos_pred - tp\n iou_valid = 
np.logical_and(acc_valid, union > 0)\n iou[iou_valid] = tp[iou_valid] / union[iou_valid]\n macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)\n miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)\n fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])\n pacc = np.sum(tp) / np.sum(pos_gt)\n\n if self._compute_boundary_iou:\n b_iou = np.full(self._num_classes, np.nan, dtype=float)\n b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)\n b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)\n b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)\n b_union = b_pos_gt + b_pos_pred - b_tp\n b_iou_valid = b_union > 0\n b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]\n\n res = {}\n res[\"mIoU\"] = 100 * miou\n res[\"fwIoU\"] = 100 * fiou\n for i, name in enumerate(self._class_names):\n res[f\"IoU-{name}\"] = 100 * iou[i]\n if self._compute_boundary_iou:\n res[f\"BoundaryIoU-{name}\"] = 100 * b_iou[i]\n res[f\"min(IoU, B-Iou)-{name}\"] = 100 * min(iou[i], b_iou[i])\n res[\"mACC\"] = 100 * macc\n res[\"pACC\"] = 100 * pacc\n for i, name in enumerate(self._class_names):\n res[f\"ACC-{name}\"] = 100 * acc[i]\n\n if self._output_dir:\n file_path = os.path.join(self._output_dir, \"sem_seg_evaluation.pth\")\n with PathManager.open(file_path, \"wb\") as f:\n torch.save(res, f)\n results = OrderedDict({\"sem_seg\": res})\n self._logger.info(results)\n\n def get_miou_value_from_dict(dict, subkey):\n for key, value in dict.items():\n if subkey in key and \"IoU\" in key:\n if np.isnan(value):\n return 0\n else:\n return value\n\n ret = OrderedDict()\n ret[\"sem_seg\"] = {\n \"mIoU\": results[\"sem_seg\"][\"mIoU\"],\n \"IoU.road\": get_miou_value_from_dict(results[\"sem_seg\"], \"road\"),\n \"IoU.sidewalk\": get_miou_value_from_dict(results[\"sem_seg\"], \"sidewalk\"),\n \"IoU.building\": get_miou_value_from_dict(results[\"sem_seg\"], \"building\"),\n \"IoU.wall\": get_miou_value_from_dict(results[\"sem_seg\"], \"wall\"),\n \"IoU.fence\": get_miou_value_from_dict(results[\"sem_seg\"], \"fence\"),\n \"IoU.pole\": get_miou_value_from_dict(results[\"sem_seg\"], \"pole\"),\n \"IoU.traffic light\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic light\"\n ),\n \"IoU.traffic sign\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"traffic sign\"\n ),\n \"IoU.vegetation\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"vegetation\"\n ),\n \"IoU.terrain\": get_miou_value_from_dict(results[\"sem_seg\"], \"terrain\"),\n \"IoU.sky\": get_miou_value_from_dict(results[\"sem_seg\"], \"sky\"),\n \"IoU.person\": get_miou_value_from_dict(results[\"sem_seg\"], \"person\"),\n \"IoU.rider\": get_miou_value_from_dict(results[\"sem_seg\"], \"rider\"),\n \"IoU.car\": get_miou_value_from_dict(results[\"sem_seg\"], \"car\"),\n \"IoU.truck\": get_miou_value_from_dict(results[\"sem_seg\"], \"truck\"),\n \"IoU.bus\": get_miou_value_from_dict(results[\"sem_seg\"], \"bus\"),\n \"IoU.train\": get_miou_value_from_dict(results[\"sem_seg\"], \"train\"),\n \"IoU.motorcycle\": get_miou_value_from_dict(\n results[\"sem_seg\"], \"motorcycle\"\n ),\n \"IoU.bicycle\": get_miou_value_from_dict(results[\"sem_seg\"], \"bicycle\"),\n }\n return ret\n\n def encode_json_sem_seg(self, sem_seg, input_file_name):\n \"\"\"\n Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.\n See http://cocodataset.org/#format-results\n \"\"\"\n json_list = []\n for label in np.unique(sem_seg):\n if self._contiguous_id_to_dataset_id is not None:\n assert (\n label in 
self._contiguous_id_to_dataset_id\n ), \"Label {} is not in the metadata info for {}\".format(\n label, self._dataset_name\n )\n dataset_id = self._contiguous_id_to_dataset_id[label]\n else:\n dataset_id = int(label)\n mask = (sem_seg == label).astype(np.uint8)\n mask_rle = mask_util.encode(np.array(mask[:, :, None], order=\"F\"))[0]\n mask_rle[\"counts\"] = mask_rle[\"counts\"].decode(\"utf-8\")\n json_list.append(\n {\n \"file_name\": input_file_name,\n \"category_id\": dataset_id,\n \"segmentation\": mask_rle,\n }\n )\n return json_list\n\n def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):\n assert mask.ndim == 2, \"mask_to_boundary expects a 2-dimensional image\"\n h, w = mask.shape\n diag_len = np.sqrt(h ** 2 + w ** 2)\n dilation = max(1, int(round(dilation_ratio * diag_len)))\n kernel = np.ones((3, 3), dtype=np.uint8)\n\n padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)\n eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)\n eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]\n boundary = mask - eroded_mask\n return boundary" }, { "identifier": "PersoEvalHook", "path": "clouds/engine/hooks.py", "snippet": "class PersoEvalHook(HookBase):\n \"\"\"\n Run an evaluation function periodically, and at the end of training.\n\n It is executed every ``eval_period`` iterations and after the last iteration.\n \"\"\"\n\n def __init__(self, eval_period, eval_function, eval_after_train=True):\n \"\"\"\n Args:\n eval_period (int): the period to run `eval_function`. Set to 0 to\n not evaluate periodically (but still evaluate after the last iteration\n if `eval_after_train` is True).\n eval_function (callable): a function which takes no arguments, and\n returns a nested dict of evaluation metrics.\n eval_after_train (bool): whether to evaluate after the last iteration\n\n Note:\n This hook must be enabled in all or none workers.\n If you would like only certain workers to perform evaluation,\n give other workers a no-op function (`eval_function=lambda: None`).\n \"\"\"\n self._period = eval_period\n self._func = eval_function\n self._eval_after_train = eval_after_train\n\n def _do_eval(self):\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. \"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)\n\n # Evaluation may take different time among workers.\n # A barrier make them start the next iteration together.\n comm.synchronize()\n\n def before_train(self):\n \"\"\"\n Called before the first iteration.\n \"\"\"\n if \"debug\" in self.trainer.cfg.OUTPUT_DIR:\n pass\n else:\n results = self._func()\n\n if results:\n assert isinstance(\n results, dict\n ), \"Eval function must return a dict. Got {} instead.\".format(results)\n\n flattened_results = flatten_results_dict(results)\n for k, v in flattened_results.items():\n try:\n v = float(v)\n except Exception as e:\n raise ValueError(\n \"[EvalHook] eval_function should return a nested dict of float. 
\"\n \"Got '{}: {}' instead.\".format(k, v)\n ) from e\n self.trainer.storage.put_scalars(\n **flattened_results, smoothing_hint=False\n )\n\n def after_step(self):\n next_iter = self.trainer.iter + 1\n if self._period > 0 and next_iter % self._period == 0:\n # do the last eval in after_train\n if next_iter != self.trainer.max_iter:\n self._do_eval()\n\n def after_train(self):\n # This condition is to prevent the eval from running after a failed training\n if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:\n self._do_eval()\n # func is likely a closure that holds reference to the trainer\n # therefore we clean it to avoid circular reference in the end\n del self._func" }, { "identifier": "WandbWriter", "path": "clouds/utils/events.py", "snippet": "class WandbWriter(EventWriter):\n \"\"\"\n Write all scalars to a tensorboard file.\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Args:\n log_dir (str): the directory to save the output events\n kwargs: other arguments passed to `torch.utils.tensorboard.SummaryWriter(...)`\n \"\"\"\n self._last_write = -1\n self._group_rules = [\n (IsIn(\"/\"), BaseRule()),\n (IsIn(\"loss\"), Prefix(\"train\")),\n # (IsIn(\"sem_seg\"), Prefix(\"val\")),\n (\n IsInList([\"lr\", \"time\", \"eta_seconds\", \"rank_data_time\", \"data_time\"]),\n Prefix(\"stats\"),\n ),\n ]\n\n def write(self):\n storage = get_event_storage()\n\n def _group_name(scalar_name):\n for rule, op in self._group_rules:\n if rule(scalar_name):\n return op(scalar_name)\n return scalar_name\n\n stats = {\n _group_name(name): scalars[0]\n for name, scalars in storage.latest().items()\n if scalars[1] > self._last_write\n }\n if len(stats) > 0:\n self._last_write = max([v[1] for k, v in storage.latest().items()])\n\n # storage.put_{image,histogram} is only meant to be used by\n # tensorboard writer. So we access its internal fields directly from here.\n if len(storage._vis_data) >= 1:\n stats[\"image\"] = [\n wandb.Image(img, caption=img_name)\n for img_name, img, step_num in storage._vis_data\n ]\n # Storage stores all image data and rely on this writer to clear them.\n # As a result it assumes only one writer will use its image data.\n # An alternative design is to let storage store limited recent\n # data (e.g. 
only the most recent image) that all writers can access.\n # In that case a writer may not see all image data if its period is long.\n storage.clear_images()\n\n if len(storage._histograms) >= 1:\n\n def create_bar(tag, bucket_limits, bucket_counts, **kwargs):\n data = [\n [label, val] for (label, val) in zip(bucket_limits, bucket_counts)\n ]\n table = wandb.Table(data=data, columns=[\"label\", \"value\"])\n return wandb.plot.bar(table, \"label\", \"value\", title=tag)\n\n stats[\"hist\"] = [create_bar(**params) for params in storage._histograms]\n\n storage.clear_histograms()\n\n if len(stats) == 0:\n return\n wandb.log(stats, step=storage.iter)\n\n def close(self):\n wandb.finish()" }, { "identifier": "setup_wandb", "path": "clouds/utils/events.py", "snippet": "def setup_wandb(cfg, args):\n if comm.is_main_process():\n init_args = {\n k.lower(): v\n for k, v in cfg.WANDB.items()\n if isinstance(k, str) and k not in [\"config\", \"name\"]\n }\n if \"config_exclude_keys\" in init_args:\n init_args[\"config\"] = cfg\n init_args[\"config\"][\"cfg_file\"] = args.config_file\n else:\n init_args[\"config\"] = {\n \"output_dir\": cfg.OUTPUT_DIR,\n \"train\": extract_dataset_from_string(cfg.DATASETS.TRAIN),\n \"test\": extract_dataset_from_string(cfg.DATASETS.TEST),\n \"iter\": cfg.SOLVER.MAX_ITER,\n \"lr\": cfg.SOLVER.BASE_LR,\n \"batch_size\": cfg.SOLVER.IMS_PER_BATCH,\n \"cfg_file\": args.config_file,\n }\n\n init_args[\"group\"] = get_base_name(cfg)\n if cfg.WANDB.NAME is not None:\n init_args[\"name\"] = cfg.WANDB.NAME\n else:\n init_args[\"name\"] = get_full_name_xp(init_args[\"group\"], cfg)\n if \"debug\" in cfg.OUTPUT_DIR:\n init_args[\"project\"] = \"debug\"\n wandb.init(**init_args)" } ]
from shapely.errors import ShapelyDeprecationWarning from collections import OrderedDict from typing import Any, Dict, List, Set from detectron2.checkpoint import DetectionCheckpointer from detectron2.config import get_cfg from detectron2.data import ( MetadataCatalog, build_detection_train_loader, build_detection_test_loader, ) from detectron2.engine import ( DefaultTrainer, default_argument_parser, default_setup, launch, ) from detectron2.modeling import build_model from detectron2.evaluation import ( CityscapesInstanceEvaluator, CityscapesSemSegEvaluator, COCOEvaluator, COCOPanopticEvaluator, DatasetEvaluators, LVISEvaluator, SemSegEvaluator, verify_results, inference_on_dataset, print_csv_format, DatasetEvaluator, ) from detectron2.projects.deeplab import add_deeplab_config, build_lr_scheduler from detectron2.solver.build import maybe_add_gradient_clipping from detectron2.utils.logger import setup_logger from detectron2.engine import hooks from fvcore.nn.precise_bn import get_bn_modules from clouds import ( CityscapesSemSegEvaluator, ClassicalSemSegEvaluator, MapperTrain, MapperTest, add_maskformer2_config, add_clouds_config, add_wandb_config, add_prerocessing_training_set_config, PersoEvalHook, add_repeat_factors, ) from clouds.utils import setup_wandb, WandbWriter import warnings import copy import itertools import logging import os import ast import torch import detectron2.utils.comm as comm
14,536
""" Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg)
""" Copyright 2023 Telecom Paris, Yasser BENIGMIM. All rights reserved. Licensed under the Apache License, Version 2.0 Reference: https://github.com/facebookresearch/Mask2Former/blob/main/train_net.py CLOUDS Training Script. This script is a simplified version of the training script in detectron2/tools. """ try: # ignore ShapelyDeprecationWarning from fvcore warnings.filterwarnings("ignore", category=ShapelyDeprecationWarning) except: pass class Trainer(DefaultTrainer): """ Extension of the Trainer class adapted to CLOUDS. """ def build_writers(self): writers = super().build_writers() # use wandb writer instead. writers[-1] = WandbWriter() return writers @classmethod def build_model(cls, cfg): """ Returns: torch.nn.Module: It now calls :func:`detectron2.modeling.build_model`. Overwrite it if you'd like a different model. """ model = build_model(cfg) # logger = logging.getLogger(__name__) # logger.info("Model:\n{}".format(model)) return model # @classmethod # def build_model(cls, cfg): # """ # Returns: # torch.nn.Module: # # It now calls :func:`detectron2.modeling.build_model`. # Overwrite it if you'd like a different model. # """ # model = build_model(cfg) # # logger = logging.getLogger(__name__) # # logger.info("Model:\n{}".format(model)) # return model @classmethod def build_evaluator(cls, cfg, dataset_name, output_folder=None): """ Create evaluator(s) for a given dataset. This uses the special metadata "evaluator_type" associated with each builtin dataset. For your own dataset, you can simply create an evaluator manually in your script and do not have to worry about the hacky if-else logic here. """ if output_folder is None: output_folder = os.path.join(cfg.OUTPUT_DIR, "inference") else: output_folder = os.path.join(cfg.OUTPUT_DIR, output_folder, "inference") evaluator_list = [] evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type # semantic segmentation if ( evaluator_type == "bdd_sem_seg" or evaluator_type == "mapillary_sem_seg" or evaluator_type == "acdc_sem_seg" ): evaluator_list.append( ClassicalSemSegEvaluator( dataset_name, distributed=True, output_dir=output_folder, save_pl=cfg.MODEL.SAVE_PSEUDO_LABELS, ) ) # Cityscapes if evaluator_type == "cityscapes_sem_seg": assert ( torch.cuda.device_count() > comm.get_rank() ), "CityscapesEvaluator currently do not work with multiple machines." # return CityscapesSemSegEvaluator(dataset_name) if cfg.MODEL.SAVE_PSEUDO_LABELS: return CityscapesSemSegEvaluator( dataset_name, save_pl=True, output_dir=output_folder ) else: return CityscapesSemSegEvaluator(dataset_name) if len(evaluator_list) == 0: raise NotImplementedError( "no Evaluator for the dataset {} with the type {}".format( dataset_name, evaluator_type ) ) elif len(evaluator_list) == 1: return evaluator_list[0] return DatasetEvaluators(evaluator_list) @classmethod def build_train_loader(cls, cfg): # Semantic segmentation dataset mapper mapper = MapperTrain(cfg, True) return build_detection_train_loader(cfg, mapper=mapper) @classmethod def build_test_loader(cls, cfg, dataset_name): mapper = MapperTest(cfg, False) return build_detection_test_loader( cfg, dataset_name, batch_size=1, mapper=mapper ) @classmethod def build_lr_scheduler(cls, cfg, optimizer): """ It now calls :func:`detectron2.solver.build_lr_scheduler`. Overwrite it if you'd like a different scheduler. 
""" return build_lr_scheduler(cfg, optimizer) @classmethod def build_optimizer(cls, cfg, model): weight_decay_norm = cfg.SOLVER.WEIGHT_DECAY_NORM weight_decay_embed = cfg.SOLVER.WEIGHT_DECAY_EMBED defaults = {} defaults["lr"] = cfg.SOLVER.BASE_LR defaults["weight_decay"] = cfg.SOLVER.WEIGHT_DECAY norm_module_types = ( torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d, torch.nn.SyncBatchNorm, # NaiveSyncBatchNorm inherits from BatchNorm2d torch.nn.GroupNorm, torch.nn.InstanceNorm1d, torch.nn.InstanceNorm2d, torch.nn.InstanceNorm3d, torch.nn.LayerNorm, torch.nn.LocalResponseNorm, ) params: List[Dict[str, Any]] = [] memo: Set[torch.nn.parameter.Parameter] = set() for module_name, module in model.named_modules(): for module_param_name, value in module.named_parameters(recurse=False): if not value.requires_grad: continue if cfg.MODEL.CLOUDS.OVERWRITING: if any( ignored_module in module_name for ignored_module in ["sem_seg_head_ema.", "sam.sam."] ): continue # Avoid duplicating parameters if value in memo: continue memo.add(value) hyperparams = copy.copy(defaults) if "backbone" in module_name: hyperparams["lr"] = ( hyperparams["lr"] * cfg.SOLVER.BACKBONE_MULTIPLIER ) if ( "relative_position_bias_table" in module_param_name or "absolute_pos_embed" in module_param_name ): print(module_param_name) hyperparams["weight_decay"] = 0.0 if isinstance(module, norm_module_types): hyperparams["weight_decay"] = weight_decay_norm if isinstance(module, torch.nn.Embedding): hyperparams["weight_decay"] = weight_decay_embed params.append({"params": [value], **hyperparams}) def maybe_add_full_model_gradient_clipping(optim): # detectron2 doesn't have full model gradient clipping now clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE enable = ( cfg.SOLVER.CLIP_GRADIENTS.ENABLED and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model" and clip_norm_val > 0.0 ) class FullModelGradientClippingOptimizer(optim): def step(self, closure=None): all_params = itertools.chain( *[x["params"] for x in self.param_groups] ) torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val) super().step(closure=closure) return FullModelGradientClippingOptimizer if enable else optim optimizer_type = cfg.SOLVER.OPTIMIZER if optimizer_type == "SGD": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)( params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM ) elif optimizer_type == "ADAMW": optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)( params, cfg.SOLVER.BASE_LR ) else: raise NotImplementedError(f"no optimizer type {optimizer_type}") if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model": optimizer = maybe_add_gradient_clipping(cfg, optimizer) return optimizer @classmethod def test(cls, cfg, model, output_folder=None, evaluators=None): """ Evaluate the given model. The given model is expected to already contain weights to evaluate. Args: cfg (CfgNode): model (nn.Module): evaluators (list[DatasetEvaluator] or None): if None, will call :meth:`build_evaluator`. Otherwise, must have the same length as ``cfg.DATASETS.TEST``. 
Returns: dict: a dict of result metrics """ logger = logging.getLogger(__name__) if isinstance(evaluators, DatasetEvaluator): evaluators = [evaluators] if evaluators is not None: assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format( len(cfg.DATASETS.TEST), len(evaluators) ) results = OrderedDict() for idx, dataset_name in enumerate(cfg.DATASETS.TEST): data_loader = cls.build_test_loader(cfg, dataset_name) # When evaluators are passed in as arguments, # implicitly assume that evaluators can be created before data_loader. if evaluators is not None: evaluator = evaluators[idx] else: try: evaluator = cls.build_evaluator( cfg, dataset_name, output_folder=output_folder ) except NotImplementedError: logger.warn( "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, " "or implement its `build_evaluator` method." ) results[dataset_name] = {} continue results_i = inference_on_dataset(model, data_loader, evaluator) results[dataset_name] = results_i if comm.is_main_process(): assert isinstance( results_i, dict ), "Evaluator must return a dict on the main process. Got {} instead.".format( results_i ) logger.info( "Evaluation results for {} in csv format:".format(dataset_name) ) print_csv_format(results_i) if len(results) == 1: results = list(results.values())[0] return results def build_hooks(self): """ Build a list of default hooks, including timing, evaluation, checkpointing, lr scheduling, precise BN, writing events. Returns: list[HookBase]: """ cfg = self.cfg.clone() cfg.defrost() cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN ret = [ hooks.IterationTimer(), hooks.LRScheduler(), hooks.PreciseBN( # Run at the same freq as (but before) evaluation. cfg.TEST.EVAL_PERIOD, self.model, # Build a new data loader to not affect training self.build_train_loader(cfg), cfg.TEST.PRECISE_BN.NUM_ITER, ) if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model) else None, ] # Do PreciseBN before checkpointer, because it updates the model and need to # be saved by checkpointer. # This is not always the best: if checkpointing has a different frequency, # some checkpoints may have more precise statistics than others. if comm.is_main_process(): ret.append( hooks.PeriodicCheckpointer(self.checkpointer, cfg.TEST.EVAL_PERIOD * 5) ) def test_and_save_results(): self._last_eval_results = self.test(self.cfg, self.model) return self._last_eval_results # Do evaluation after checkpointer, because then if it fails, # we can use the saved checkpoint to debug. # ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) ret.append(PersoEvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results)) if comm.is_main_process(): # Here the default print/log frequency of each writer is used. # run writers in the end, so that evaluation metrics are written ret.append(hooks.PeriodicWriter(self.build_writers(), period=20)) return ret def setup(args): """ Create configs and perform basic setups. """ cfg = get_cfg() # for poly lr schedule add_deeplab_config(cfg) add_maskformer2_config(cfg) add_clouds_config(cfg) add_wandb_config(cfg)
add_prerocessing_training_set_config(cfg)
3
2023-12-15 15:40:58+00:00
24k
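One complete sample ends here and the next one (modelscope/scepter) begins below. The sketch that follows is a minimal, hypothetical illustration of how such samples might be consumed for evaluation — it assumes the rows are stored as JSON Lines with field names like cropped_code, context, gold_snippet_index, and next_line, and the helper names load_records, exact_match_accuracy, and predict_next_line are invented for this example rather than part of the dataset or any library.

```python
import json


def load_records(path):
    """Yield one sample per line from a JSON Lines file (assumed storage format)."""
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            if line.strip():
                yield json.loads(line)


def exact_match_accuracy(records, predict_next_line):
    """Score a next-line predictor against the reference completions.

    `predict_next_line(prompt, context)` is a caller-supplied callable; the
    records only provide its inputs and the reference `next_line`.
    """
    hits = total = 0
    for rec in records:
        # `cropped_code` is the truncated in-file prompt; the retrieved
        # cross-file snippets live in `context`, and `gold_snippet_index`
        # marks the entry that defines the symbol used on the missing line.
        prediction = predict_next_line(rec["cropped_code"], rec["context"])
        hits += int(prediction.strip() == rec["next_line"].strip())
        total += 1
    return hits / max(total, 1)
```

Exact string match is the strictest scoring choice; relaxations such as whitespace normalization or edit similarity are common when judging single-line completions.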
modelscope/scepter
scepter/studio/inference/inference.py
[ { "identifier": "Config", "path": "scepter/modules/utils/config.py", "snippet": "class Config(object):\n def __init__(self,\n cfg_dict={},\n load=True,\n cfg_file=None,\n logger=None,\n parser_ins=None):\n '''\n support to parse json/dict/yaml_file of parameters.\n :param load: whether load parameters or not.\n :param cfg_dict: default None.\n :param cfg_level: default None, means the current cfg-level for recurrent cfg presentation.\n :param logger: logger instance for print the cfg log.\n one examples:\n import argparse\n parser = argparse.ArgumentParser(\n description=\"Argparser for Cate process:\\n\"\n )\n parser.add_argument(\n \"--stage\",\n dest=\"stage\",\n help=\"Running stage!\",\n default=\"train\",\n choices=[\"train\"]\n )\n\n cfg = Config(load=True, parser_ins=parser)\n '''\n # checking that the logger exists or not\n if logger is None:\n self.logger = StdMsg(name='Config')\n else:\n self.logger = logger\n self.cfg_dict = cfg_dict\n if load:\n if cfg_file is None:\n assert parser_ins is not None\n self.args = _parse_args(parser_ins)\n self.load_from_file(self.args.cfg_file)\n # os.environ[\"LAUNCHER\"] = self.args.launcher\n os.environ['DATA_ONLINE'] = str(self.args.data_online).lower()\n os.environ['SHARE_STORAGE'] = str(\n self.args.share_storage).lower()\n os.environ['ES_DEBUG'] = str(self.args.debug).lower()\n else:\n self.load_from_file(cfg_file)\n if 'ENV' not in self.cfg_dict:\n self.cfg_dict['ENV'] = {\n 'SEED': 2023,\n 'USE_PL': False,\n 'BACKEND': 'nccl',\n 'SYNC_BN': False,\n 'CUDNN_DETERMINISTIC': True,\n 'CUDNN_BENCHMARK': False\n }\n self.logger.info(\n f\"ENV is not set and will use default ENV as {self.cfg_dict['ENV']}; \"\n f'If want to change this value, please set them in your config.'\n )\n else:\n if 'SEED' not in self.cfg_dict['ENV']:\n self.cfg_dict['ENV']['SEED'] = 2023\n self.logger.info(\n f\"SEED is not set and will use default SEED as {self.cfg_dict['ENV']['SEED']}; \"\n f'If want to change this value, please set it in your config.'\n )\n os.environ['ES_SEED'] = str(self.cfg_dict['ENV']['SEED'])\n self._update_dict(self.cfg_dict)\n if load:\n self.logger.info(f'Parse cfg file as \\n {self.dump()}')\n\n def load_from_file(self, file_name):\n self.logger.info(f'Loading config from {file_name}')\n if file_name is None or not os.path.exists(file_name):\n self.logger.info(f'File {file_name} does not exist!')\n self.logger.warning(\n f\"Cfg file is None or doesn't exist, Skip loading config from {file_name}.\"\n )\n return\n if file_name.endswith('.json'):\n self.cfg_dict = self._load_json(file_name)\n self.logger.info(\n f'System take {file_name} as json, because we find json in this file'\n )\n elif file_name.endswith('.yaml'):\n self.cfg_dict = self._load_yaml(file_name)\n self.logger.info(\n f'System take {file_name} as yaml, because we find yaml in this file'\n )\n else:\n self.logger.info(\n f'No config file found! 
Because we do not find json or yaml in --cfg {file_name}'\n )\n\n def _update_dict(self, cfg_dict):\n def recur(key, elem):\n if type(elem) is dict:\n return key, Config(load=False,\n cfg_dict=elem,\n logger=self.logger)\n elif type(elem) is list:\n config_list = []\n for idx, ele in enumerate(elem):\n if type(ele) is str and ele[1:3] == 'e-':\n ele = float(ele)\n config_list.append(ele)\n elif type(ele) is str:\n config_list.append(ele)\n elif type(ele) is dict:\n config_list.append(\n Config(load=False,\n cfg_dict=ele,\n logger=self.logger))\n elif type(ele) is list:\n config_list.append(ele)\n else:\n config_list.append(ele)\n return key, config_list\n else:\n if type(elem) is str and elem[1:3] == 'e-':\n elem = float(elem)\n return key, elem\n\n dic = dict(recur(k, v) for k, v in cfg_dict.items())\n self.__dict__.update(dic)\n\n def _load_json(self, cfg_file):\n '''\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n cfg = json.load(open(file_name, 'r'))\n except Exception as e:\n self.logger.error(f'Load json from {cfg_file} error. Message: {e}')\n sys.exit()\n return cfg\n\n def _load_yaml(self, cfg_file):\n '''\n if replace some parameters from Base, You can reference the base parameters use Base.\n\n :param cfg_file:\n :return:\n '''\n if cfg_file is None:\n self.logger.warning(\n f'Cfg file is None, Skip loading config from {cfg_file}.')\n return {}\n file_name = cfg_file\n try:\n with open(cfg_file, 'r') as f:\n cfg = yaml.load(f.read(), Loader=yaml.SafeLoader)\n except Exception as e:\n self.logger.error(f'Load yaml from {cfg_file} error. Message: {e}')\n sys.exit()\n if '_BASE_RUN' not in cfg.keys() and '_BASE_MODEL' not in cfg.keys(\n ) and '_BASE' not in cfg.keys():\n return cfg\n\n if '_BASE' in cfg.keys():\n if cfg['_BASE'][1] == '.':\n prev_count = cfg['_BASE'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - cfg['_BASE'].count('..'))] +\n cfg['_BASE'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n else:\n if '_BASE_RUN' in cfg.keys():\n if cfg['_BASE_RUN'][1] == '.':\n prev_count = cfg['_BASE_RUN'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(-1 - prev_count)] +\n cfg['_BASE_RUN'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_RUN'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base,\n cfg,\n preserve_base=True)\n if '_BASE_MODEL' in cfg.keys():\n if cfg['_BASE_MODEL'][1] == '.':\n prev_count = cfg['_BASE_MODEL'].count('..')\n cfg_base_file = self._path_join(\n file_name.split('/')[:(\n -1 - cfg['_BASE_MODEL'].count('..'))] +\n cfg['_BASE_MODEL'].split('/')[prev_count:])\n else:\n cfg_base_file = cfg['_BASE_MODEL'].replace(\n './', file_name.replace(file_name.split('/')[-1], ''))\n cfg_base = self._load_yaml(cfg_base_file)\n cfg = self._merge_cfg_from_base(cfg_base, cfg)\n return cfg\n\n def _path_join(self, path_list):\n path = ''\n for p in path_list:\n path += p + '/'\n return path[:-1]\n\n def items(self):\n return self.cfg_dict.items()\n\n def _merge_cfg_from_base(self, cfg_base, cfg, preserve_base=False):\n for k, v in cfg.items():\n if k in cfg_base.keys():\n if isinstance(v, dict):\n 
self._merge_cfg_from_base(cfg_base[k], v)\n else:\n cfg_base[k] = v\n else:\n if 'BASE' not in k or preserve_base:\n cfg_base[k] = v\n return cfg_base\n\n def _merge_cfg_from_command(self, args, cfg):\n assert len(\n args.opts\n ) % 2 == 0, f'Override list {args.opts} has odd length: {len(args.opts)}'\n\n keys = args.opts[0::2]\n vals = args.opts[1::2]\n\n # maximum supported depth 3\n for idx, key in enumerate(keys):\n key_split = key.split('.')\n assert len(\n key_split\n ) <= 4, 'Key depth error. \\n Maximum depth: 3\\n Get depth: {}'.format(\n len(key_split))\n assert key_split[0] in cfg.keys(), 'Non-existant key: {}.'.format(\n key_split[0])\n if len(key_split) == 2:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 3:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n elif len(key_split) == 4:\n assert key_split[1] in cfg[\n key_split[0]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[2] in cfg[key_split[0]][\n key_split[1]].keys(), 'Non-existant key: {}'.format(key)\n assert key_split[3] in cfg[key_split[0]][key_split[1]][\n key_split[2]].keys(), 'Non-existant key: {}'.format(key)\n\n if len(key_split) == 1:\n cfg[key_split[0]] = vals[idx]\n elif len(key_split) == 2:\n cfg[key_split[0]][key_split[1]] = vals[idx]\n elif len(key_split) == 3:\n cfg[key_split[0]][key_split[1]][key_split[2]] = vals[idx]\n elif len(key_split) == 4:\n cfg[key_split[0]][key_split[1]][key_split[2]][\n key_split[3]] = vals[idx]\n\n return cfg\n\n def __repr__(self):\n return '{}\\n'.format(self.dump())\n\n def dump(self):\n return json.dumps(self.cfg_dict, indent=2)\n\n def deep_copy(self):\n return copy.deepcopy(self)\n\n def have(self, name):\n if name in self.__dict__:\n return True\n return False\n\n def get(self, name, default=None):\n if name in self.__dict__:\n return self.__dict__[name]\n return default\n\n def __getitem__(self, key):\n return self.__dict__.__getitem__(key)\n\n def __setattr__(self, key, value):\n super().__setattr__(key, value)\n if hasattr(self, 'cfg_dict') and key in self.cfg_dict:\n if isinstance(value, Config):\n value = value.cfg_dict\n self.cfg_dict[key] = value\n\n def __setitem__(self, key, value):\n self.__dict__[key] = value\n self.__setattr__(key, value)\n\n def __iter__(self):\n return iter(self.__dict__)\n\n def set(self, name, value):\n new_dict = {name: value}\n self.__dict__.update(new_dict)\n self.__setattr__(name, value)\n\n def get_dict(self):\n return self.cfg_dict\n\n def get_lowercase_dict(self, cfg_dict=None):\n if cfg_dict is None:\n cfg_dict = self.get_dict()\n config_new = {}\n for key, val in cfg_dict.items():\n if isinstance(key, str):\n if isinstance(val, dict):\n config_new[key.lower()] = self.get_lowercase_dict(val)\n else:\n config_new[key.lower()] = val\n else:\n config_new[key] = val\n return config_new\n\n @staticmethod\n def get_plain_cfg(cfg=None):\n if isinstance(cfg, Config):\n cfg_new = {}\n cfg_dict = cfg.get_dict()\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, (str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, dict):\n cfg_new = {}\n cfg_dict = cfg\n for key, val in cfg_dict.items():\n if isinstance(val, (Config, dict, list)):\n cfg_new[key] = Config.get_plain_cfg(val)\n elif isinstance(val, 
(str, numbers.Number)):\n cfg_new[key] = val\n return cfg_new\n elif isinstance(cfg, list):\n cfg_new = []\n cfg_list = cfg\n for val in cfg_list:\n if isinstance(val, (Config, dict, list)):\n cfg_new.append(Config.get_plain_cfg(val))\n elif isinstance(val, (str, numbers.Number)):\n cfg_new.append(val)\n return cfg_new\n else:\n return cfg" }, { "identifier": "FS", "path": "scepter/modules/utils/file_system.py", "snippet": "FS = FileSystem()" }, { "identifier": "PipelineManager", "path": "scepter/studio/inference/inference_manager/infer_runer.py", "snippet": "class PipelineManager():\n def __init__(self, logger=None):\n '''\n Args:\n logger:\n '''\n \"\"\"\n Only (refine) cond model and (refine) diffusion model are binded strictly.\n Users can choose any vae or refiner according to the given diffusion model.\n \"\"\"\n if logger is None:\n logger = get_logger(name='scepter')\n\n self.module_list = [\n 'diffusion_model', 'first_stage_model', 'cond_stage_model',\n 'refiner_cond_model', 'refiner_diffusion_model'\n ]\n self.pipeline_level_modules = {}\n self.module_level_choices = {}\n self.model_level_info = {}\n self.logger = logger\n\n def contruct_models_index(self, pipeline_name, pipeline):\n \"\"\"\n\n Args:\n pipeline_name:\n pipeline:\n\n Returns:\n\n \"\"\"\n \"\"\"\n self.pipeline_level_modules is used to index the modules given pipeline\n {\n \"SD_XL1.0\": {\n \"diffusion_model\": \"\",\n ....\n }\n }\n self.module_level_choices is used to provide all choices for modules.\n an example for self.module_level_models\n {\n \"diffusion_model\" : {\n \"choices\": [],\n \"default\": \"\"\n }\n ....\n }\n\n self.model_level_info is used to index the best combination for different modules give model name\n and check the combination is legal or not.\n {\n \"xxxxxx\": {\n \"pipeline\": [],\n \"check_bind_module\": [],\n \"model_info\": {}\n ....\n }\n }\n\n \"\"\"\n self.pipeline_level_modules[pipeline_name] = pipeline\n for module_name in self.module_list:\n if module_name not in self.module_level_choices:\n self.module_level_choices[module_name] = {\n 'choices': [],\n 'default': ''\n }\n module = getattr(pipeline, module_name)\n if module is None:\n continue\n model_name = f\"{pipeline_name}_{module['name']}\"\n self.module_level_choices[module_name]['choices'].append(\n model_name)\n if pipeline.is_default or self.module_level_choices[module_name][\n 'default'] == '':\n self.module_level_choices[module_name]['default'] = model_name\n if model_name not in self.model_level_info:\n self.model_level_info[model_name] = {\n 'pipeline': [],\n 'check_bind_module': [],\n 'model_info': {}\n }\n self.model_level_info[model_name]['pipeline'].append(pipeline_name)\n self.model_level_info[model_name]['model_info'] = module\n\n def construct_new_pipeline(self):\n pass\n\n def register_pipeline(self, cfg):\n new_inference = DiffusionInference(logger=self.logger)\n new_inference.init_from_cfg(cfg)\n self.contruct_models_index(cfg.NAME, new_inference)\n\n def register_tuner(self, cfg, name=None, is_customized=False):\n '''\n Args:\n cfg: {\n NAME: \"\"\n NAME_ZH: \"\"\n BASE_MODEL: \"\"\n MODEL_PATH: \"\",\n DESCRIPTION: \"\"\n }\n\n Returns:\n\n '''\n if not is_customized:\n tuners_key = 'tuners'\n else:\n tuners_key = 'customized_tuners'\n\n if tuners_key not in self.module_level_choices:\n self.module_level_choices[tuners_key] = {}\n\n if cfg.BASE_MODEL not in self.module_level_choices[tuners_key]:\n self.module_level_choices[tuners_key][cfg.BASE_MODEL] = {\n 'choices': [],\n 'default': ''\n }\n if name 
not in self.module_level_choices[tuners_key][\n cfg.BASE_MODEL]['choices']:\n self.module_level_choices[tuners_key][\n cfg.BASE_MODEL]['choices'].append(name)\n self.module_level_choices[tuners_key][cfg.BASE_MODEL]['default'] = name\n if tuners_key not in self.model_level_info:\n self.model_level_info[tuners_key] = {}\n if cfg.BASE_MODEL not in self.model_level_info[tuners_key]:\n self.model_level_info[tuners_key][cfg.BASE_MODEL] = {}\n self.model_level_info[tuners_key][cfg.BASE_MODEL][name] = {\n 'pipeline': [],\n 'check_bind_module': [],\n 'model_info': cfg\n }\n\n def register_controllers(self, cfg):\n '''\n Args:\n cfg: {\n NAME: \"\"\n NAME_ZH: \"\"\n BASE_MODEL: \"\"\n MODEL_PATH: \"\",\n DESCRIPTION: \"\"\n }\n\n Returns:\n\n '''\n if 'controllers' not in self.module_level_choices:\n self.module_level_choices['controllers'] = {}\n\n if cfg.BASE_MODEL not in self.module_level_choices['controllers']:\n self.module_level_choices['controllers'][cfg.BASE_MODEL] = {}\n if cfg.TYPE not in self.module_level_choices['controllers'][\n cfg.BASE_MODEL]:\n self.module_level_choices['controllers'][cfg.BASE_MODEL][\n cfg.TYPE] = {\n 'choices': [],\n 'default': ''\n }\n controller_name = cfg.BASE_MODEL + '_' + cfg.NAME\n self.module_level_choices['controllers'][cfg.BASE_MODEL][\n cfg.TYPE]['choices'].append(controller_name)\n self.module_level_choices['controllers'][cfg.BASE_MODEL][\n cfg.TYPE]['default'] = controller_name\n if 'controllers' not in self.model_level_info:\n self.model_level_info['controllers'] = {}\n if cfg.BASE_MODEL not in self.model_level_info['controllers']:\n self.model_level_info['controllers'][cfg.BASE_MODEL] = {}\n self.model_level_info['controllers'][\n cfg.BASE_MODEL][controller_name] = {\n 'pipeline': [],\n 'check_bind_module': [],\n 'model_info': cfg\n }\n\n def get_pipeline_given_modules(self, modules):\n diffusion_model = modules['diffusion_model']\n pipepline_name = self.model_level_info[diffusion_model]['pipeline'][0]\n return self.pipeline_level_modules[pipepline_name]" }, { "identifier": "InferenceUIName", "path": "scepter/studio/inference/inference_ui/component_names.py", "snippet": "class InferenceUIName():\n def __init__(self, language='en'):\n if language == 'en':\n self.advance_block_name = 'Advance Setting'\n self.check_box_for_setting = [\n 'Use Mantra', 'Use Tuners', 'Use Controller'\n ]\n self.diffusion_paras = 'Generation Setting'\n self.mantra_paras = 'Mantra Book'\n self.tuner_paras = 'Tuners'\n self.contrl_paras = 'Controlable Generation'\n self.refine_paras = 'Refiner Setting'\n elif language == 'zh':\n self.advance_block_name = '生成选项'\n self.check_box_for_setting = ['使用咒语', '使用微调', '使用控制']\n self.diffusion_paras = '生成参数设置'\n self.mantra_paras = '咒语书'\n self.tuner_paras = '微调模型'\n self.contrl_paras = '可控生成'\n self.refine_paras = 'Refine设置'" }, { "identifier": "ControlUI", "path": "scepter/studio/inference/inference_ui/control_ui.py", "snippet": "class ControlUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.cfg = cfg\n self.pipe_manager = pipe_manager\n\n controlable_anno = cfg.CONTROLABLE_ANNOTATORS\n self.controlable_annotators = {}\n self.control_choices = []\n self.control_defult = None\n for control_anno in controlable_anno:\n self.controlable_annotators[control_anno.TYPE] = {\n 'cfg': control_anno,\n 'device': 'offline',\n 'model': None\n }\n if control_anno.IS_DEFAULT:\n self.control_defult = control_anno.TYPE\n self.control_choices.append(control_anno.TYPE)\n if self.control_defult is None:\n 
self.control_defult = self.control_choices[0] if len(\n self.control_choices) > 0 else None\n\n default_choices = pipe_manager.module_level_choices\n default_diffusion_model = default_choices['diffusion_model']['default']\n default_pipeline = pipe_manager.model_level_info[\n default_diffusion_model]['pipeline'][0]\n if default_pipeline in default_choices[\n 'controllers'] and self.control_defult is not None:\n self.controller_choices = default_choices['controllers'][\n default_pipeline][self.control_defult]['choices']\n self.controller_default = default_choices['controllers'][\n default_pipeline][self.control_defult]['default']\n else:\n self.controller_choices = []\n self.controller_default = ''\n self.component_names = ControlUIName(language)\n\n def load_annotator(self, annotator):\n if annotator['device'] == 'offline':\n annotator['model'] = ANNOTATORS.build(annotator['cfg'])\n annotator['device'] = 'cpu'\n if annotator['device'] == 'cpu':\n annotator['model'] = annotator['model'].to(we.device_id)\n annotator['device'] = we.device_id\n return annotator\n\n def unload_annotator(self, annotator):\n if not annotator['device'] == 'offline' and not annotator[\n 'device'] == 'cpu':\n annotator['model'] = annotator['model'].to('cpu')\n annotator['device'] = 'cpu'\n return annotator\n\n def create_ui(self, *args, **kwargs):\n gr.Markdown(self.component_names.preprocess)\n with gr.Group():\n with gr.Row():\n with gr.Column(scale=1, min_width=0):\n self.source_image = gr.Image(\n label=self.component_names.source_image,\n type='pil',\n tool='editor',\n interactive=True)\n with gr.Column(scale=1, min_width=0):\n self.cond_image = gr.Image(\n label=self.component_names.cond_image,\n type='pil',\n tool='editor',\n interactive=True)\n with gr.Row():\n with gr.Column(scale=1, min_width=0):\n with gr.Row():\n self.control_mode = gr.Dropdown(\n label=self.component_names.control_preprocessor,\n choices=self.control_choices,\n value=self.control_defult,\n interactive=True)\n self.crop_type = gr.Dropdown(\n label=self.component_names.crop_type,\n choices=['CenterCrop', 'NoCrop'],\n value='CenterCrop',\n interactive=True)\n self.control_model = gr.Dropdown(\n label=self.component_names.control_model,\n choices=self.controller_choices,\n value=self.controller_default,\n interactive=True)\n with gr.Column(scale=1, min_width=0):\n self.cond_button = gr.Button('Extract')\n gr.Markdown(self.component_names.direction)\n\n def set_callbacks(self, model_manage_ui, diffusion_ui):\n def extract_condition(source_image, control_mode, crop_type,\n output_height, output_width):\n if control_mode not in self.controlable_annotators:\n gr.Error(self.component_names.control_err1 + ' ' +\n control_mode)\n annotator = self.controlable_annotators[control_mode]\n annotator = self.load_annotator(annotator)\n if crop_type == 'CenterCrop':\n source_image = TT.Resize(max(output_height,\n output_width))(source_image)\n source_image = TT.CenterCrop(\n (output_height, output_width))(source_image)\n cond_image = annotator['model'](np.array(source_image))\n self.controlable_annotators[control_mode] = self.unload_annotator(\n annotator)\n if cond_image is None:\n gr.Error(self.component_names.control_err2)\n cond_image = Image.fromarray(cond_image)\n return gr.Image(value=cond_image)\n\n self.cond_button.click(extract_condition,\n inputs=[\n self.source_image, self.control_mode,\n self.crop_type, diffusion_ui.output_height,\n diffusion_ui.output_width\n ],\n outputs=[self.cond_image])\n\n def change_control_mode(control_mode, 
diffusion_model):\n default_choices = self.pipe_manager.module_level_choices\n now_pipeline = self.pipe_manager.model_level_info[diffusion_model][\n 'pipeline'][0]\n if now_pipeline in default_choices[\n 'controllers'] and control_mode in default_choices[\n 'controllers'][now_pipeline]:\n controller_choices = default_choices['controllers'][\n now_pipeline][control_mode]['choices']\n controller_default = default_choices['controllers'][\n now_pipeline][control_mode]['default']\n else:\n controller_choices = []\n controller_default = ''\n return gr.Dropdown(choices=controller_choices,\n value=controller_default)\n\n self.control_mode.change(\n change_control_mode,\n inputs=[self.control_mode, model_manage_ui.diffusion_model],\n outputs=[self.control_model])" }, { "identifier": "DiffusionUI", "path": "scepter/studio/inference/inference_ui/diffusion_ui.py", "snippet": "class DiffusionUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.cfg = cfg\n self.pipe_manager = pipe_manager\n\n default_choices = pipe_manager.module_level_choices\n default_diffusion = default_choices['diffusion_model']['default']\n now_pipeline = pipe_manager.model_level_info[default_diffusion][\n 'pipeline'][0]\n\n self.default_resolutions = pipe_manager.pipeline_level_modules[\n now_pipeline].paras.RESOLUTIONS\n self.default_input = pipe_manager.pipeline_level_modules[\n now_pipeline].input\n\n self.diffusion_paras = self.load_all_paras()\n # deal with resolution\n self.h_level_dict = {}\n for hw_tuple in self.diffusion_paras.RESOLUTIONS.get('VALUES', []):\n h, w = hw_tuple\n if h not in self.h_level_dict:\n self.h_level_dict[h] = []\n self.h_level_dict[h].append(w)\n self.component_names = DiffusionUIName(language)\n\n def merge_resolutions(self, ori_h_level_dict, default_resolutions):\n h_level_dict = copy.deepcopy(ori_h_level_dict)\n for res in default_resolutions:\n h, w = res\n if h not in h_level_dict:\n h_level_dict[h] = []\n h_level_dict[h].append(w)\n if len(self.default_resolutions) > 0:\n default_res = default_resolutions[0]\n else:\n default_res = self.diffusion_paras.RESOLUTIONS.DEFAULT\n return h_level_dict, default_res\n\n def get_default(self, ori_diffusion_paras, cur_default):\n diffusion_paras = copy.deepcopy(ori_diffusion_paras)\n for key in diffusion_paras:\n if key.lower() in cur_default:\n diffusion_paras.get(key).DEFAULT = cur_default.get(key.lower())\n value = diffusion_paras.get(key).get('VALUES')\n if value is not None and cur_default.get(\n key.lower()) not in value:\n value.VALUES.append(cur_default.get(key.lower()))\n return diffusion_paras\n\n def load_all_paras(self):\n diffusion_paras = self.cfg.DIFFUSION_PARAS\n return diffusion_paras\n\n def create_ui(self, *args, **kwargs):\n self.cur_paras = self.get_default(self.diffusion_paras,\n self.default_input)\n with gr.Row(equal_height=True):\n self.negative_prompt = gr.Textbox(\n label=self.component_names.negative_prompt,\n show_label=True,\n placeholder=self.component_names.negative_prompt_placeholder,\n info=self.component_names.negative_prompt_description,\n value=self.cur_paras.NEGATIVE_PROMPT.get('DEFAULT', ''),\n lines=2)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.prompt_prefix = gr.Textbox(\n label=self.component_names.prompt_prefix,\n value=self.cur_paras.PROMPT_PREFIX.get('DEFAULT', ''),\n interactive=True)\n with gr.Column(scale=2):\n self.sampler = gr.Dropdown(\n label=self.component_names.sample,\n choices=self.cur_paras.SAMPLE.get('VALUES', []),\n 
value=self.cur_paras.SAMPLE.get('DEFAULT', ''),\n interactive=True)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.discretization = gr.Dropdown(\n label=self.component_names.discretization,\n choices=self.cur_paras.DISCRETIZATION.get('VALUES', []),\n value=self.cur_paras.DISCRETIZATION.get('DEFAULT', ''),\n interactive=True)\n self.cur_h_level_dict, default_res = self.merge_resolutions(\n self.h_level_dict, self.default_resolutions)\n with gr.Column(scale=1):\n self.output_height = gr.Dropdown(\n label=self.component_names.resolutions_height,\n choices=[key for key in self.cur_h_level_dict.keys()],\n value=default_res[0],\n interactive=True)\n with gr.Column(scale=1):\n self.output_width = gr.Dropdown(\n label=self.component_names.resolutions_width,\n choices=self.cur_h_level_dict[default_res[0]],\n value=default_res[1],\n interactive=True)\n with gr.Row(equal_height=True):\n self.image_number = gr.Slider(\n label=self.component_names.image_number,\n minimum=self.cur_paras.SAMPLES.get('MIN', 1),\n maximum=self.cur_paras.SAMPLES.get('MAX', 4),\n step=1,\n value=self.cur_paras.SAMPLES.get('DEFAULT', 1),\n interactive=True)\n with gr.Row(equal_height=True):\n self.sample_steps = gr.Slider(\n label=self.component_names.sample_steps,\n minimum=self.cur_paras.SAMPLE_STEPS.get('MIN', 1),\n maximum=self.cur_paras.SAMPLE_STEPS.get('MAX', 100),\n step=1,\n value=self.cur_paras.SAMPLE_STEPS.get('DEFAULT', 30),\n interactive=True)\n\n self.guide_scale = gr.Slider(\n label=self.component_names.guide_scale,\n minimum=self.cur_paras.GUIDE_SCALE.get('MIN', 1),\n maximum=self.cur_paras.GUIDE_SCALE.get('MAX', 10),\n step=0.5,\n value=self.cur_paras.GUIDE_SCALE.get('DEFAULT', 7.5),\n interactive=True)\n self.guide_rescale = gr.Slider(\n label=self.component_names.guide_rescale,\n minimum=self.cur_paras.GUIDE_RESCALE.get('MIN', 1),\n maximum=self.cur_paras.GUIDE_RESCALE.get('MAX', 1.0),\n step=0.1,\n value=self.cur_paras.GUIDE_RESCALE.get('DEFAULT', 0.5),\n interactive=True)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.seed_random = gr.Checkbox(\n label=self.component_names.random_seed, value=True)\n with gr.Row(equal_height=True, visible=False) as self.seed_panel:\n with gr.Column(scale=2):\n self.image_seed = gr.Textbox(label=self.component_names.seed,\n value=-1,\n max_lines=1,\n interactive=True)\n with gr.Column(scale=1):\n self.refresh_seed = gr.Button(value=refresh_symbol)\n\n def set_callbacks(self, model_manage_ui):\n def random_checked(r):\n value = -1\n return (gr.Row(visible=not r), gr.Textbox(value=value))\n\n def refresh_seed():\n return random.randint(0, 10**12)\n\n self.seed_random.change(random_checked,\n inputs=[self.seed_random],\n outputs=[self.seed_panel, self.image_seed],\n queue=False,\n show_progress=False)\n self.refresh_seed.click(refresh_seed, outputs=[self.image_seed])\n\n def change_height(h):\n if h not in self.cur_h_level_dict:\n return gr.Dropdown()\n all_choices = self.cur_h_level_dict[h]\n if len(all_choices) > 0:\n default = all_choices[-1]\n else:\n default = -1\n return gr.Dropdown(choices=all_choices, value=default)\n\n self.output_height.change(change_height,\n inputs=[self.output_height],\n outputs=[self.output_width])" }, { "identifier": "GalleryUI", "path": "scepter/studio/inference/inference_ui/gallery_ui.py", "snippet": "class GalleryUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.pipe_manager = pipe_manager\n self.component_names = GalleryUIName(language)\n\n def create_ui(self, 
*args, **kwargs):\n with gr.Group():\n gr.Markdown(value=self.component_names.gallery_block_name)\n with gr.Row(variant='panel', equal_height=True):\n with gr.Column(scale=2, min_width=0,\n visible=False) as self.before_refine_panel:\n self.before_refine_gallery = gr.Gallery(\n label=self.component_names.\n gallery_before_refine_output,\n value=[])\n with gr.Column(scale=2, min_width=0):\n self.output_gallery = gr.Gallery(\n label=self.component_names.gallery_diffusion_output,\n value=[])\n with gr.Row(elem_classes='type_row'):\n with gr.Column(scale=17):\n self.prompt = gr.Textbox(\n show_label=False,\n placeholder=self.component_names.prompt_input,\n elem_id='positive_prompt',\n container=False,\n autofocus=True,\n elem_classes='type_row',\n lines=1)\n\n with gr.Column(scale=3, min_width=0):\n self.generate_button = gr.Button(\n label='Generate',\n value=self.component_names.generate,\n elem_classes='type_row',\n elem_id='generate_button',\n visible=True)\n\n def set_callbacks(self, inference_ui, model_manage_ui, diffusion_ui,\n mantra_ui, tuner_ui, refiner_ui, control_ui):\n def generate_image(\n mantra_state, tuner_state, control_state, diffusion_model,\n first_stage_model, cond_stage_model, refiner_cond_model,\n refiner_diffusion_model, tuner_model, custom_tuner_model,\n control_model, crop_type, control_cond_image, prompt,\n negative_prompt, prompt_prefix, sample, discretization,\n output_height, output_width, image_number, sample_steps,\n guide_scale, guide_rescale, refine_state, refine_strength,\n refine_sampler, refine_discretization, refine_guide_scale,\n refine_guide_rescale, style_template, style_negative_template,\n image_seed):\n current_pipeline = self.pipe_manager.get_pipeline_given_modules({\n 'diffusion_model':\n diffusion_model,\n 'first_stage_model':\n first_stage_model,\n 'cond_stage_model':\n cond_stage_model,\n 'refiner_cond_model':\n refiner_cond_model,\n 'refiner_diffusion_model':\n refiner_diffusion_model\n })\n now_pipeline = self.pipe_manager.model_level_info[diffusion_model][\n 'pipeline'][0]\n used_tuner_model = []\n if not isinstance(tuner_model, list):\n tuner_model = [tuner_model]\n for tuner_m in tuner_model:\n if tuner_m is None or tuner_m == '':\n continue\n if (now_pipeline\n in self.pipe_manager.model_level_info['tuners']\n and tuner_m in self.pipe_manager.\n model_level_info['tuners'][now_pipeline]):\n tuner_m = self.pipe_manager.model_level_info['tuners'][\n now_pipeline][tuner_m]['model_info']\n used_tuner_model.append(tuner_m)\n used_custom_tuner_model = []\n if not isinstance(custom_tuner_model, list):\n custom_tuner_model = [custom_tuner_model]\n for tuner_m in custom_tuner_model:\n if tuner_m is None or tuner_m == '':\n continue\n if (now_pipeline in\n self.pipe_manager.model_level_info['customized_tuners']\n and tuner_m in self.pipe_manager.\n model_level_info['customized_tuners'][now_pipeline]):\n tuner_m = self.pipe_manager.model_level_info[\n 'customized_tuners'][now_pipeline][tuner_m][\n 'model_info']\n used_custom_tuner_model.append(tuner_m)\n\n if (now_pipeline\n in self.pipe_manager.model_level_info['controllers']\n and control_model in self.pipe_manager.\n model_level_info['controllers'][now_pipeline]):\n control_model = self.pipe_manager.model_level_info[\n 'controllers'][now_pipeline][control_model]['model_info']\n\n prompt_rephrased = style_template.replace(\n '{prompt}', prompt\n ) if not style_template == '' and mantra_state else prompt\n prompt_rephrased = f'{prompt_prefix}{prompt_rephrased}' if not prompt_prefix == '' else 
prompt_rephrased\n negative_prompt_rephrased = negative_prompt + style_negative_template if mantra_state else negative_prompt\n pipeline_input = {\n 'prompt': prompt_rephrased,\n 'negative_prompt': negative_prompt_rephrased,\n 'sample': sample,\n 'sample_steps': sample_steps,\n 'discretization': discretization,\n 'original_size_as_tuple':\n [int(output_height), int(output_width)],\n 'target_size_as_tuple':\n [int(output_height), int(output_width)],\n 'crop_coords_top_left': [0, 0],\n 'guide_scale': guide_scale,\n 'guide_rescale': guide_rescale,\n }\n if refine_state:\n pipeline_input['refine_sampler'] = refine_sampler\n pipeline_input['refine_discretization'] = refine_discretization\n pipeline_input['refine_guide_scale'] = refine_guide_scale\n pipeline_input['refine_guide_rescale'] = refine_guide_rescale\n else:\n refine_strength = 0\n results = current_pipeline(\n pipeline_input,\n num_samples=image_number,\n intermediate_callback=None,\n refine_strength=refine_strength,\n img_to_img_strength=0,\n tuner_model=used_tuner_model +\n used_custom_tuner_model if tuner_state else None,\n control_model=control_model if control_state else None,\n control_cond_image=control_cond_image\n if control_state else None,\n crop_type=crop_type if control_state else None,\n seed=int(image_seed))\n images = []\n before_images = []\n if 'images' in results:\n images_tensor = results['images'] * 255\n images = [\n Image.fromarray(images_tensor[idx].permute(\n 1, 2, 0).cpu().numpy().astype(np.uint8))\n for idx in range(images_tensor.shape[0])\n ]\n if 'before_refine_images' in results and results[\n 'before_refine_images'] is not None:\n before_refine_images_tensor = results[\n 'before_refine_images'] * 255\n before_images = [\n Image.fromarray(before_refine_images_tensor[idx].permute(\n 1, 2, 0).cpu().numpy().astype(np.uint8))\n for idx in range(before_refine_images_tensor.shape[0])\n ]\n if 'seed' in results:\n print(results['seed'])\n print(images, before_images)\n return (\n gr.Column(visible=len(before_images) > 0),\n before_images,\n images,\n )\n\n self.generate_button.click(\n generate_image,\n inputs=[\n inference_ui.mantra_state, inference_ui.tuner_state,\n inference_ui.control_state, model_manage_ui.diffusion_model,\n model_manage_ui.first_stage_model,\n model_manage_ui.cond_stage_model,\n refiner_ui.refiner_cond_model,\n refiner_ui.refiner_diffusion_model, tuner_ui.tuner_model,\n tuner_ui.custom_tuner_model, control_ui.control_model,\n control_ui.crop_type, control_ui.cond_image, self.prompt,\n diffusion_ui.negative_prompt, diffusion_ui.prompt_prefix,\n diffusion_ui.sampler, diffusion_ui.discretization,\n diffusion_ui.output_height, diffusion_ui.output_width,\n diffusion_ui.image_number, diffusion_ui.sample_steps,\n diffusion_ui.guide_scale, diffusion_ui.guide_rescale,\n refiner_ui.refine_state, refiner_ui.refine_strength,\n refiner_ui.refine_sampler, refiner_ui.refine_discretization,\n refiner_ui.refine_guide_scale, refiner_ui.refine_guide_rescale,\n mantra_ui.style_template, mantra_ui.style_negative_template,\n diffusion_ui.image_seed\n ],\n outputs=[\n self.before_refine_panel, self.before_refine_gallery,\n self.output_gallery\n ])" }, { "identifier": "MantraUI", "path": "scepter/studio/inference/inference_ui/mantra_ui.py", "snippet": "class MantraUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.cfg = cfg\n self.language = language\n self.pipe_manager = pipe_manager\n default_choices = pipe_manager.module_level_choices\n default_diffusion_model = 
default_choices['diffusion_model']['default']\n self.default_pipeline = pipe_manager.model_level_info[\n default_diffusion_model]['pipeline'][0]\n self.cfg_mantra = cfg.MANTRAS\n self.name_level_style, self.all_styles = self.load_all_styles()\n self.component_names = MantraUIName(language)\n\n def load_all_styles(self):\n all_styles = {}\n name_level_style = {}\n for one_style in tqdm(self.cfg_mantra):\n if one_style.BASE_MODEL not in name_level_style:\n name_level_style[one_style.BASE_MODEL] = {}\n if one_style.BASE_MODEL not in all_styles:\n all_styles[one_style.BASE_MODEL] = []\n if self.language == 'zh':\n name_level_style[one_style.BASE_MODEL][\n one_style.NAME_ZH] = one_style\n all_styles[one_style.BASE_MODEL].append(one_style.NAME_ZH)\n else:\n name_level_style[one_style.BASE_MODEL][\n one_style.NAME] = one_style\n all_styles[one_style.BASE_MODEL].append(one_style.NAME)\n # if one_style.get('IMAGE_PATH', None):\n # one_style.IMAGE_PATH = FS.get_from(one_style.IMAGE_PATH)\n return name_level_style, all_styles\n\n def create_ui(self, *args, **kwargs):\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n with gr.Group(visible=True):\n with gr.Row(equal_height=True):\n self.style = gr.Dropdown(\n label=self.component_names.mantra_styles,\n choices=self.all_styles[self.default_pipeline],\n value=None,\n multiselect=True,\n interactive=True)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.style_name = gr.Text(\n value='',\n label=self.component_names.style_name)\n with gr.Column(scale=1):\n self.style_source = gr.Text(\n value='',\n label=self.component_names.style_source)\n with gr.Column(scale=1):\n self.style_desc = gr.Text(\n value='',\n label=self.component_names.style_desc)\n with gr.Row(equal_height=True):\n self.style_prompt = gr.Text(\n value='',\n label=self.component_names.style_prompt,\n lines=4)\n with gr.Row(equal_height=True):\n self.style_negative_prompt = gr.Text(\n value='',\n label=self.component_names.style_negative_prompt,\n lines=4)\n with gr.Column(scale=1):\n with gr.Group(visible=True):\n with gr.Row(equal_height=True):\n self.style_template = gr.Text(\n value='',\n label=self.component_names.style_template,\n lines=2)\n with gr.Row(equal_height=True):\n self.style_negative_template = gr.Text(\n value='',\n label=self.component_names.style_negative_template,\n lines=2)\n with gr.Row(equal_height=True):\n self.style_example = gr.Image(\n label=self.component_names.style_example,\n source='upload',\n value=None,\n interactive=False)\n with gr.Row(equal_height=True):\n self.style_example_prompt = gr.Text(\n value='',\n label=self.component_names.style_example_prompt,\n lines=2)\n\n def set_callbacks(self, model_manage_ui):\n def change_style(style, diffusion_model):\n style_template = ''\n style_negative_template = []\n if len(style) > 0:\n style_name = style[-1]\n diffusion_model_info = self.pipe_manager.model_level_info[\n diffusion_model]\n now_pipeline = diffusion_model_info['pipeline'][0]\n style_info = self.name_level_style[now_pipeline].get(\n style_name, {})\n for st in style:\n c_style_info = self.name_level_style[now_pipeline].get(\n st, {})\n c_prompt = c_style_info.get('PROMPT', '')\n c_negative_prompt = c_style_info.get('NEGATIVE_PROMPT', '')\n if style_template == '':\n style_template = c_prompt\n elif '{prompt}' in style_template:\n if '{prompt}' in c_prompt:\n style_template = style_template.replace(\n '{prompt}', c_prompt)\n else:\n style_template += c_prompt\n style_negative_template.append(c_negative_prompt)\n else:\n 
style_name = ''\n style_info = {}\n style_negative_template = ','.join(style_negative_template)\n if style_info.get(\n 'IMAGE_PATH',\n None) and not os.path.exists(style_info.IMAGE_PATH):\n style_info.IMAGE_PATH = FS.get_from(style_info.IMAGE_PATH)\n return (gr.Text(value=style_name),\n gr.Text(value=style_info.get('SOURCE', '')),\n gr.Text(value=style_info.get('PROMPT', '')),\n gr.Text(value=style_info.get('NEGATIVE_PROMPT', '')),\n gr.Text(value=style_template),\n gr.Text(value=style_negative_template),\n gr.Image(value=style_info.get('IMAGE_PATH', None)),\n gr.Text(value=style_info.get('PROMPT_EXAMPLE', '')))\n\n self.style.change(change_style,\n inputs=[self.style, model_manage_ui.diffusion_model],\n outputs=[\n self.style_name, self.style_source,\n self.style_prompt, self.style_negative_prompt,\n self.style_template,\n self.style_negative_template, self.style_example,\n self.style_example_prompt\n ])" }, { "identifier": "ModelManageUI", "path": "scepter/studio/inference/inference_ui/model_manage_ui.py", "snippet": "class ModelManageUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.pipe_manager = pipe_manager\n self.default_choices = pipe_manager.module_level_choices\n self.component_names = ModelManageUIName(language)\n\n def create_ui(self, *args, **kwargs):\n with gr.Group():\n gr.Markdown(value=self.component_names.model_block_name)\n with gr.Row(variant='panel', equal_height=True):\n with gr.Column(scale=1, min_width=0) as self.diffusion_panel:\n self.diffusion_model = gr.Dropdown(\n label=self.component_names.diffusion_model,\n choices=self.default_choices['diffusion_model']\n ['choices'],\n value=self.default_choices['diffusion_model']\n ['default'],\n interactive=True)\n with gr.Column(scale=1, min_width=0) as self.first_stage_panel:\n self.first_stage_model = gr.Dropdown(\n label=self.component_names.first_stage_model,\n choices=self.default_choices['first_stage_model']\n ['choices'],\n value=self.default_choices['first_stage_model']\n ['default'],\n interactive=False)\n with gr.Column(scale=1, min_width=0) as self.cond_stage_panel:\n self.cond_stage_model = gr.Dropdown(\n label=self.component_names.cond_stage_model,\n choices=self.default_choices['cond_stage_model']\n ['choices'],\n value=self.default_choices['cond_stage_model']\n ['default'],\n interactive=False)\n # with gr.Accordion(\n # label=self.component_names.postprocess_model_name,\n # open=False):\n # with gr.Row(equal_height=True):\n # self.advance_postprocess_checkbox = gr.CheckboxGroup(\n # # choices=['Refiners', 'Tuners'], show_label=False)\n # choices=['Tuners'], show_label=False)\n # with gr.Row(equal_height=True,\n # visible=False) as self.refine_diffusion_panel:\n # with gr.Column(variant='panel', scale=1, min_width=0):\n # self.refiner_diffusion_model = gr.Dropdown(\n # label=self.component_names.refine_diffusion_model,\n # choices=self.default_choices[\n # 'refiner_diffusion_model']['choices'],\n # value=self.default_choices[\n # 'refiner_diffusion_model']['default'],\n # interactive=True)\n # with gr.Column(variant='panel', scale=1, min_width=0):\n # self.refiner_cond_model = gr.Dropdown(\n # label=self.component_names.refine_cond_model,\n # choices=self.default_choices['refiner_cond_model']\n # ['choices'],\n # value=self.default_choices['refiner_cond_model']\n # ['default'],\n # interactive=True)\n # with gr.Column(variant='panel', scale=1, min_width=0):\n # self.tuner_button = gr.Button(value=refresh_symbol)\n #\n # def refresh_choices():\n # return 
gr.update(choices=get_tuner_choices())\n #\n # self.tuner_button.click(refresh_choices, [],\n # [self.tuner_model])\n # with gr.Column(variant='panel', scale=4, min_width=0):\n # with gr.Group() as self.tuners_group:\n # with gr.Row(variant='panel') as self.tuners_panel:\n # with gr.Column(\n # scale=1,\n # min_width=0) as self.tuners_management:\n # self.load_Lora_tuner_btn = gr.Button(\n # value=self.component_names.\n # load_lora_tuner)\n # self.load_swift_tuner_btn = gr.Button(\n # value=self.component_names.\n # load_swift_tuner)\n # with gr.Column(scale=1,\n # min_width=0) as self.load_panel:\n # self.tuner_name = gr.Text(\n # label='tuner_name')\n # with gr.Row(variant='panel') as self.tuner_info:\n # with gr.Accordion(label=self.component_names.\n # postprocess_model_name,\n # open=False):\n # self.tuner_name = gr.Text(\n # label='tuner_name')\n\n def set_callbacks(self, diffusion_ui, tuner_ui, control_ui, mantra_ui):\n # def select_refine_tuner(all_select, evt: gr.SelectData):\n # if 'Refiners' in all_select:\n # refine_panel = gr.Row(visible=True)\n # refine_tab = gr.Group(visible=True)\n # refine_state = True\n # else:\n # refine_panel = gr.Row(visible=False)\n # refine_tab = gr.Group(visible=False)\n # refine_state = False\n # # if 'Tuners' in all_select:\n # # tuner_panel = gr.Row(visible=True)\n # # else:\n # # tuner_panel = gr.Row(visible=False)\n # return refine_panel, refine_tab, refine_state\n #\n # self.advance_postprocess_checkbox.select(\n # select_refine_tuner,\n # inputs=[self.advance_postprocess_checkbox],\n # outputs=[\n # self.refine_diffusion_panel, self.tuner_choice_panel,\n # advance_ui.refine_tab, advance_ui.refine_state\n # ])\n def diffusion_model_change(diffusion_model, control_mode):\n diffusion_model_info = self.pipe_manager.model_level_info[\n diffusion_model]\n now_pipeline = diffusion_model_info['pipeline'][0]\n pipeline_ins = self.pipe_manager.pipeline_level_modules[\n now_pipeline]\n all_module_name = {}\n for module_name in self.pipe_manager.module_list:\n module = getattr(pipeline_ins, module_name)\n if module is None:\n continue\n model_name = f\"{now_pipeline}_{module['name']}\"\n all_module_name[module_name] = model_name\n if now_pipeline in self.default_choices['tuners']:\n tunner_choices = self.default_choices['tuners'][now_pipeline][\n 'choices']\n else:\n tunner_choices = []\n\n if now_pipeline in self.default_choices[\n 'controllers'] and control_mode in self.default_choices[\n 'controllers'][now_pipeline]:\n controller_choices = self.default_choices['controllers'][\n now_pipeline][control_mode]['choices']\n controller_default = self.default_choices['controllers'][\n now_pipeline][control_mode]['default']\n else:\n controller_choices = []\n controller_default = ''\n\n default_resolutions = self.pipe_manager.pipeline_level_modules[\n now_pipeline].paras.RESOLUTIONS\n h_level_dict, default_res = diffusion_ui.merge_resolutions(\n diffusion_ui.h_level_dict, default_resolutions)\n diffusion_ui.cur_h_level_dict = h_level_dict\n\n default_input = self.pipe_manager.pipeline_level_modules[\n now_pipeline].input\n cur_paras = diffusion_ui.get_default(diffusion_ui.diffusion_paras,\n default_input)\n diffusion_ui.cur_paras = cur_paras\n return (\n gr.Dropdown(value=all_module_name['first_stage_model']),\n gr.Dropdown(value=all_module_name['cond_stage_model']),\n gr.Dropdown(choices=tunner_choices, value=None),\n gr.Dropdown(choices=controller_choices,\n value=controller_default),\n gr.Dropdown(choices=mantra_ui.all_styles[now_pipeline],\n value=[]),\n 
gr.Textbox(choices=cur_paras.NEGATIVE_PROMPT.get('VALUES', []),\n value=cur_paras.NEGATIVE_PROMPT.get('DEFAULT', '')),\n gr.Textbox(choices=cur_paras.PROMPT_PREFIX.get('VALUES', []),\n value=cur_paras.PROMPT_PREFIX.get('DEFAULT', '')),\n gr.Dropdown(choices=[key for key in h_level_dict.keys()],\n value=default_res[0]),\n gr.Dropdown(choices=cur_paras.SAMPLE.get('VALUES', []),\n value=cur_paras.SAMPLE.get('DEFAULT', '')),\n gr.Dropdown(choices=cur_paras.DISCRETIZATION.get('VALUES', []),\n value=cur_paras.DISCRETIZATION.get('DEFAULT', '')),\n gr.Slider(value=cur_paras.SAMPLE_STEPS.get('DEFAULT', 30)),\n gr.Slider(value=cur_paras.GUIDE_SCALE.get('DEFAULT', 7.5)),\n gr.Slider(value=cur_paras.GUIDE_RESCALE.get('DEFAULT', 0.5)))\n\n self.diffusion_model.change(\n diffusion_model_change,\n inputs=[self.diffusion_model, control_ui.control_mode],\n outputs=[\n self.first_stage_model, self.cond_stage_model,\n tuner_ui.tuner_model, control_ui.control_model,\n mantra_ui.style, diffusion_ui.negative_prompt,\n diffusion_ui.prompt_prefix, diffusion_ui.output_height,\n diffusion_ui.sampler, diffusion_ui.discretization,\n diffusion_ui.sample_steps, diffusion_ui.guide_scale,\n diffusion_ui.guide_rescale\n ])" }, { "identifier": "RefinerUI", "path": "scepter/studio/inference/inference_ui/refiner_ui.py", "snippet": "class RefinerUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.cfg = cfg\n self.pipe_manager = pipe_manager\n self.diffusion_paras = self.load_all_paras()\n self.component_names = RefinerUIName(language)\n\n def load_all_paras(self):\n diffusion_paras = self.cfg.DIFFUSION_PARAS\n return diffusion_paras\n\n def create_ui(self, *args, **kwargs):\n self.refine_state = gr.State(value=False)\n with gr.Group(visible=False) as self.refine_tab:\n with gr.Row(equal_height=True):\n with gr.Column(variant='panel', scale=1, min_width=0):\n self.refiner_diffusion_model = gr.Dropdown(\n label=self.component_names.refine_diffusion_model,\n choices=[],\n value=None,\n interactive=True)\n with gr.Column(variant='panel', scale=1, min_width=0):\n self.refiner_cond_model = gr.Dropdown(\n label=self.component_names.refine_cond_model,\n choices=[],\n value=None,\n interactive=True)\n with gr.Row(equal_height=True):\n self.refine_strength = gr.Slider(\n label=self.component_names.refine_strength,\n minimum=self.diffusion_paras.REFINE_STRENGTH.get(\n 'MIN', 0.0),\n maximum=self.diffusion_paras.REFINE_STRENGTH.get(\n 'MAX', 1.0),\n step=0.05,\n value=self.diffusion_paras.REFINE_STRENGTH.get(\n 'DEFAULT', 7.5),\n interactive=True)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.refine_sampler = gr.Dropdown(\n label=self.component_names.refine_sample,\n choices=self.diffusion_paras.REFINE_SAMPLERS.get(\n 'VALUES', []),\n value=self.diffusion_paras.REFINE_SAMPLERS.get(\n 'DEFAULT', ''),\n interactive=True)\n with gr.Column(scale=1):\n self.refine_discretization = gr.Dropdown(\n label=self.component_names.refine_discretization,\n choices=self.diffusion_paras.REFINE_DISCRETIZATION.get(\n 'VALUES', []),\n value=self.diffusion_paras.REFINE_DISCRETIZATION.get(\n 'DEFAULT', ''),\n interactive=True)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.refine_guide_scale = gr.Slider(\n label=self.component_names.refine_guide_scale,\n minimum=self.diffusion_paras.REFINE_GUIDE_SCALE.get(\n 'MIN', 1),\n maximum=self.diffusion_paras.REFINE_GUIDE_SCALE.get(\n 'MAX', 10),\n step=0.5,\n value=self.diffusion_paras.REFINE_GUIDE_SCALE.get(\n 'DEFAULT', 7.5),\n 
interactive=True)\n with gr.Column(scale=1):\n self.refine_guide_rescale = gr.Slider(\n label=self.component_names.refine_guide_rescale,\n minimum=self.diffusion_paras.REFINE_GUIDE_RESCALE.get(\n 'MIN', 1),\n maximum=self.diffusion_paras.REFINE_GUIDE_RESCALE.get(\n 'MAX', 1.0),\n step=0.1,\n value=self.diffusion_paras.REFINE_GUIDE_RESCALE.get(\n 'DEFAULT', 0.5),\n interactive=True)\n\n def set_callbacks(self):\n pass" }, { "identifier": "TunerUI", "path": "scepter/studio/inference/inference_ui/tuner_ui.py", "snippet": "class TunerUI(UIBase):\n def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\n self.cfg = cfg\n self.pipe_manager = pipe_manager\n self.default_choices = pipe_manager.module_level_choices\n default_diffusion_model = self.default_choices['diffusion_model'][\n 'default']\n self.default_pipeline = pipe_manager.model_level_info[\n default_diffusion_model]['pipeline'][0]\n if self.default_pipeline in self.default_choices['tuners']:\n self.tunner_choices = self.default_choices['tuners'][\n self.default_pipeline]['choices']\n self.tunner_default = self.default_choices['tuners'][\n self.default_pipeline]['default']\n else:\n self.tunner_choices = []\n\n self.tunner_default = None\n self.component_names = TunerUIName(language)\n self.cfg_tuners = cfg.TUNERS\n self.name_level_tuners = {}\n for one_tuner in tqdm(self.cfg_tuners):\n if one_tuner.BASE_MODEL not in self.name_level_tuners:\n self.name_level_tuners[one_tuner.BASE_MODEL] = {}\n # if one_tuner.get('IMAGE_PATH', None):\n # one_tuner.IMAGE_PATH = FS.get_from(one_tuner.IMAGE_PATH)\n if language == 'zh':\n self.name_level_tuners[one_tuner.BASE_MODEL][\n one_tuner.NAME_ZH] = one_tuner\n else:\n self.name_level_tuners[one_tuner.BASE_MODEL][\n one_tuner.NAME] = one_tuner\n\n def create_ui(self, *args, **kwargs):\n with gr.Row(equal_height=True):\n with gr.Column(variant='panel', scale=1, min_width=0):\n with gr.Group(visible=True):\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.tuner_model = gr.Dropdown(\n label=self.component_names.tuner_model,\n choices=self.tunner_choices,\n value=None,\n multiselect=True,\n interactive=True)\n with gr.Column(scale=1):\n self.custom_tuner_model = gr.Dropdown(\n label=self.component_names.custom_tuner_model,\n choices=[],\n value=None,\n multiselect=True,\n interactive=True)\n with gr.Row(equal_height=True):\n with gr.Column(scale=1):\n self.tuner_type = gr.Text(\n value='',\n label=self.component_names.tuner_type)\n with gr.Column(scale=1):\n self.base_model = gr.Text(\n value='',\n label=self.component_names.base_model)\n with gr.Column(scale=1):\n self.tuner_desc = gr.Text(\n value='',\n label=self.component_names.tuner_desc,\n lines=4)\n with gr.Column(variant='panel', scale=1, min_width=0):\n with gr.Group(visible=True):\n with gr.Row(equal_height=True):\n self.tuner_example = gr.Image(\n label=self.component_names.tuner_example,\n source='upload',\n value=None,\n interactive=False)\n with gr.Row(equal_height=True):\n self.tuner_prompt_example = gr.Text(\n value='',\n label=self.component_names.tuner_prompt_example,\n lines=2)\n\n def set_callbacks(self, model_manage_ui):\n def tuner_model_change(tuner_model, diffusion_model):\n diffusion_model_info = self.pipe_manager.model_level_info[\n diffusion_model]\n now_pipeline = diffusion_model_info['pipeline'][0]\n tuner_info = {}\n if tuner_model is not None and len(tuner_model) > 0:\n tuner_info = self.name_level_tuners[now_pipeline].get(\n tuner_model[-1], {})\n if tuner_info.get(\n 'IMAGE_PATH',\n None) 
and not os.path.exists(tuner_info.IMAGE_PATH):\n tuner_info.IMAGE_PATH = FS.get_from(tuner_info.IMAGE_PATH)\n return (gr.Text(value=tuner_info.get('TUNER_TYPE', '')),\n gr.Text(value=tuner_info.get('BASE_MODEL', '')),\n gr.Text(value=tuner_info.get('DESCRIPTION', '')),\n gr.Image(value=tuner_info.get('IMAGE_PATH', None)),\n gr.Text(value=tuner_info.get('PROMPT_EXAMPLE', '')))\n\n self.tuner_model.change(\n tuner_model_change,\n inputs=[self.tuner_model, model_manage_ui.diffusion_model],\n outputs=[\n self.tuner_type, self.base_model, self.tuner_desc,\n self.tuner_example, self.tuner_prompt_example\n ])" }, { "identifier": "init_env", "path": "scepter/studio/utils/env.py", "snippet": "def init_env(cfg_general):\n work_dir = cfg_general.WORK_DIR\n file_system = cfg_general.get('FILE_SYSTEM', None)\n if file_system is not None:\n if isinstance(file_system, list):\n for file_sys in file_system:\n _prefix = FS.init_fs_client(file_sys)\n elif file_system is not None:\n _prefix = FS.init_fs_client(file_system) # noqa\n is_flag = FS.make_dir(work_dir)\n assert is_flag\n return cfg_general" } ]
import os import gradio as gr import scepter from glob import glob from scepter.modules.utils.config import Config from scepter.modules.utils.file_system import FS from scepter.studio.inference.inference_manager.infer_runer import \ PipelineManager from scepter.studio.inference.inference_ui.component_names import \ InferenceUIName from scepter.studio.inference.inference_ui.control_ui import ControlUI from scepter.studio.inference.inference_ui.diffusion_ui import DiffusionUI from scepter.studio.inference.inference_ui.gallery_ui import GalleryUI from scepter.studio.inference.inference_ui.mantra_ui import MantraUI from scepter.studio.inference.inference_ui.model_manage_ui import ModelManageUI from scepter.studio.inference.inference_ui.refiner_ui import RefinerUI from scepter.studio.inference.inference_ui.tuner_ui import TunerUI from scepter.studio.utils.env import init_env
16,268
# -*- coding: utf-8 -*- class InferenceUI(): def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'): config_dir = os.path.dirname(cfg_general_file) cfg_general = Config(cfg_file=cfg_general_file) cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR) if not FS.exists(cfg_general.WORK_DIR): FS.make_dir(cfg_general.WORK_DIR) cfg_general = init_env(cfg_general) # official mantra mantra_book = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.MANTRA_BOOK)) cfg_general.MANTRAS = mantra_book.MANTRAS # official tuners official_tuners = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_TUNERS)) cfg_general.TUNERS = official_tuners.TUNERS official_controllers = Config(cfg_file=os.path.join( os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_CONTROLLERS)) cfg_general.CONTROLLERS = official_controllers.CONTROLLERS pipe_manager = PipelineManager() config_list = glob(os.path.join(config_dir, '*/*_pro.yaml'), recursive=True) for config_file in config_list: pipe_manager.register_pipeline(Config(cfg_file=config_file)) for one_tuner in cfg_general.TUNERS: pipe_manager.register_tuner( one_tuner, name=one_tuner.NAME_ZH if language == 'zh' else one_tuner.NAME) for one_controller in cfg_general.CONTROLLERS: pipe_manager.register_controllers(one_controller) self.model_manage_ui = ModelManageUI(cfg_general, pipe_manager, is_debug=is_debug, language=language)
# -*- coding: utf-8 -*- class InferenceUI(): def __init__(self, cfg_general_file, is_debug=False, language='en', root_work_dir='./'): config_dir = os.path.dirname(cfg_general_file) cfg_general = Config(cfg_file=cfg_general_file) cfg_general.WORK_DIR = os.path.join(root_work_dir, cfg_general.WORK_DIR) if not FS.exists(cfg_general.WORK_DIR): FS.make_dir(cfg_general.WORK_DIR) cfg_general = init_env(cfg_general) # official mantra mantra_book = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.MANTRA_BOOK)) cfg_general.MANTRAS = mantra_book.MANTRAS # official tuners official_tuners = Config( cfg_file=os.path.join(os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_TUNERS)) cfg_general.TUNERS = official_tuners.TUNERS official_controllers = Config(cfg_file=os.path.join( os.path.dirname(scepter.dirname), cfg_general.EXTENSION_PARAS.OFFICIAL_CONTROLLERS)) cfg_general.CONTROLLERS = official_controllers.CONTROLLERS pipe_manager = PipelineManager() config_list = glob(os.path.join(config_dir, '*/*_pro.yaml'), recursive=True) for config_file in config_list: pipe_manager.register_pipeline(Config(cfg_file=config_file)) for one_tuner in cfg_general.TUNERS: pipe_manager.register_tuner( one_tuner, name=one_tuner.NAME_ZH if language == 'zh' else one_tuner.NAME) for one_controller in cfg_general.CONTROLLERS: pipe_manager.register_controllers(one_controller) self.model_manage_ui = ModelManageUI(cfg_general, pipe_manager, is_debug=is_debug, language=language)
self.gallery_ui = GalleryUI(cfg_general,
6
2023-12-21 02:01:48+00:00
24k
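The snippet entries shown above and below each take the form of an object with "identifier", "path", and "snippet" keys, where the snippet text is kept as a single long string in which newlines appear as literal \n sequences. Below is a minimal sketch of how such an entry could be expanded back into readable source for inspection. It is illustrative only: the sample entry is an abbreviated copy of the GalleryUI object above, and the helper name render_entry is an assumption of this sketch, not something defined by this file.

# Minimal sketch: expand one {"identifier", "path", "snippet"} entry into readable source.
# The sample entry is abbreviated; real snippets in these records are much longer.
sample_entry = {
    "identifier": "GalleryUI",
    "path": "scepter/studio/inference/inference_ui/gallery_ui.py",
    "snippet": ("class GalleryUI(UIBase):\\n"
                "    def __init__(self, cfg, pipe_manager, is_debug=False, language='en'):\\n"
                "        self.pipe_manager = pipe_manager"),
}

def render_entry(entry: dict) -> str:
    # Turn the escaped "\n" sequences back into real line breaks and
    # prefix the result with the identifier and file path for context.
    header = f"# {entry['identifier']} ({entry['path']})"
    body = entry["snippet"].replace("\\n", "\n")
    return header + "\n" + body

if __name__ == "__main__":
    print(render_entry(sample_entry))

If these entries are stored as JSON, json.loads on the raw record would perform the same unescaping of the snippet string automatically.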
Ruiyuan-Zhang/CCS
multi_part_assembly/utils/wx_transformer_utilities/transformer_layer.py
[ { "identifier": "LayerNorm", "path": "multi_part_assembly/utils/wx_transformer_utilities/layer_norm.py", "snippet": "def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False):\n if not export and torch.cuda.is_available() and has_fused_layernorm:\n return FusedLayerNorm(normalized_shape, eps, elementwise_affine)\n return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine)" }, { "identifier": "MultiheadAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/multihead_attention.py", "snippet": "class MultiheadAttention(nn.Module):\n \"\"\"Multi-headed attention.\n\n See \"Attention Is All You Need\" for more details.\n \"\"\"\n\n def __init__(\n self,\n embed_dim,\n num_heads,\n kdim=None,\n vdim=None,\n dropout=0.0,\n bias=True,\n add_bias_kv=False,\n add_zero_attn=False,\n self_attention=False,\n encoder_decoder_attention=False,\n q_noise=0.0,\n qn_block_size=8,\n nblocks=1,\n top_k_ratio=None,\n use_value_competition=True,\n shared_memory_attention = False,\n use_topk = False,\n topk = 3,\n num_steps = 5,\n mem_slots = 4,\n null_attention = False,\n regressive = False\n ):\n super().__init__()\n self.embed_dim = embed_dim\n self.kdim = kdim if kdim is not None else embed_dim\n self.vdim = vdim if vdim is not None else embed_dim\n self.qkv_same_dim = self.kdim == embed_dim and self.vdim == embed_dim\n\n self.num_heads = num_heads\n self.dropout_module = FairseqDropout(\n dropout, module_name=self.__class__.__name__\n )\n\n self.head_dim = embed_dim // num_heads\n self.shared_memory_attention = shared_memory_attention\n\n print('total heads', self.num_heads)\n print('head dim', self.head_dim)\n\n self.use_topk = use_topk\n self.topk = topk\n\n print('use topk?' + str(self.use_topk))\n print('topk:'+str(self.topk))\n\n assert (\n self.head_dim * num_heads == self.embed_dim\n ), \"embed_dim must be divisible by num_heads\"\n self.scaling = self.head_dim ** -0.5\n\n self.self_attention = self_attention\n self.encoder_decoder_attention = encoder_decoder_attention\n\n assert not self.self_attention or self.qkv_same_dim, (\n \"Self-attention requires query, key and \" \"value to be of the same size\"\n )\n if not self.shared_memory_attention: # 这里的共享memory_attention是什么内容呢?表示的是不在不同的layer之间共享memory吗?\n self.k_proj = quant_noise(GroupLinearLayer(self.kdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.v_proj = quant_noise(GroupLinearLayer(self.vdim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.q_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n self.out_proj = quant_noise(GroupLinearLayer(embed_dim//nblocks, embed_dim//nblocks, nblocks, bias=bias), q_noise, qn_block_size)\n\n if add_bias_kv:\n self.bias_k = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v = Parameter(torch.Tensor(1, 1, embed_dim))\n if self.shared_memory_attention:\n self.bias_k_memory = Parameter(torch.Tensor(1, 1, embed_dim))\n self.bias_v_memory = Parameter(torch.Tensor(1, 1, embed_dim))\n else:\n self.bias_k = self.bias_v = None\n self.bias_k_memory = self.bias_v_memory = None\n\n self.add_zero_attn = add_zero_attn\n\n self.reset_parameters()\n\n self.onnx_trace = False\n self.tpu = False\n\n # 这里表示,如果共享memory_attention的话\n if self.shared_memory_attention:\n print('MEM SLOTS:' + str(mem_slots))\n print('Null attention:' + str(null_attention))\n print('USING SHARED MEMORY ATTENTION +++++++++')\n #self.num_heads = 1\n self.regressive 
= regressive\n if not regressive: \n self.relational_memory = RelationalMemory(\n mem_slots=mem_slots,\n head_size=self.head_dim , #128\n input_size=embed_dim,\n output_size=embed_dim,\n num_heads=self.num_heads, #1\n num_blocks=1,\n forget_bias=1,\n input_bias=0,\n gate_style=\"unit\",\n attention_mlp_layers=1,\n key_size=32,\n return_all_outputs=False,\n use_topk = self.use_topk,\n topk = self.topk,\n num_steps = num_steps, \n null_attention = null_attention\n )\n else:\n print('USING AUTO REGRESSIVE')\n self.relational_memory = RelationalMemoryRegressive(\n mem_slots=mem_slots,\n head_size=self.head_dim ,\n input_size=embed_dim,\n output_size=embed_dim,\n num_heads=self.num_heads,\n num_blocks=1,\n forget_bias=1,\n input_bias=0,\n gate_style=\"unit\",\n attention_mlp_layers=4,\n key_size=32,\n return_all_outputs=False,\n use_topk = self.use_topk,\n topk = self.topk,\n num_steps = num_steps,\n null_attention = False\n )\n self.memory_size = 128 #self.head_dim * self.num_heads\n '''\n self.mem_att = MHAMemory(\n n_head=4,\n d_model_read=embed_dim,\n d_model_write=self.memory_size,\n d_model_out=embed_dim,\n d_k=32,\n d_v=32,\n grad_sparse=False,\n )\n '''\n self.memory = None # 因为要共享self.memory,所以这里是为了占个位置\n\n def prepare_for_onnx_export_(self):\n self.onnx_trace = True\n\n def prepare_for_tpu_(self, **kwargs):\n self.tpu = True\n\n def reset_parameters(self):\n if self.qkv_same_dim:\n # Empirically observed the convergence to be much better with\n # the scaled initialization\n nn.init.xavier_uniform_(self.k_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj.weight, gain=1 / math.sqrt(2))\n if self.shared_memory_attention:\n nn.init.xavier_uniform_(self.k_proj_memory.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.v_proj_memory.weight, gain=1 / math.sqrt(2))\n nn.init.xavier_uniform_(self.q_proj_memory.weight, gain=1 / math.sqrt(2))\n\n else:\n nn.init.xavier_uniform_(self.k_proj.weight)\n nn.init.xavier_uniform_(self.v_proj.weight)\n nn.init.xavier_uniform_(self.q_proj.weight)\n\n #if self.shared_memory_attention:\n # nn.init.xavier_uniform_(self.k_proj_memory.weight)\n # nn.init.xavier_uniform_(self.v_proj_memory.weight)\n # nn.init.xavier_uniform_(self.q_proj_memory.weight)\n\n nn.init.xavier_uniform_(self.out_proj.weight)\n #if self.shared_memory_attention:\n # nn.init.xavier_uniform_(self.out_proj_memory.weight)\n \n if self.out_proj.bias is not None:\n nn.init.constant_(self.out_proj.bias, 0.)\n\n #if self.shared_memory_attention and self.out_proj_memory.bias is not None:\n # nn.init.constant_(self.out_proj.bias, 0.)\n \n if self.bias_k is not None:\n nn.init.xavier_normal_(self.bias_k)\n if self.bias_v is not None:\n nn.init.xavier_normal_(self.bias_v)\n\n #if self.shared_memory_attention:\n # if self.bias_k is not None:\n # nn.init.xavier_normal_(self.bias_k_memory)\n # if self.bias_v is not None:\n # nn.init.xavier_normal_(self.bias_v_memory)\n\n\n def forward(\n self,\n query,\n key: Optional[Tensor],\n value: Optional[Tensor],\n key_padding_mask: Optional[Tensor] = None,\n incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]] = None,\n need_weights: bool = True,\n static_kv: bool = False,\n attn_mask: Optional[Tensor] = None,\n before_softmax: bool = False,\n need_head_weights: bool = False,\n comp = None,\n memory = None\n ) -> Tuple[Tensor, Optional[Tensor]]:\n \"\"\"Input shape: Time x Batch x Channel\n\n Args:\n key_padding_mask (ByteTensor, optional): 
mask to exclude\n keys that are pads, of shape `(batch, src_len)`, where\n padding elements are indicated by 1s.\n need_weights (bool, optional): return the attention weights,\n averaged over heads (default: False).\n attn_mask (ByteTensor, optional): typically used to\n implement causal attention, where the mask prevents the\n attention from looking forward in time (default: None).\n before_softmax (bool, optional): return the raw attention\n weights and values before the attention softmax.\n need_head_weights (bool, optional): return the attention\n weights for each head. Implies *need_weights*. Default:\n return the average attention weights over all heads.\n \"\"\"\n if need_head_weights:\n need_weights = True\n\n tgt_len, bsz, embed_dim = query.size()\n assert embed_dim == self.embed_dim\n assert list(query.size()) == [tgt_len, bsz, embed_dim]\n\n if (\n not self.onnx_trace\n and not self.tpu # don't use PyTorch version on TPUs\n and incremental_state is None\n and not static_kv\n # A workaround for quantization to work. Otherwise JIT compilation\n # treats bias in linear module as method.\n and not torch.jit.is_scripting()\n and False\n ):\n assert key is not None and value is not None\n if self.shared_memory_attention:\n memory,_ = F.multi_head_attention_forward(\n memory,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj_memory.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj_memory.weight,\n self.out_proj_memory.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj_memory.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n )\n out,weights = F.multi_head_attention_forward(\n query,\n memory,\n memory,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj_memory.bias, self.v_proj_memory.bias)),\n self.bias_k_memory,\n self.bias_v_memory,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj_memory.weight,\n v_proj_weight=self.v_proj_memory.weight,\n )\n else:\n out, weights = F.multi_head_attention_forward(\n query,\n key,\n value,\n self.embed_dim,\n self.num_heads,\n torch.empty([0]),\n torch.cat((self.q_proj.bias, self.k_proj.bias, self.v_proj.bias)),\n self.bias_k,\n self.bias_v,\n self.add_zero_attn,\n self.dropout_module.p,\n self.out_proj.weight,\n self.out_proj.bias,\n self.training or self.dropout_module.apply_during_inference,\n key_padding_mask,\n need_weights,\n attn_mask,\n use_separate_proj_weight=True,\n q_proj_weight=self.q_proj.weight,\n k_proj_weight=self.k_proj.weight,\n v_proj_weight=self.v_proj.weight,\n\n ) \n\n return out, memory, weights\n\n if incremental_state is not None:\n saved_state = self._get_input_buffer(incremental_state)\n if saved_state is not None and \"prev_key\" in saved_state:\n # previous time steps are cached - no need to recompute\n # key and value if they are static\n if static_kv:\n assert self.encoder_decoder_attention and not self.self_attention\n key = value = None\n else:\n saved_state = None\n\n # 如果不共享memory attention\n if not self.shared_memory_attention:\n\n 
t1 = time.time()\n\n if self.self_attention:\n q = self.q_proj(query)\n k = self.k_proj(query)\n v = self.v_proj(query)\n elif self.encoder_decoder_attention:\n # encoder-decoder attention\n q = self.q_proj(query)\n if key is None:\n assert value is None\n k = v = None\n else:\n k = self.k_proj(key)\n v = self.v_proj(key)\n\n else:\n assert key is not None and value is not None\n \n q = self.q_proj(query)\n k = self.k_proj(key)\n v = self.v_proj(value)\n\n if comp is not None:\n v = v * comp\n #v_memory = v_memory * comp\n q *= self.scaling\n #q_memory *= self.scaling\n\n if self.bias_k is not None:\n assert self.bias_v is not None\n k = torch.cat([k, self.bias_k.repeat(1, bsz, 1)])\n v = torch.cat([v, self.bias_v.repeat(1, bsz, 1)])\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n key_padding_mask.new_zeros(key_padding_mask.size(0), 1),\n ],\n dim=1,\n )\n\n q = (\n q.contiguous()\n .view(tgt_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if k is not None:\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n if v is not None:\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n \n if saved_state is not None:\n # saved states are stored with shape (bsz, num_heads, seq_len, head_dim)\n if \"prev_key\" in saved_state:\n _prev_key = saved_state[\"prev_key\"]\n assert _prev_key is not None\n prev_key = _prev_key.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n k = prev_key\n else:\n assert k is not None\n k = torch.cat([prev_key, k], dim=1)\n if \"prev_value\" in saved_state:\n _prev_value = saved_state[\"prev_value\"]\n assert _prev_value is not None\n prev_value = _prev_value.view(bsz * self.num_heads, -1, self.head_dim)\n if static_kv:\n v = prev_value\n else:\n assert v is not None\n v = torch.cat([prev_value, v], dim=1)\n prev_key_padding_mask: Optional[Tensor] = None\n if \"prev_key_padding_mask\" in saved_state:\n prev_key_padding_mask = saved_state[\"prev_key_padding_mask\"]\n assert k is not None and v is not None\n key_padding_mask = MultiheadAttention._append_prev_key_padding_mask(\n key_padding_mask=key_padding_mask,\n prev_key_padding_mask=prev_key_padding_mask,\n batch_size=bsz,\n src_len=k.size(1),\n static_kv=static_kv,\n )\n\n saved_state[\"prev_key\"] = k.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_value\"] = v.view(bsz, self.num_heads, -1, self.head_dim)\n saved_state[\"prev_key_padding_mask\"] = key_padding_mask\n # In this branch incremental_state is never None\n assert incremental_state is not None\n incremental_state = self._set_input_buffer(incremental_state, saved_state)\n assert k is not None\n src_len = k.size(1)\n\n # This is part of a workaround to get around fork/join parallelism\n # not supporting Optional types.\n if key_padding_mask is not None and key_padding_mask.dim() == 0:\n key_padding_mask = None\n\n if key_padding_mask is not None:\n assert key_padding_mask.size(0) == bsz\n assert key_padding_mask.size(1) == src_len\n\n if self.add_zero_attn:\n assert v is not None\n src_len += 1\n k = torch.cat([k, k.new_zeros((k.size(0), 1) + k.size()[2:])], dim=1)\n v = torch.cat([v, v.new_zeros((v.size(0), 1) + v.size()[2:])], dim=1)\n if attn_mask is not None:\n attn_mask = torch.cat(\n [attn_mask, attn_mask.new_zeros(attn_mask.size(0), 1)], dim=1\n )\n if 
key_padding_mask is not None:\n key_padding_mask = torch.cat(\n [\n key_padding_mask,\n torch.zeros(key_padding_mask.size(0), 1).type_as(\n key_padding_mask\n ),\n ],\n dim=1,\n )\n\n attn_weights = torch.bmm(q, k.transpose(1, 2))\n attn_weights = MultiheadAttention.apply_sparse_mask(attn_weights, tgt_len, src_len, bsz)\n\n assert list(attn_weights.size()) == [bsz * self.num_heads, tgt_len, src_len]\n\n if attn_mask is not None:\n attn_mask = attn_mask.unsqueeze(0)\n if self.onnx_trace:\n attn_mask = attn_mask.repeat(attn_weights.size(0), 1, 1)\n attn_weights += attn_mask\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)\n if not self.tpu:\n attn_weights = attn_weights.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\")\n )\n else:\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.masked_fill(key_padding_mask, float('-inf'))\n attn_weights = attn_weights.transpose(0, 2)\n attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)\n\n if before_softmax:\n return attn_weights, v\n \n # 是这个\n attn_weights_float = utils.softmax(\n attn_weights, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights = attn_weights_float.type_as(attn_weights)\n attn_probs = self.dropout_module(attn_weights)\n\n assert v is not None\n if self.use_topk:\n k = torch.topk(attn_probs, dim = 2, k = self.topk)\n mask = torch.zeros(attn_probs.size()).to(attn_probs.device)\n mask.scatter_(2, k.indices, 1)\n attn_probs = attn_probs * mask\n attn = torch.bmm(attn_probs, v)\n assert list(attn.size()) == [bsz * self.num_heads, tgt_len, self.head_dim]\n if self.onnx_trace and attn.size(1) == 1:\n # when ONNX tracing a single decoder step (sequence length == 1)\n # the transpose is a no-op copy before view, thus unnecessary\n attn = attn.contiguous().view(tgt_len, bsz, embed_dim)\n else:\n attn = attn.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)\n attn = self.out_proj(attn)\n attn_weights: Optional[Tensor] = None\n if need_weights:\n attn_weights = attn_weights_float.view(\n bsz, self.num_heads, tgt_len, src_len\n ).transpose(1, 0)\n if not need_head_weights:\n # average attention weights over heads\n attn_weights = attn_weights.mean(dim=0)\n #print('time taken by default mha:' + str(time.time() - t1))\n return attn, None, attn_weights\n \n else: # 共享注意力机制 memory\n t1 = time.time()\n\n # 这个是共享memory的时候\n if self.memory is None:\n self.memory = self.relational_memory.initial_state(query.size(1), query.size(0)).to(query.device)\n\n self.memory = self.memory.to(query.device)\n\n #print(self.memory.size())\n \n \n key = key.transpose(1, 0)\n\n #print(key.size())\n #memory = self.memory[:key.size(0)]\n #print(self.memory.size())\n\n t2 = time.time()\n\n #print(self.memory)\n\n # self.memory只是一个memory更新的方式,它并不是workspace吧!!! 
lm-workspace这篇代码是不是搞错了\n # 那这个 self.memory \n # 这里是对memory进行更新\n # 利用relational_memory 来对 workspace中的memory进行更新\n _,_, self.memory, out_hx_mem_new = self.relational_memory(\n inputs=key,\n memory=self.memory#.cuda(),\n )\n #print('time taken by relational:' + str(time.time() - t2))\n\n\n\n #query = query.transpose(1, 0)\n #if self.regressive:\n # B, T, D = query.size()\n # query = query.reshape(B * T, -1).unsqueeze(1)\n #out_hx_mem_new, _, _ = self.mem_att(\n # query,#.reshape((bsz, self.num_blocks_out, self.block_size_out)),\n # self.memory,\n # self.memory,\n # )\n\n #z = torch.zeros(self.memory.size(0) - memory.size(0), memory.size(1), memory.size(2)).to(memory.device)\n #memory = torch.cat((memory, z), dim = 0)\n #self.memory = self.memory + memory\n #print('time taken by shared mha:' + str(time.time() - t1))\n #if self.regressive:\n # out_hx_mem_new = out_hx_mem_new.squeeze(1)\n # out_hx_mem_new = out_hx_mem_new.reshape(B, T, -1)\n\n # 这里的memory实际上没啥用处了,emmm 我觉得\n return out_hx_mem_new.transpose(0, 1), memory, None\n \"\"\"\n\n tgt_len = memory.size(0)\n src_len = key.size(0)\n q_memory = self.q_proj_memory(memory)\n k = self.k_proj(key)\n v = self.v_proj(value)\n\n q_memory = (\n q_memory.contiguous()\n .view(memory.size(0), bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n k = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n v = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n \n\n attn_weights_1 = torch.bmm(q_memory, k.transpose(1, 2))\n\n if key_padding_mask is not None:\n # don't attend to padding symbols\n attn_weights_1 = attn_weights_1.view(bsz, self.num_heads, tgt_len, src_len)\n attn_weights_1 = attn_weights_1.masked_fill(\n key_padding_mask.unsqueeze(1).unsqueeze(2).to(torch.bool),\n float(\"-inf\")\n )\n\n attn_weights_float_1 = utils.softmax(\n attn_weights_1, dim=-1, onnx_trace=self.onnx_trace\n )\n attn_weights_1 = attn_weights_float_1.type_as(attn_weights_1)\n attn_probs_1 = self.dropout_module(attn_weights_1)\n\n assert v is not None\n memory = torch.bmm(attn_probs_1, v)\n\n memory = memory.permute(1, 0, 2)\n memory = memory.reshape(memory.size(0), bsz, self.num_heads, -1)\n memory = memory.reshape(memory.size(0), bsz, -1)\n\n\n\n q = self.q_proj(query)\n \n k_memory = self.k_proj_memory(memory)\n v_memory = self.v_proj_memory(memory)\n\n q = (\n q.contiguous()\n .view(src_len, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n k_memory = (\n k.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n v_memory = (\n v.contiguous()\n .view(-1, bsz * self.num_heads, self.head_dim)\n .transpose(0, 1)\n )\n\n attn_weights_2 = torch.bmm(q, k_memory.transpose(1, 2))\n \n attn_weights_float_2 = utils.softmax(\n attn_weights_2, dim=-1, onnx_trace=self.onnx_trace\n )\n \n attn_weights_2 = attn_weights_float_2.type_as(attn_weights_2)\n attn_probs_2 = self.dropout_module(attn_weights_2)\n\n out = torch.bmm(attn_probs_2, v)\n out = out.transpose(0, 1).contiguous().view(src_len, bsz, embed_dim)\n return out, memory, None\n \"\"\"\n \n # 共享参数的时候,或者是共享memory attn的时候,\n def init_memory(self, bs, ts = None, device = None):\n if not self.regressive:\n self.memory = self.relational_memory.initial_state(bs).to(device)\n else:\n self.memory = self.relational_memory.initial_state(bs, ts).to(device)\n\n\n @staticmethod\n def _append_prev_key_padding_mask(\n key_padding_mask: Optional[Tensor],\n prev_key_padding_mask: Optional[Tensor],\n 
batch_size: int,\n src_len: int,\n static_kv: bool,\n ) -> Optional[Tensor]:\n # saved key padding masks have shape (bsz, seq_len)\n if prev_key_padding_mask is not None and static_kv:\n new_key_padding_mask = prev_key_padding_mask\n elif prev_key_padding_mask is not None and key_padding_mask is not None:\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), key_padding_mask.float()], dim=1\n )\n # During incremental decoding, as the padding token enters and\n # leaves the frame, there will be a time when prev or current\n # is None\n elif prev_key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - prev_key_padding_mask.size(1)),\n device=prev_key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [prev_key_padding_mask.float(), filler.float()], dim=1\n )\n elif key_padding_mask is not None:\n filler = torch.zeros(\n (batch_size, src_len - key_padding_mask.size(1)),\n device=key_padding_mask.device,\n )\n new_key_padding_mask = torch.cat(\n [filler.float(), key_padding_mask.float()], dim=1\n )\n else:\n new_key_padding_mask = prev_key_padding_mask\n return new_key_padding_mask\n\n @torch.jit.export\n def reorder_incremental_state(\n self, incremental_state: Dict[str, Dict[str, Optional[Tensor]]], new_order: Tensor\n ):\n \"\"\"Reorder buffered internal state (for incremental generation).\"\"\"\n input_buffer = self._get_input_buffer(incremental_state)\n if input_buffer is not None:\n for k in input_buffer.keys():\n input_buffer_k = input_buffer[k]\n if input_buffer_k is not None:\n if self.encoder_decoder_attention and input_buffer_k.size(0) == new_order.size(0):\n break\n input_buffer[k] = input_buffer_k.index_select(0, new_order)\n incremental_state = self._set_input_buffer(incremental_state, input_buffer)\n return incremental_state\n\n def _get_input_buffer(\n self, incremental_state: Optional[Dict[str, Dict[str, Optional[Tensor]]]]\n ) -> Dict[str, Optional[Tensor]]:\n result = self.get_incremental_state(incremental_state, \"attn_state\")\n if result is not None:\n return result\n else:\n empty_result: Dict[str, Optional[Tensor]] = {}\n return empty_result\n\n def _set_input_buffer(\n self,\n incremental_state: Dict[str, Dict[str, Optional[Tensor]]],\n buffer: Dict[str, Optional[Tensor]],\n ):\n return self.set_incremental_state(incremental_state, \"attn_state\", buffer)\n\n def apply_sparse_mask(attn_weights, tgt_len: int, src_len: int, bsz: int):\n return attn_weights\n\n def upgrade_state_dict_named(self, state_dict, name):\n prefix = name + \".\" if name != \"\" else \"\"\n items_to_add = {}\n keys_to_remove = []\n for k in state_dict.keys():\n if k.endswith(prefix + \"in_proj_weight\"):\n # in_proj_weight used to be q + k + v with same dimensions\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.weight\"] = state_dict[k][:dim]\n items_to_add[prefix + \"k_proj.weight\"] = state_dict[k][dim : 2 * dim]\n items_to_add[prefix + \"v_proj.weight\"] = state_dict[k][2 * dim :]\n\n keys_to_remove.append(k)\n\n k_bias = prefix + \"in_proj_bias\"\n if k_bias in state_dict.keys():\n dim = int(state_dict[k].shape[0] / 3)\n items_to_add[prefix + \"q_proj.bias\"] = state_dict[k_bias][:dim]\n items_to_add[prefix + \"k_proj.bias\"] = state_dict[k_bias][\n dim : 2 * dim\n ]\n items_to_add[prefix + \"v_proj.bias\"] = state_dict[k_bias][2 * dim :]\n\n keys_to_remove.append(prefix + \"in_proj_bias\")\n\n for k in keys_to_remove:\n del state_dict[k]\n\n for key, value in items_to_add.items():\n state_dict[key] = value" }, { 
"identifier": "RelationalMemory", "path": "multi_part_assembly/utils/wx_transformer_utilities/relational_memory.py", "snippet": "class RelationalMemory(nn.Module):\n \"\"\"\n Constructs a `RelationalMemory` object.\n This class is same as the RMC from relational_rnn_models.py, but without language modeling-specific variables.\n Args:\n mem_slots: The total number of memory slots to use.\n head_size: The size of an attention head.\n input_size: The size of input per step. i.e. the dimension of each input vector\n num_heads: The number of attention heads to use. Defaults to 1.\n num_blocks: Number of times to compute attention per time step. Defaults\n to 1.\n forget_bias: Bias to use for the forget gate, assuming we are using\n some form of gating. Defaults to 1.\n input_bias: Bias to use for the input gate, assuming we are using\n some form of gating. Defaults to 0.\n gate_style: Whether to use per-element gating ('unit'),\n per-memory slot gating ('memory'), or no gating at all (None).\n Defaults to `unit`.\n attention_mlp_layers: Number of layers to use in the post-attention\n MLP. Defaults to 2.\n key_size: Size of vector to use for key & query vectors in the attention\n computation. Defaults to None, in which case we use `head_size`.\n name: Name of the module.\n\n # NEW flag for this class\n return_all_outputs: Whether the model returns outputs for each step (like seq2seq) or only the final output.\n Raises:\n ValueError: gate_style not one of [None, 'memory', 'unit'].\n ValueError: num_blocks is < 1.\n ValueError: attention_mlp_layers is < 1.\n \"\"\"\n\n def __init__(self, mem_slots, head_size, input_size, output_size, num_heads=1, num_blocks=1, forget_bias=1., input_bias=0.,\n gate_style='unit', attention_mlp_layers=2, key_size=None, return_all_outputs=False, use_topk = False, topk = 3, num_steps = 5,\n null_attention = False):\n super(RelationalMemory, self).__init__()\n\n ########## generic parameters for RMC ##########\n self.mem_slots = mem_slots\n self.head_size = head_size\n self.num_heads = num_heads\n self.mem_size = self.head_size * self.num_heads\n self.use_topk = use_topk\n self.topk = topk\n self.attn_log = None\n\n # a new fixed params needed for pytorch port of RMC\n # +1 is the concatenated input per time step : we do self-attention with the concatenated memory & input\n # so if the mem_slots = 1, this value is 2\n self.mem_slots_plus_input = self.mem_slots + 1\n\n if num_blocks < 1:\n raise ValueError('num_blocks must be >=1. Got: {}.'.format(num_blocks))\n self.num_blocks = num_blocks\n\n if gate_style not in ['unit', 'memory', None]:\n raise ValueError(\n 'gate_style must be one of [\\'unit\\', \\'memory\\', None]. got: '\n '{}.'.format(gate_style))\n self.gate_style = gate_style\n\n if attention_mlp_layers < 1:\n raise ValueError('attention_mlp_layers must be >= 1. 
Got: {}.'.format(\n attention_mlp_layers))\n self.attention_mlp_layers = attention_mlp_layers\n\n self.key_size = key_size if key_size else self.head_size\n\n ########## parameters for multihead attention ##########\n # value_size is same as head_size\n self.value_size = self.head_size\n # total size for query-key-value\n self.qkv_size = 2 * self.key_size + self.value_size\n self.total_qkv_size = self.qkv_size * self.num_heads # denoted as F\n\n self.query_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.key_proj = nn.Linear(self.mem_size, self.key_size * self.num_heads)\n self.value_proj = nn.Linear(self.mem_size, self.value_size * self.num_heads)\n\n\n # each head has qkv_sized linear projector\n # just using one big param is more efficient, rather than this line\n # self.qkv_projector = [nn.Parameter(torch.randn((self.qkv_size, self.qkv_size))) for _ in range(self.num_heads)]\n self.qkv_projector = nn.Linear(self.mem_size, self.total_qkv_size)\n self.qkv_layernorm = nn.LayerNorm(self.total_qkv_size)\n\n # used for attend_over_memory function\n self.attention_mlp = nn.ModuleList([nn.Linear(self.mem_size, self.mem_size)] * self.attention_mlp_layers)\n self.attended_memory_layernorm = nn.LayerNorm( self.mem_size)\n self.attended_memory_layernorm2 = nn.LayerNorm(self.mem_size)\n\n ########## parameters for initial embedded input projection ##########\n self.input_size = input_size\n self.input_projector = nn.Linear(self.input_size, self.mem_size)\n\n self.output_projector = nn.Linear(self.output_size, self.input_size)\n\n ########## parameters for gating ##########\n self.num_gates = 2 * self.calculate_gate_size()\n print('input projector:'+str(self.mem_size))\n self.input_gate_projector = nn.Linear(self.mem_size * num_steps, self.num_gates)\n self.memory_gate_projector = nn.Linear(self.mem_size, self.num_gates)\n # trainable scalar gate bias tensors\n self.forget_bias = nn.Parameter(torch.tensor(forget_bias, dtype=torch.float32))\n self.input_bias = nn.Parameter(torch.tensor(input_bias, dtype=torch.float32))\n\n ########## number of outputs returned #####\n self.return_all_outputs = return_all_outputs\n\n self.null_attention = null_attention\n\n self.competition_mlp = nn.Sequential(nn.Linear(self.mem_slots * self.mem_size + self.mem_size, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 256),\n nn.ReLU(),\n nn.Linear(256, 2))\n\n def repackage_hidden(self, h):\n \"\"\"Wraps hidden states in new Tensors, to detach them from their history.\"\"\"\n # needed for truncated BPTT, called at every batch forward pass\n if isinstance(h, torch.Tensor):\n return h.detach()\n else:\n return tuple(self.repackage_hidden(v) for v in h)\n\n def initial_state(self, batch_size, trainable=False):\n \"\"\"\n Creates the initial memory.\n We should ensure each row of the memory is initialized to be unique,\n so initialize the matrix to be the identity. We then pad or truncate\n as necessary so that init_state is of size\n (batch_size, self.mem_slots, self.mem_size).\n Args:\n batch_size: The size of the batch.\n trainable: Whether the initial state is trainable. 
This is always True.\n Returns:\n init_state: A truncated or padded matrix of size\n (batch_size, self.mem_slots, self.mem_size).\n \"\"\"\n init_state = torch.stack([torch.eye(self.mem_slots) for _ in range(batch_size)])\n\n # pad the matrix with zeros\n if self.mem_size > self.mem_slots:\n difference = self.mem_size - self.mem_slots\n pad = torch.zeros((batch_size, self.mem_slots, difference))\n init_state = torch.cat([init_state, pad], -1)\n\n # truncation. take the first 'self.mem_size' components\n elif self.mem_size < self.mem_slots:\n init_state = init_state[:, :, :self.mem_size]\n\n return init_state\n\n def multihead_attention(self, input, memory):\n \"\"\"\n Perform multi-head attention from 'Attention is All You Need'.\n Implementation of the attention mechanism from\n https://arxiv.org/abs/1706.03762.\n Args:\n memory: Memory tensor to perform attention on.\n Returns:\n new_memory: New memory tensor.\n \"\"\"\n\n q = self.query_proj(memory)\n k = self.key_proj(input)\n v = self.value_proj(input)\n\n q = q.reshape(q.size(0), q.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n k = k.reshape(k.size(0), k.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n v = v.reshape(v.size(0), v.size(1), self.num_heads, -1).permute(0, 2, 1, 3)\n scores = torch.matmul(q, k.transpose(2, 3))\n\n scores = torch.softmax(scores, dim = -1)\n self.attn_log = scores[0]\n if not self.null_attention:\n if self.use_topk:\n topk = torch.topk(scores, dim = -1, k = self.topk)\n mask = torch.zeros(scores.size()).to(scores.device)\n mask.scatter_(3, topk.indices, 1)\n scores = scores * mask\n else:\n memory_flat = memory.reshape(memory.size(0), -1).unsqueeze(1)\n memory_flat = memory_flat.repeat(1, input.shape[1], 1)\n\n N = torch.cat((input, memory_flat), dim = 2)\n N = self.competition_mlp(N)\n\n N = torch.nn.functional.gumbel_softmax(N, dim = 2, hard = True, tau = 0.5)\n\n N = N[:, :, 0]\n\n scores = scores * N.unsqueeze(1).unsqueeze(1)\n\n\n output = torch.matmul(scores, v)\n\n \"\"\"#print(memory.size())\n # First, a simple linear projection is used to construct queries\n qkv = self.qkv_projector(memory)\n # apply layernorm for every dim except the batch dim\n qkv = self.qkv_layernorm(qkv)\n\n # mem_slots needs to be dynamically computed since mem_slots got concatenated with inputs\n # example: self.mem_slots=10 and seq_length is 3, and then mem_slots is 10 + 1 = 11 for each 3 step forward pass\n # this is the same as self.mem_slots_plus_input, but defined to keep the sonnet implementation code style\n mem_slots = memory.shape[1] # denoted as N\n\n # split the qkv to multiple heads H\n # [B, N, F] => [B, N, H, F/H]\n qkv_reshape = qkv.view(qkv.shape[0], mem_slots, self.num_heads, self.qkv_size)\n\n # [B, N, H, F/H] => [B, H, N, F/H]\n qkv_transpose = qkv_reshape.permute(0, 2, 1, 3)\n\n # [B, H, N, key_size], [B, H, N, key_size], [B, H, N, value_size]\n q, k, v = torch.split(qkv_transpose, [self.key_size, self.key_size, self.value_size], -1)\n\n # scale q with d_k, the dimensionality of the key vectors\n q *= (self.key_size ** -0.5)\n\n # make it [B, H, N, N]\n dot_product = torch.matmul(q, k.permute(0, 1, 3, 2))\n weights = F.softmax(dot_product, dim=-1)\n\n if self.use_topk:\n topk = torch.topk(weights, dim = -1, k = self.topk)\n mask = torch.zeros(weights.size()).to(weights.device)\n mask.scatter_(3, topk.indices, 1)\n weights = weights * mask\n\n # output is [B, H, N, V]\n output = torch.matmul(weights, v)\"\"\"\n\n # [B, H, N, V] => [B, N, H, V] => [B, N, H*V]\n output_transpose = output.permute(0, 2, 
1, 3).contiguous()\n new_memory = output_transpose.view((output_transpose.shape[0], output_transpose.shape[1], -1))\n\n return new_memory\n\n\n @property\n def state_size(self):\n return [self.mem_slots, self.mem_size]\n\n @property\n def output_size(self):\n return self.mem_slots * self.mem_size\n\n def calculate_gate_size(self):\n \"\"\"\n Calculate the gate size from the gate_style.\n Returns:\n The per sample, per head parameter size of each gate.\n \"\"\"\n if self.gate_style == 'unit':\n return self.mem_size\n elif self.gate_style == 'memory':\n return 1\n else: # self.gate_style == None\n return 0\n\n def print_log(self):\n print(self.attn_log)\n\n def create_gates(self, inputs, memory):\n \"\"\"\n Create input and forget gates for this step using `inputs` and `memory`.\n Args:\n inputs: Tensor input.\n memory: The current state of memory.\n Returns:\n input_gate: A LSTM-like insert gate.\n forget_gate: A LSTM-like forget gate.\n \"\"\"\n # We'll create the input and forget gates at once. Hence, calculate double\n # the gate size.\n\n # equation 8: since there is no output gate, h is just a tanh'ed m\n memory = torch.tanh(memory)\n\n # TODO: check this input flattening is correct\n # sonnet uses this, but i think it assumes time step of 1 for all cases\n # if inputs is (B, T, features) where T > 1, this gets incorrect\n # inputs = inputs.view(inputs.shape[0], -1)\n\n # fixed implementation\n if len(inputs.shape) == 3:\n #if inputs.shape[1] > 1:\n # raise ValueError(\n # \"input seq length is larger than 1. create_gate function is meant to be called for each step, with input seq length of 1\")\n inputs = inputs.view(inputs.shape[0], -1)\n # matmul for equation 4 and 5\n # there is no output gate, so equation 6 is not implemented\n gate_inputs = self.input_gate_projector(inputs)\n gate_inputs = gate_inputs.unsqueeze(dim=1)\n gate_memory = self.memory_gate_projector(memory)\n else:\n raise ValueError(\"input shape of create_gate function is 2, expects 3\")\n\n # this completes the equation 4 and 5\n #print(gate_inputs.size())\n #print(gate_memory.size())\n gates = gate_memory + gate_inputs\n gates = torch.split(gates, split_size_or_sections=int(gates.shape[2] / 2), dim=2)\n input_gate, forget_gate = gates\n assert input_gate.shape[2] == forget_gate.shape[2]\n\n # to be used for equation 7\n input_gate = torch.sigmoid(input_gate + self.input_bias)\n forget_gate = torch.sigmoid(forget_gate + self.forget_bias)\n\n return input_gate, forget_gate\n\n def attend_over_memory(self, inputs, memory):\n \"\"\"\n Perform multiheaded attention over `memory`.\n Args:\n memory: Current relational memory.\n Returns:\n The attended-over memory.\n \"\"\"\n for _ in range(self.num_blocks):\n attended_memory = self.multihead_attention(inputs, memory)\n\n # Add a skip connection to the multiheaded attention's input.\n memory = self.attended_memory_layernorm(memory + attended_memory)\n\n # add a skip connection to the attention_mlp's input.\n attention_mlp = memory\n for i, l in enumerate(self.attention_mlp):\n attention_mlp = self.attention_mlp[i](attention_mlp)\n attention_mlp = F.relu(attention_mlp)\n memory = self.multihead_attention(memory, memory, use_topk_ = False, store_log = False)\n memory = self.attended_memory_layernorm2(memory + attention_mlp)\n\n return memory\n\n def forward_step(self, inputs, memory, treat_input_as_matrix=False):\n \"\"\"\n Forward step of the relational memory core.\n Args:\n inputs: Tensor input.\n memory: Memory output from the previous time step.\n treat_input_as_matrix: 
Optional, whether to treat `input` as a sequence\n of matrices. Default to False, in which case the input is flattened\n into a vector.\n Returns:\n output: This time step's output.\n next_memory: The next version of memory to use.\n \"\"\"\n\n if treat_input_as_matrix:\n # keep (Batch, Seq, ...) dim (0, 1), flatten starting from dim 2\n inputs = inputs.view(inputs.shape[0], inputs.shape[1], -1)\n # apply linear layer for dim 2\n inputs_reshape = self.input_projector(inputs)\n else:\n # keep (Batch, ...) dim (0), flatten starting from dim 1\n inputs = inputs.view(inputs.shape[0], -1)\n # apply linear layer for dim 1\n inputs = self.input_projector(inputs)\n # unsqueeze the time step to dim 1\n inputs_reshape = inputs.unsqueeze(dim=1)\n\n #memory_plus_input = torch.cat([memory, inputs_reshape], dim=1)\n #print(memory_plus_input.size())\n next_memory = self.attend_over_memory(inputs_reshape, memory)\n\n # cut out the concatenated input vectors from the original memory slots\n #n = inputs_reshape.shape[1]\n #next_memory = next_memory[:, :-n, :]\n\n if self.gate_style == 'unit' or self.gate_style == 'memory':\n # these gates are sigmoid-applied ones for equation 7\n input_gate, forget_gate = self.create_gates(inputs_reshape, memory)\n # equation 7 calculation\n next_memory = input_gate * torch.tanh(next_memory)\n next_memory += forget_gate * memory\n\n\n output = next_memory.view(next_memory.shape[0], -1)\n return output, next_memory\n\n def forward(self, inputs, memory, parallel = True):\n # Starting each batch, we detach the hidden state from how it was previously produced.\n # If we didn't, the model would try backpropagating all the way to start of the dataset.\n # memory = self.repackage_hidden(memory)\n\n # for loop implementation of (entire) recurrent forward pass of the model\n # inputs is batch first [batch, seq], and output logit per step is [batch, vocab]\n # so the concatenated logits are [seq * batch, vocab]\n\n # targets are flattened [seq, batch] => [seq * batch], so the dimension is correct\n\n logits = []\n #print(inputs.size())\n #print(memory.size())\n #memory = self.repackage_hidden(memory)\n # shape[1] is seq_lenth T\n if not parallel:\n for idx_step in range(inputs.shape[1]):\n logit, memory = self.forward_step(inputs[:, idx_step], memory)\n logits.append(logit)\n logits = torch.cat(logits)\n else:\n logits, memory = self.forward_step(inputs, memory, treat_input_as_matrix = True)\n \n memory_out = self.output_projector(memory.view(memory.shape[0], -1))\n\n #print(inputs.size())\n #print(memory_out.size())\n #print('------')\n if self.return_all_outputs:\n return logits, memory_out , memory\n else:\n return logits, memory_out, memory" }, { "identifier": "GroupLinearLayer", "path": "multi_part_assembly/utils/wx_transformer_utilities/group_linear_layer.py", "snippet": "class GroupLinearLayer(nn.Module):\n\n def __init__(self, din, dout, num_blocks, bias=True, a = None):\n super(GroupLinearLayer, self).__init__()\n self.nb = num_blocks\n self.dout = dout\n\n if a is None:\n a = 1. 
/ math.sqrt(dout * num_blocks)\n\n #gain = 1.0 / math.sqrt(2)\n #a = gain * math.sqrt(6.0 / (din + dout))\n\n self.weight = nn.Parameter(torch.FloatTensor(num_blocks,din,dout).uniform_(-a,a))\n\n self.bias = bias\n\n if bias is True:\n self.bias = nn.Parameter(torch.FloatTensor(num_blocks,dout).uniform_(-a,a))\n #self.bias = nn.Parameter(torch.zeros(dout*num_blocks))\n else:\n self.bias = None\n\n def forward(self,x):\n\n\t#input: ts x bs x blocks*nhid\n\t#ts*bs , blocks, nhid\n\t#blocks, ts*bs, nhid\n ts,bs,m = x.shape\t\n\n x = x.reshape((ts*bs, self.nb, m//self.nb))\n x = x.permute(1,0,2)\n x = torch.bmm(x,self.weight)\n x = x.permute(1,0,2)\n \n if not self.bias is None:\n x = x + self.bias\n\n x = x.reshape((ts, bs, self.dout*self.nb))\n \n #if not self.bias is None:\n # x += self.bias\n\n return x" }, { "identifier": "MemoryAttention", "path": "multi_part_assembly/utils/wx_transformer_utilities/basic_mha.py", "snippet": "class MemoryAttention(nn.Module):\n def __init__(self, n_blocks_query, n_blocks_val, dim_query, dim_val, n_heads=8):\n super(MemoryAttention, self).__init__()\n\n self.n_heads = n_heads\n self.n_blocks_val = n_blocks_val\n self.dim_val = dim_val\n self.block_dim_val = dim_val // self.n_blocks_val\n\n self.n_blocks_query = n_blocks_query\n self.dim_query = dim_query\n self.block_dim_query = dim_query // self.n_blocks_query\n\n self.head_dim = 64\n self.scale = self.head_dim ** -0.5\n\n #self.n_blocks_val * self.block_dim_val\n\n self.query_net = GroupLinearLayer(self.block_dim_query, self.head_dim * self.n_heads, n_blocks_query)\n self.key_net = GroupLinearLayer(self.block_dim_val, self.head_dim * self.n_heads, n_blocks_val)\n self.value_net = GroupLinearLayer(self.block_dim_val, self.head_dim * self.n_heads, n_blocks_val)\n self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim_query, n_blocks_query)\n\n def forward(self, q, kv):\n\n #comes in as: bs, pos*emb.\n #positions_attend x T*bs x emb\n\n\n #q = q.permute(1,0,2)\n #kv = kv.permute(1,0,2)\n\n #print('kv shape after permute', kv.shape)\n\n seq_len_q,bsz,_ = q.shape\n seq_len_v,bsz,_ = kv.shape\n\n q = q.reshape((seq_len_q, bsz, self.n_blocks_query * self.block_dim_query))\n\n kv = kv.reshape((seq_len_v, bsz, self.n_blocks_val * self.block_dim_val))\n\n q = self.query_net(q).view(seq_len_q, bsz, self.n_blocks_query, self.n_heads, self.head_dim)\n k = self.key_net(kv).view(seq_len_v, bsz, self.n_blocks_val, self.n_heads, self.head_dim)\n v = self.value_net(kv).view(seq_len_v, bsz, self.n_blocks_val, self.n_heads, self.head_dim)\n\n q = q.transpose(2,3) * self.scale\n k = k.transpose(2,3)\n v = v.transpose(2,3)\n score = torch.matmul(q, k.transpose(3,4))\n #print('score shape', score.shape)\n score = F.softmax(score, dim=-1)\n out = torch.matmul(score, v).transpose(2,3)\n #print('out shape', out.shape)\n score = score.mean(dim=2)\n\n out = out.reshape(seq_len_q, bsz, self.n_blocks_query * self.head_dim * self.n_heads)\n out = self.final(out)\n out = out.view(seq_len_q, bsz, self.dim_query)\n\n\n return out, score" }, { "identifier": "quant_noise", "path": "multi_part_assembly/utils/wx_transformer_utilities/quant_noise.py", "snippet": "def quant_noise(module, p, block_size):\n \"\"\"\n Wraps modules and applies quantization noise to the weights for\n subsequent quantization with Iterative Product Quantization as\n described in \"Training with Quantization Noise for Extreme Model Compression\"\n\n Args:\n - module: nn.Module\n - p: amount of Quantization Noise\n - block_size: size of the blocks 
for subsequent quantization with iPQ\n\n Remarks:\n - Module weights must have the right sizes wrt the block size\n - Only Linear, Embedding and Conv2d modules are supported for the moment\n - For more detail on how to quantize by blocks with convolutional weights,\n see \"And the Bit Goes Down: Revisiting the Quantization of Neural Networks\"\n - We implement the simplest form of noise here as stated in the paper\n which consists in randomly dropping blocks\n \"\"\"\n\n # if no quantization noise, don't register hook\n if p <= 0:\n return module\n\n # supported modules\n assert isinstance(module, (nn.Linear, nn.Embedding, nn.Conv2d))\n\n # test whether module.weight has the right sizes wrt block_size\n is_conv = module.weight.ndim == 4\n\n # 2D matrix\n if not is_conv:\n assert module.weight.size(1) % block_size == 0, \"Input features must be a multiple of block sizes\"\n\n # 4D matrix\n else:\n # 1x1 convolutions\n if module.kernel_size == (1, 1):\n assert module.in_channels % block_size == 0, \"Input channels must be a multiple of block sizes\"\n # regular convolutions\n else:\n k = module.kernel_size[0] * module.kernel_size[1]\n assert k % block_size == 0, \"Kernel size must be a multiple of block size\"\n\n def _forward_pre_hook(mod, input):\n # no noise for evaluation\n if mod.training:\n if not is_conv:\n # gather weight and sizes\n weight = mod.weight\n in_features = weight.size(1)\n out_features = weight.size(0)\n\n # split weight matrix into blocks and randomly drop selected blocks\n mask = torch.zeros(in_features // block_size * out_features, device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_features)\n\n else:\n # gather weight and sizes\n weight = mod.weight\n in_channels = mod.in_channels\n out_channels = mod.out_channels\n\n # split weight matrix into blocks and randomly drop selected blocks\n if mod.kernel_size == (1, 1):\n mask = torch.zeros(int(in_channels // block_size * out_channels), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.repeat_interleave(block_size, -1).view(-1, in_channels)\n else:\n mask = torch.zeros(weight.size(0), weight.size(1), device=weight.device)\n mask.bernoulli_(p)\n mask = mask.unsqueeze(2).unsqueeze(3).repeat(1, 1, mod.kernel_size[0], mod.kernel_size[1])\n\n # scale weights and apply mask\n mask = mask.to(torch.bool) # x.bool() is not currently supported in TorchScript\n s = 1 / (1 - p)\n mod.weight.data = s * weight.masked_fill(mask, 0)\n\n module.register_forward_pre_hook(_forward_pre_hook)\n return module" }, { "identifier": "FairseqDropout", "path": "multi_part_assembly/utils/wx_transformer_utilities/fairseq_dropout.py", "snippet": "class FairseqDropout(nn.Module):\n\n def __init__(self, p, module_name=None):\n super().__init__()\n self.p = p\n self.module_name = module_name\n self.apply_during_inference = False\n\n def forward(self, x, inplace: bool = False):\n if self.training or self.apply_during_inference:\n return F.dropout(x, p=self.p, training=True, inplace=inplace)\n else:\n return x\n\n def make_generation_fast_(\n self,\n name: str,\n retain_dropout: bool = False,\n retain_dropout_modules: Optional[List[str]] = None,\n **kwargs\n ):\n if retain_dropout:\n if retain_dropout_modules is not None and self.module_name is None:\n logger.warning(\n 'Cannot enable dropout during inference for module {} '\n 'because module_name was not set'.format(name)\n )\n elif (\n retain_dropout_modules is None # if None, apply to all modules\n or self.module_name in retain_dropout_modules\n 
):\n logger.info(\n 'Enabling dropout during inference for module: {}'.format(name)\n )\n self.apply_during_inference = True\n else:\n logger.info('Disabling dropout for module: {}'.format(name))" } ]
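The FairseqDropout snippet that closes the context list above behaves like standard dropout during training, but it can also be opted back into dropout at inference time: make_generation_fast_ with retain_dropout=True sets apply_during_inference, after which forward keeps calling F.dropout even in eval mode. A small usage sketch against the quoted class; the module path is taken from the context entry and the argument values are illustrative:

import torch
from multi_part_assembly.utils.wx_transformer_utilities.fairseq_dropout import FairseqDropout

drop = FairseqDropout(p=0.1, module_name="demo_layer")
drop.eval()                       # normal inference path: forward() returns the input unchanged
x = torch.ones(2, 4)
assert torch.equal(drop(x), x)

# Opt this module back into dropout at inference, as the snippet's make_generation_fast_ allows.
drop.make_generation_fast_(name="demo_layer", retain_dropout=True,
                           retain_dropout_modules=["demo_layer"])
y = drop(x)                       # now stochastic even though drop.training is False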
from typing import Dict, List, Optional from .layer_norm import LayerNorm from .multihead_attention import MultiheadAttention from .relational_memory import RelationalMemory from .group_linear_layer import GroupLinearLayer from .basic_mha import MemoryAttention from .quant_noise import quant_noise from .fairseq_dropout import FairseqDropout from torch import Tensor import torch import torch.nn as nn import multi_part_assembly.utils.wx_transformer_utilities.fairseq_utils as utils import random import torch.nn.functional as F
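Among the modules imported above, MultiheadAttention (and the attention step inside RelationalMemory) can sparsify the attention distribution when use_topk is set: only the k largest weights per query row are kept and the rest are zeroed, without renormalizing. A minimal standalone sketch of that masking step; the tensor shape and the renormalize flag are illustrative additions, not part of the quoted code:

import torch

def topk_mask_attention(attn_probs: torch.Tensor, k: int, renormalize: bool = False) -> torch.Tensor:
    # attn_probs: (batch, tgt_len, src_len), rows already softmax-normalized
    topk = torch.topk(attn_probs, k=min(k, attn_probs.size(2)), dim=2)
    mask = torch.zeros_like(attn_probs)
    mask.scatter_(2, topk.indices, 1.0)   # 1 at the k largest entries of each row
    sparse = attn_probs * mask            # zero out everything else, as in the snippet
    if renormalize:                       # optional extra step the quoted code does not take
        sparse = sparse / sparse.sum(dim=2, keepdim=True).clamp_min(1e-9)
    return sparse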
18,016
#should return these q,k,v and save to a big list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim self.quant_noise = getattr(args, "quant_noise_pq", 0) self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) self.use_nfm = use_nfm print('using nfm?', self.use_nfm) self.nb = nb self.norm_blocks = self.nb self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. 
Then raise embed_dim in args self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) print("SETUP TRANSFORMER LAYER", 'blocks', self.nb) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1( self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size ) self.fc2 = self.build_fc2( args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size ) self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt: self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm) self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt_memory: memory_slots = 4 memory_head_size = 128 memory_num_heads = 1 gate_style = 'memory' print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size) self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim, num_heads=memory_num_heads, num_blocks=1, forget_bias=1., input_bias=0., attention_mlp_layers=5, gate_style=gate_style) #self.n_blocks_val * self.block_dim_val #self.block_dim_val = dim_val // self.n_blocks_val
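The NormLayer defined in the code above normalizes each of the num_rims blocks independently (LayerNorm without affine parameters) and then applies a single learned scale and bias across the concatenated blocks. A functional sketch of the same reshaping trick, assuming an input of shape (seq_len, batch, num_blocks * block_dim); the weight and bias arguments stand in for NormLayer's parameters:

import torch
import torch.nn.functional as F

def blockwise_layer_norm(x, num_blocks, weight, bias, eps=1e-5):
    # x: (seq_len, batch, num_blocks * block_dim); weight/bias broadcastable to x
    seq_len, bsz, dim = x.shape
    block_dim = dim // num_blocks
    x = x.view(seq_len, bsz, num_blocks, block_dim)
    x = F.layer_norm(x, (block_dim,), eps=eps)    # normalize each block separately, no affine here
    x = x.view(seq_len, bsz, num_blocks * block_dim)
    return x * weight + bias                      # shared affine over the flattened blocks

# Example with 8 blocks of width 64:
# out = blockwise_layer_norm(torch.randn(10, 2, 512), 8, torch.ones(1, 1, 512), torch.zeros(1, 1, 512))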
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. #from fairseq.modules.shared_group_linear_layer import SharedGroupLinearLayer class TransformerEncoderLayerVanilla(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, out_proj = None): super().__init__() self.embed_dim = args.encoder_embed_dim self.self_attn = self.build_self_attention(self.embed_dim, args) self.self_attn_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) self.dropout = args.dropout self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) self.activation_dropout = getattr(args, "activation_dropout", 0) if self.activation_dropout == 0: # for backwards compatibility with models that use args.relu_dropout self.activation_dropout = getattr(args, "relu_dropout", 0) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1(self.embed_dim, args.encoder_ffn_embed_dim) self.fc2 = self.build_fc2(args.encoder_ffn_embed_dim, self.embed_dim) self.final_layer_norm = LayerNorm(self.embed_dim, eps=1e-5) if out_proj is not None: self.final_linear = nn.Linear(args.encoder_embed_dim, out_proj) else: self.final_linear = None def build_fc1(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_fc2(self, input_dim, output_dim): return nn.Linear(input_dim, output_dim) def build_self_attention(self, embed_dim, args): return MultiheadAttention( embed_dim, args.encoder_attention_heads, dropout=args.attention_dropout, self_attention=args.self_attention, shared_memory_attention = args.shared_memory_attention, use_topk = args.use_topk, topk = args.topk, num_steps = args.num_steps, mem_slots = args.mem_slots, null_attention = args.null_attention, regressive = args.regressive ) def upgrade_state_dict_named(self, state_dict, name): """ Rename layer norm states from `...layer_norms.0.weight` to `...self_attn_layer_norm.weight` and `...layer_norms.1.weight` to `...final_layer_norm.weight` """ layer_norm_map = {"0": "self_attn_layer_norm", "1": "final_layer_norm"} for old, new in layer_norm_map.items(): for m in ("weight", "bias"): k = "{}.layer_norms.{}.{}".format(name, old, m) if k in state_dict: state_dict["{}.{}.{}".format(name, new, m)] = state_dict[k] del state_dict[k] def forward(self, x, encoder_padding_mask, attn_mask: Optional[Tensor] = None, state = None, memory = None): """ Args: x (Tensor): input to the layer of shape `(seq_len, batch, embed_dim)` encoder_padding_mask (ByteTensor): binary ByteTensor of shape `(batch, src_len)` where padding elements are indicated by ``1``. 
attn_mask (ByteTensor): binary tensor of shape (T_tgt, T_src), where T_tgt is the length of query, while T_src is the length of key, though here both query and key is x here, attn_mask[t_tgt, t_src] = 1 means when calculating embedding for t_tgt, t_src is excluded (or masked out), =0 means it is included in attention Returns: encoded output of shape `(seq_len, batch, embed_dim)` """ residual = x if self.normalize_before: x = self.self_attn_layer_norm(x) if attn_mask is not None: attn_mask = attn_mask.masked_fill(attn_mask.to(torch.bool), -1e8) # anything in original attn_mask = 1, becomes -1e8 # anything in original attn_mask = 0, becomes 0 # Note that we cannot use -inf here, because at some edge cases, # the attention weight (before softmax) for some padded element in query # will become -inf, which results in NaN in model parameters # TODO: to formally solve this problem, we need to change fairseq's # MultiheadAttention. We will do this later on. #print(state is not None) x, memory, _ = self.self_attn( query=state if state is not None else x, key=x, value=x, key_padding_mask=encoder_padding_mask, attn_mask=attn_mask, memory = memory ) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.self_attn_layer_norm(x) residual = x if self.normalize_before: x = self.final_layer_norm(x) x = self.activation_fn(self.fc1(x)) x = F.dropout(x, p=float(self.activation_dropout), training=self.training) x = self.fc2(x) x = F.dropout(x, p=self.dropout, training=self.training) x = residual + x if not self.normalize_before: x = self.final_layer_norm(x) if self.final_linear is not None: x = self.final_linear(x) return x, memory class Attention(nn.Module): def __init__(self, n_heads, n_blocks, dim, use_nfm): super(Attention, self).__init__() self.use_nfm = use_nfm #self.n_heads = n_heads self.n_heads = 12 self.n_blocks = n_blocks self.dim = dim self.block_dim = dim // self.n_blocks #self.head_dim = self.block_dim // self.n_heads self.head_dim = 64 self.scale = self.head_dim ** -0.5 self.query_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.key_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.value_net = GroupLinearLayer(self.block_dim, self.head_dim * self.n_heads, n_blocks) self.final = GroupLinearLayer(self.head_dim * self.n_heads, self.block_dim, n_blocks) def forward(self, x, qkv=None): use_exshare = False if qkv is not None: klst, vlst = qkv seq_len, bsz, _ = x.shape if use_exshare: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, 1, bsz*self.n_blocks, self.n_heads, self.head_dim) else: x = x.view(seq_len, bsz, self.n_blocks * self.block_dim) q = self.query_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) k = self.key_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) v = self.value_net(x).view(seq_len, bsz, self.n_blocks, self.n_heads, self.head_dim) q = q.transpose(2,3) * self.scale k = k.transpose(2,3) v = v.transpose(2,3) if random.uniform(0,1) < 0.00001: print('use NFM?', self.use_nfm) if self.use_nfm: if qkv is not None: klst.append(k) vlst.append(v) #print('len qlst', len(qlst)) #for kval in klst: # print(kval.shape) k = torch.cat(klst, dim=3) v = torch.cat(vlst, dim=3) #should return these q,k,v and save to a big 
list. Also pull in from the list passed in and concat along dim=3, i.e. so that it's nblocks * nlayers. #print('running comm attention with shapes', q.shape, k.shape, v.shape) score = torch.matmul(q, k.transpose(3,4)) #print('score shape', score.shape) score = F.softmax(score, dim=-1) out = torch.matmul(score, v).transpose(2,3) #print('out shape', out.shape) score = score.mean(dim=2) out = out.reshape(seq_len, bsz, self.n_blocks * self.head_dim * self.n_heads) out = self.final(out) out = out.view(seq_len, bsz, self.dim) return out, score class NormLayer(nn.Module): def __init__(self, num_rims, dim, export=False): super(NormLayer, self).__init__() self.num_rims = num_rims self.dim = dim self.weight = nn.Parameter(torch.ones(1,1,dim*num_rims,)) self.bias = nn.Parameter(torch.zeros(1,1,dim*num_rims,)) self.norm = LayerNorm(dim, export=export, elementwise_affine=False) def forward(self, x): seq_len, bsz, _ = x.shape x = x.view(seq_len, bsz, self.num_rims, self.dim) x = self.norm(x) x = x.view(seq_len, bsz, self.num_rims * self.dim) weight_use = self.weight.repeat(seq_len, bsz, 1) bias_use = self.bias.repeat(seq_len, bsz, 1) x = x * weight_use + bias_use return x class TransformerEncoderLayer(nn.Module): """Encoder layer block. In the original paper each operation (multi-head attention or FFN) is postprocessed with: `dropout -> add residual -> layernorm`. In the tensor2tensor code they suggest that learning is more robust when preprocessing each layer with layernorm and postprocessing with: `dropout -> add residual`. We default to the approach in the paper, but the tensor2tensor approach can be enabled by setting *args.encoder_normalize_before* to ``True``. Args: args (argparse.Namespace): parsed command-line arguments """ def __init__(self, args, nb, blockatt, blockatt_memory, use_nfm, out_proj_dim=None): super().__init__() self.blockatt = blockatt self.blockatt_memory = blockatt_memory self.embed_dim = args.encoder_embed_dim self.quant_noise = getattr(args, "quant_noise_pq", 0) self.quant_noise_block_size = getattr(args, "quant_noise_pq_block_size", 8) self.use_nfm = use_nfm print('using nfm?', self.use_nfm) self.nb = nb self.norm_blocks = self.nb self.self_attn = self.build_self_attention(self.embed_dim, args) #should divide embed_dim by nb. 
Then raise embed_dim in args self.self_attn_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) self.dropout_module = FairseqDropout(args.dropout, module_name=self.__class__.__name__) self.activation_fn = utils.get_activation_fn( activation=getattr(args, "activation_fn", "relu") ) print("SETUP TRANSFORMER LAYER", 'blocks', self.nb) activation_dropout_p = getattr(args, "activation_dropout", 0) if activation_dropout_p == 0: # for backwards compatibility with models that use args.relu_dropout activation_dropout_p = getattr(args, "relu_dropout", 0) self.activation_dropout_module = FairseqDropout( float(activation_dropout_p), module_name=self.__class__.__name__ ) self.normalize_before = args.encoder_normalize_before self.fc1 = self.build_fc1( self.embed_dim, args.encoder_ffn_embed_dim, self.quant_noise, self.quant_noise_block_size ) self.fc2 = self.build_fc2( args.encoder_ffn_embed_dim, self.embed_dim, self.quant_noise, self.quant_noise_block_size ) self.final_layer_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt: self.comm = Attention(args.encoder_attention_heads, self.nb, self.embed_dim, self.use_nfm) self.comm_norm = NormLayer(self.norm_blocks, self.embed_dim // self.norm_blocks) if self.blockatt_memory: memory_slots = 4 memory_head_size = 128 memory_num_heads = 1 gate_style = 'memory' print('not using special key size gate_style is', gate_style, memory_slots, memory_num_heads, memory_head_size) self.memory_layer = RelationalMemory(mem_slots=memory_slots, head_size=memory_head_size, input_size=self.embed_dim, output_size=self.embed_dim, num_heads=memory_num_heads, num_blocks=1, forget_bias=1., input_bias=0., attention_mlp_layers=5, gate_style=gate_style) #self.n_blocks_val * self.block_dim_val #self.block_dim_val = dim_val // self.n_blocks_val
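Both encoder layer classes in the code above choose between the post-norm ordering of the original paper and the tensor2tensor-style pre-norm ordering via args.encoder_normalize_before, applying the same pattern once around self-attention and once around the feed-forward block. A condensed sketch of the two residual patterns for a single sub-layer; sublayer, layer_norm and dropout are placeholders for the corresponding modules:

def residual_block(x, sublayer, layer_norm, dropout, normalize_before):
    # post-norm (paper):        sublayer -> dropout -> add residual -> layernorm
    # pre-norm (tensor2tensor): layernorm -> sublayer -> dropout -> add residual
    residual = x
    if normalize_before:
        x = layer_norm(x)
    x = dropout(sublayer(x))
    x = residual + x
    if not normalize_before:
        x = layer_norm(x)
    return x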
self.memory_attention = MemoryAttention(n_blocks_query=self.nb, n_blocks_val=8, dim_query=self.embed_dim, dim_val=memory_head_size*memory_num_heads*memory_slots)
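The completed line above instantiates the MemoryAttention block quoted in the context, wiring the layer's RIM blocks (queries) to the flattened relational memory (keys and values). A hedged usage sketch with dummy tensors; the sizes are illustrative stand-ins for self.nb, self.embed_dim and the memory settings, and query and memory are given matching sequence lengths so the batched matmul in the quoted forward broadcasts cleanly:

import torch
from multi_part_assembly.utils.wx_transformer_utilities.basic_mha import MemoryAttention

seq_len, bsz, nb, embed_dim = 16, 2, 4, 256           # illustrative sizes only
mem_slots, mem_head_size, mem_num_heads = 4, 128, 1

memory_attention = MemoryAttention(
    n_blocks_query=nb, n_blocks_val=8,
    dim_query=embed_dim, dim_val=mem_head_size * mem_num_heads * mem_slots)

x = torch.randn(seq_len, bsz, embed_dim)                                     # layer activations
mem = torch.randn(seq_len, bsz, mem_head_size * mem_num_heads * mem_slots)   # flattened memory
out, score = memory_attention(x, mem)                                        # out: (seq_len, bsz, embed_dim)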
4
2023-12-15 13:13:01+00:00
24k
camenduru/FreeInit-hf
app.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/models/unet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n use_inflated_groupnorm=False,\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n \n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n 
downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n use_inflated_groupnorm=use_inflated_groupnorm,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if 
use_inflated_groupnorm:\n self.conv_norm_out = InflatedGroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n else:\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u 
= model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "AnimationFreeInitPipeline", "path": "animatediff/pipelines/pipeline_animation.py", "snippet": "class AnimationFreeInitPipeline(AnimationPipeline):\n _optional_components = []\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet3DConditionModel,\n scheduler: Union[\n DDIMScheduler,\n PNDMScheduler,\n LMSDiscreteScheduler,\n EulerDiscreteScheduler,\n EulerAncestralDiscreteScheduler,\n DPMSolverMultistepScheduler,\n ],\n ):\n super().__init__(vae, text_encoder, tokenizer, unet, scheduler)\n self.freq_filter = None\n\n \n @torch.no_grad()\n def init_filter(self, video_length, height, width, filter_params):\n # initialize frequency filter for noise reinitialization\n batch_size = 1\n num_channels_latents = self.unet.in_channels\n filter_shape = [\n batch_size, \n num_channels_latents, \n video_length, \n height // self.vae_scale_factor, \n width // self.vae_scale_factor\n ]\n # self.freq_filter = get_freq_filter(filter_shape, device=self._execution_device, params=filter_params)\n self.freq_filter = get_freq_filter(\n filter_shape, \n device=self._execution_device, \n filter_type=filter_params.method,\n n=filter_params.n,\n d_s=filter_params.d_s,\n d_t=filter_params.d_t\n )\n\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]],\n video_length: Optional[int],\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_videos_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"tensor\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: Optional[int] = 1,\n # freeinit args\n num_iters: int = 5,\n use_fast_sampling: bool = False,\n save_intermediate: bool = False,\n return_orig: bool = False,\n save_dir: str = None,\n save_name: str = None,\n use_fp16: bool = False,\n **kwargs\n ):\n if use_fp16:\n print('Warning: using half percision for inferencing!')\n self.vae.to(dtype=torch.float16)\n self.unet.to(dtype=torch.float16)\n self.text_encoder.to(dtype=torch.float16)\n # Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # Check inputs. Raise error if not correct\n # import pdb\n # pdb.set_trace()\n self.check_inputs(prompt, height, width, callback_steps)\n\n # Define call parameters\n # batch_size = 1 if isinstance(prompt, str) else len(prompt)\n batch_size = 1\n if latents is not None:\n batch_size = latents.shape[0]\n if isinstance(prompt, list):\n batch_size = len(prompt)\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # Encode input prompt\n prompt = prompt if isinstance(prompt, list) else [prompt] * batch_size\n if negative_prompt is not None:\n negative_prompt = negative_prompt if isinstance(negative_prompt, list) else [negative_prompt] * batch_size \n text_embeddings = self._encode_prompt(\n prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt\n )\n\n # Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # Prepare latent variables\n num_channels_latents = self.unet.in_channels\n latents = self.prepare_latents(\n batch_size * num_videos_per_prompt,\n num_channels_latents,\n video_length,\n height,\n width,\n text_embeddings.dtype,\n device,\n generator,\n latents,\n )\n latents_dtype = latents.dtype\n\n # Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # Sampling with FreeInit.\n for iter in range(num_iters):\n # FreeInit ------------------------------------------------------------------\n if iter == 0:\n initial_noise = latents.detach().clone()\n else:\n # 1. DDPM Forward with initial noise, get noisy latents z_T\n # if use_fast_sampling:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps / num_iters * (iter + 1) - 1\n # else:\n # current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1\n current_diffuse_timestep = self.scheduler.config.num_train_timesteps - 1 # diffuse to t=999 noise level\n diffuse_timesteps = torch.full((batch_size,),int(current_diffuse_timestep))\n diffuse_timesteps = diffuse_timesteps.long()\n z_T = self.scheduler.add_noise(\n original_samples=latents.to(device), \n noise=initial_noise.to(device), \n timesteps=diffuse_timesteps.to(device)\n )\n # 2. create random noise z_rand for high-frequency\n z_rand = torch.randn((batch_size * num_videos_per_prompt, num_channels_latents, video_length, height // self.vae_scale_factor, width // self.vae_scale_factor), device=device)\n # 3. 
Roise Reinitialization\n latents = freq_mix_3d(z_T.to(dtype=torch.float32), z_rand, LPF=self.freq_filter)\n latents = latents.to(latents_dtype)\n \n # Coarse-to-Fine Sampling for Fast Inference (can lead to sub-optimal results)\n if use_fast_sampling:\n current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n self.scheduler.set_timesteps(current_num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n # --------------------------------------------------------------------------\n\n # Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n # if use_fast_sampling:\n # # Coarse-to-Fine Sampling for Fast Inference\n # current_num_inference_steps= int(num_inference_steps / num_iters * (iter + 1))\n # current_timesteps = timesteps[:current_num_inference_steps]\n # else:\n current_timesteps = timesteps\n for i, t in enumerate(current_timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # predict the noise residual\n noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample.to(dtype=latents_dtype)\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # call the callback, if provided\n if i == len(current_timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n \n # save intermediate results\n if save_intermediate:\n # Post-processing\n video = self.decode_latents(latents)\n video = torch.from_numpy(video)\n os.makedirs(save_dir, exist_ok=True)\n save_videos_grid(video, f\"{save_dir}/{save_name}_iter{iter}.gif\")\n \n if return_orig and iter==0:\n orig_video = self.decode_latents(latents)\n orig_video = torch.from_numpy(orig_video)\n\n # Post-processing\n video = self.decode_latents(latents)\n\n # Convert to tensor\n if output_type == \"tensor\":\n video = torch.from_numpy(video)\n\n if not return_dict:\n return video\n\n if return_orig:\n return AnimationFreeInitPipelineOutput(videos=video, orig_videos=orig_video)\n\n return AnimationFreeInitPipelineOutput(videos=video)" }, { "identifier": "save_videos_grid", "path": "animatediff/utils/util.py", "snippet": "def save_videos_grid(videos: torch.Tensor, path: str, rescale=False, n_rows=6, fps=8):\n videos = rearrange(videos, \"b c t h w -> t b c h w\")\n outputs = []\n for x in videos:\n x = torchvision.utils.make_grid(x, nrow=n_rows)\n x = x.transpose(0, 1).transpose(1, 2).squeeze(-1)\n if rescale:\n x = (x + 1.0) / 2.0 # -1,1 -> 0,1\n x = (x * 255).numpy().astype(np.uint8)\n outputs.append(x)\n\n os.makedirs(os.path.dirname(path), exist_ok=True)\n imageio.mimsave(path, outputs, fps=fps)" }, { "identifier": "convert_ldm_unet_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_unet_checkpoint(checkpoint, config, path=None, extract_ema=False, controlnet=False):\n \"\"\"\n Takes a state dict and a config, and 
returns a converted checkpoint.\n \"\"\"\n\n # extract state_dict for UNet\n unet_state_dict = {}\n keys = list(checkpoint.keys())\n\n if controlnet:\n unet_key = \"control_model.\"\n else:\n unet_key = \"model.diffusion_model.\"\n\n # at least a 100 parameters have to start with `model_ema` in order for the checkpoint to be EMA\n if sum(k.startswith(\"model_ema\") for k in keys) > 100 and extract_ema:\n print(f\"Checkpoint {path} has both EMA and non-EMA weights.\")\n print(\n \"In this conversion only the EMA weights are extracted. If you want to instead extract the non-EMA\"\n \" weights (useful to continue fine-tuning), please make sure to remove the `--extract_ema` flag.\"\n )\n for key in keys:\n if key.startswith(\"model.diffusion_model\"):\n flat_ema_key = \"model_ema.\" + \"\".join(key.split(\".\")[1:])\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(flat_ema_key)\n else:\n if sum(k.startswith(\"model_ema\") for k in keys) > 100:\n print(\n \"In this conversion only the non-EMA weights are extracted. If you want to instead extract the EMA\"\n \" weights (usually better for inference), please make sure to add the `--extract_ema` flag.\"\n )\n\n for key in keys:\n if key.startswith(unet_key):\n unet_state_dict[key.replace(unet_key, \"\")] = checkpoint.pop(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"time_embedding.linear_1.weight\"] = unet_state_dict[\"time_embed.0.weight\"]\n new_checkpoint[\"time_embedding.linear_1.bias\"] = unet_state_dict[\"time_embed.0.bias\"]\n new_checkpoint[\"time_embedding.linear_2.weight\"] = unet_state_dict[\"time_embed.2.weight\"]\n new_checkpoint[\"time_embedding.linear_2.bias\"] = unet_state_dict[\"time_embed.2.bias\"]\n\n if config[\"class_embed_type\"] is None:\n # No parameters to port\n ...\n elif config[\"class_embed_type\"] == \"timestep\" or config[\"class_embed_type\"] == \"projection\":\n new_checkpoint[\"class_embedding.linear_1.weight\"] = unet_state_dict[\"label_emb.0.0.weight\"]\n new_checkpoint[\"class_embedding.linear_1.bias\"] = unet_state_dict[\"label_emb.0.0.bias\"]\n new_checkpoint[\"class_embedding.linear_2.weight\"] = unet_state_dict[\"label_emb.0.2.weight\"]\n new_checkpoint[\"class_embedding.linear_2.bias\"] = unet_state_dict[\"label_emb.0.2.bias\"]\n else:\n raise NotImplementedError(f\"Not implemented `class_embed_type`: {config['class_embed_type']}\")\n\n new_checkpoint[\"conv_in.weight\"] = unet_state_dict[\"input_blocks.0.0.weight\"]\n new_checkpoint[\"conv_in.bias\"] = unet_state_dict[\"input_blocks.0.0.bias\"]\n\n if not controlnet:\n new_checkpoint[\"conv_norm_out.weight\"] = unet_state_dict[\"out.0.weight\"]\n new_checkpoint[\"conv_norm_out.bias\"] = unet_state_dict[\"out.0.bias\"]\n new_checkpoint[\"conv_out.weight\"] = unet_state_dict[\"out.2.weight\"]\n new_checkpoint[\"conv_out.bias\"] = unet_state_dict[\"out.2.bias\"]\n\n # Retrieves the keys for the input blocks only\n num_input_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"input_blocks\" in layer})\n input_blocks = {\n layer_id: [key for key in unet_state_dict if f\"input_blocks.{layer_id}\" in key]\n for layer_id in range(num_input_blocks)\n }\n\n # Retrieves the keys for the middle blocks only\n num_middle_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"middle_block\" in layer})\n middle_blocks = {\n layer_id: [key for key in unet_state_dict if f\"middle_block.{layer_id}\" in key]\n for layer_id in range(num_middle_blocks)\n }\n\n # Retrieves the keys for the output 
blocks only\n num_output_blocks = len({\".\".join(layer.split(\".\")[:2]) for layer in unet_state_dict if \"output_blocks\" in layer})\n output_blocks = {\n layer_id: [key for key in unet_state_dict if f\"output_blocks.{layer_id}\" in key]\n for layer_id in range(num_output_blocks)\n }\n\n for i in range(1, num_input_blocks):\n block_id = (i - 1) // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = (i - 1) % (config[\"layers_per_block\"] + 1)\n\n resnets = [\n key for key in input_blocks[i] if f\"input_blocks.{i}.0\" in key and f\"input_blocks.{i}.0.op\" not in key\n ]\n attentions = [key for key in input_blocks[i] if f\"input_blocks.{i}.1\" in key]\n\n if f\"input_blocks.{i}.0.op.weight\" in unet_state_dict:\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.weight\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.weight\"\n )\n new_checkpoint[f\"down_blocks.{block_id}.downsamplers.0.conv.bias\"] = unet_state_dict.pop(\n f\"input_blocks.{i}.0.op.bias\"\n )\n\n paths = renew_resnet_paths(resnets)\n meta_path = {\"old\": f\"input_blocks.{i}.0\", \"new\": f\"down_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\"old\": f\"input_blocks.{i}.1\", \"new\": f\"down_blocks.{block_id}.attentions.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n resnet_0 = middle_blocks[0]\n attentions = middle_blocks[1]\n resnet_1 = middle_blocks[2]\n\n resnet_0_paths = renew_resnet_paths(resnet_0)\n assign_to_checkpoint(resnet_0_paths, new_checkpoint, unet_state_dict, config=config)\n\n resnet_1_paths = renew_resnet_paths(resnet_1)\n assign_to_checkpoint(resnet_1_paths, new_checkpoint, unet_state_dict, config=config)\n\n attentions_paths = renew_attention_paths(attentions)\n meta_path = {\"old\": \"middle_block.1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(\n attentions_paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n for i in range(num_output_blocks):\n block_id = i // (config[\"layers_per_block\"] + 1)\n layer_in_block_id = i % (config[\"layers_per_block\"] + 1)\n output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]\n output_block_list = {}\n\n for layer in output_block_layers:\n layer_id, layer_name = layer.split(\".\")[0], shave_segments(layer, 1)\n if layer_id in output_block_list:\n output_block_list[layer_id].append(layer_name)\n else:\n output_block_list[layer_id] = [layer_name]\n\n if len(output_block_list) > 1:\n resnets = [key for key in output_blocks[i] if f\"output_blocks.{i}.0\" in key]\n attentions = [key for key in output_blocks[i] if f\"output_blocks.{i}.1\" in key]\n\n resnet_0_paths = renew_resnet_paths(resnets)\n paths = renew_resnet_paths(resnets)\n\n meta_path = {\"old\": f\"output_blocks.{i}.0\", \"new\": f\"up_blocks.{block_id}.resnets.{layer_in_block_id}\"}\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n\n output_block_list = {k: sorted(v) for k, v in output_block_list.items()}\n if [\"conv.bias\", \"conv.weight\"] in output_block_list.values():\n index = list(output_block_list.values()).index([\"conv.bias\", \"conv.weight\"])\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.weight\"] = 
unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.weight\"\n ]\n new_checkpoint[f\"up_blocks.{block_id}.upsamplers.0.conv.bias\"] = unet_state_dict[\n f\"output_blocks.{i}.{index}.conv.bias\"\n ]\n\n # Clear attentions as they have been attributed above.\n if len(attentions) == 2:\n attentions = []\n\n if len(attentions):\n paths = renew_attention_paths(attentions)\n meta_path = {\n \"old\": f\"output_blocks.{i}.1\",\n \"new\": f\"up_blocks.{block_id}.attentions.{layer_in_block_id}\",\n }\n assign_to_checkpoint(\n paths, new_checkpoint, unet_state_dict, additional_replacements=[meta_path], config=config\n )\n else:\n resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)\n for path in resnet_0_paths:\n old_path = \".\".join([\"output_blocks\", str(i), path[\"old\"]])\n new_path = \".\".join([\"up_blocks\", str(block_id), \"resnets\", str(layer_in_block_id), path[\"new\"]])\n\n new_checkpoint[new_path] = unet_state_dict[old_path]\n\n if controlnet:\n # conditioning embedding\n\n orig_index = 0\n\n new_checkpoint[\"controlnet_cond_embedding.conv_in.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_in.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n orig_index += 2\n\n diffusers_index = 0\n\n while diffusers_index < 6:\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[f\"controlnet_cond_embedding.blocks.{diffusers_index}.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n diffusers_index += 1\n orig_index += 2\n\n new_checkpoint[\"controlnet_cond_embedding.conv_out.weight\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.weight\"\n )\n new_checkpoint[\"controlnet_cond_embedding.conv_out.bias\"] = unet_state_dict.pop(\n f\"input_hint_block.{orig_index}.bias\"\n )\n\n # down blocks\n for i in range(num_input_blocks):\n new_checkpoint[f\"controlnet_down_blocks.{i}.weight\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.weight\")\n new_checkpoint[f\"controlnet_down_blocks.{i}.bias\"] = unet_state_dict.pop(f\"zero_convs.{i}.0.bias\")\n\n # mid block\n new_checkpoint[\"controlnet_mid_block.weight\"] = unet_state_dict.pop(\"middle_block_out.0.weight\")\n new_checkpoint[\"controlnet_mid_block.bias\"] = unet_state_dict.pop(\"middle_block_out.0.bias\")\n\n return new_checkpoint" }, { "identifier": "convert_ldm_clip_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_clip_checkpoint(checkpoint):\n text_model = CLIPTextModel.from_pretrained(\"openai/clip-vit-large-patch14\")\n keys = list(checkpoint.keys())\n\n text_model_dict = {}\n\n for key in keys:\n if key.startswith(\"cond_stage_model.transformer\"):\n text_model_dict[key[len(\"cond_stage_model.transformer.\") :]] = checkpoint[key]\n\n text_model.load_state_dict(text_model_dict)\n\n return text_model" }, { "identifier": "convert_ldm_vae_checkpoint", "path": "animatediff/utils/convert_from_ckpt.py", "snippet": "def convert_ldm_vae_checkpoint(checkpoint, config):\n # extract state dict for VAE\n vae_state_dict = {}\n vae_key = \"first_stage_model.\"\n keys = list(checkpoint.keys())\n for key in keys:\n if key.startswith(vae_key):\n vae_state_dict[key.replace(vae_key, \"\")] = checkpoint.get(key)\n\n new_checkpoint = {}\n\n new_checkpoint[\"encoder.conv_in.weight\"] = vae_state_dict[\"encoder.conv_in.weight\"]\n 
new_checkpoint[\"encoder.conv_in.bias\"] = vae_state_dict[\"encoder.conv_in.bias\"]\n new_checkpoint[\"encoder.conv_out.weight\"] = vae_state_dict[\"encoder.conv_out.weight\"]\n new_checkpoint[\"encoder.conv_out.bias\"] = vae_state_dict[\"encoder.conv_out.bias\"]\n new_checkpoint[\"encoder.conv_norm_out.weight\"] = vae_state_dict[\"encoder.norm_out.weight\"]\n new_checkpoint[\"encoder.conv_norm_out.bias\"] = vae_state_dict[\"encoder.norm_out.bias\"]\n\n new_checkpoint[\"decoder.conv_in.weight\"] = vae_state_dict[\"decoder.conv_in.weight\"]\n new_checkpoint[\"decoder.conv_in.bias\"] = vae_state_dict[\"decoder.conv_in.bias\"]\n new_checkpoint[\"decoder.conv_out.weight\"] = vae_state_dict[\"decoder.conv_out.weight\"]\n new_checkpoint[\"decoder.conv_out.bias\"] = vae_state_dict[\"decoder.conv_out.bias\"]\n new_checkpoint[\"decoder.conv_norm_out.weight\"] = vae_state_dict[\"decoder.norm_out.weight\"]\n new_checkpoint[\"decoder.conv_norm_out.bias\"] = vae_state_dict[\"decoder.norm_out.bias\"]\n\n new_checkpoint[\"quant_conv.weight\"] = vae_state_dict[\"quant_conv.weight\"]\n new_checkpoint[\"quant_conv.bias\"] = vae_state_dict[\"quant_conv.bias\"]\n new_checkpoint[\"post_quant_conv.weight\"] = vae_state_dict[\"post_quant_conv.weight\"]\n new_checkpoint[\"post_quant_conv.bias\"] = vae_state_dict[\"post_quant_conv.bias\"]\n\n # Retrieves the keys for the encoder down blocks only\n num_down_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"encoder.down\" in layer})\n down_blocks = {\n layer_id: [key for key in vae_state_dict if f\"down.{layer_id}\" in key] for layer_id in range(num_down_blocks)\n }\n\n # Retrieves the keys for the decoder up blocks only\n num_up_blocks = len({\".\".join(layer.split(\".\")[:3]) for layer in vae_state_dict if \"decoder.up\" in layer})\n up_blocks = {\n layer_id: [key for key in vae_state_dict if f\"up.{layer_id}\" in key] for layer_id in range(num_up_blocks)\n }\n\n for i in range(num_down_blocks):\n resnets = [key for key in down_blocks[i] if f\"down.{i}\" in key and f\"down.{i}.downsample\" not in key]\n\n if f\"encoder.down.{i}.downsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.weight\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.weight\"\n )\n new_checkpoint[f\"encoder.down_blocks.{i}.downsamplers.0.conv.bias\"] = vae_state_dict.pop(\n f\"encoder.down.{i}.downsample.conv.bias\"\n )\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"down.{i}.block\", \"new\": f\"down_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"encoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"encoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"encoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n\n for i in 
range(num_up_blocks):\n block_id = num_up_blocks - 1 - i\n resnets = [\n key for key in up_blocks[block_id] if f\"up.{block_id}\" in key and f\"up.{block_id}.upsample\" not in key\n ]\n\n if f\"decoder.up.{block_id}.upsample.conv.weight\" in vae_state_dict:\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.weight\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.weight\"\n ]\n new_checkpoint[f\"decoder.up_blocks.{i}.upsamplers.0.conv.bias\"] = vae_state_dict[\n f\"decoder.up.{block_id}.upsample.conv.bias\"\n ]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"up.{block_id}.block\", \"new\": f\"up_blocks.{i}.resnets\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_resnets = [key for key in vae_state_dict if \"decoder.mid.block\" in key]\n num_mid_res_blocks = 2\n for i in range(1, num_mid_res_blocks + 1):\n resnets = [key for key in mid_resnets if f\"decoder.mid.block_{i}\" in key]\n\n paths = renew_vae_resnet_paths(resnets)\n meta_path = {\"old\": f\"mid.block_{i}\", \"new\": f\"mid_block.resnets.{i - 1}\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n\n mid_attentions = [key for key in vae_state_dict if \"decoder.mid.attn\" in key]\n paths = renew_vae_attention_paths(mid_attentions)\n meta_path = {\"old\": \"mid.attn_1\", \"new\": \"mid_block.attentions.0\"}\n assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)\n conv_attn_to_linear(new_checkpoint)\n return new_checkpoint" }, { "identifier": "get_freq_filter", "path": "animatediff/utils/freeinit_utils.py", "snippet": "def get_freq_filter(shape, device, filter_type, n, d_s, d_t):\n \"\"\"\n Form the frequency filter for noise reinitialization.\n\n Args:\n shape: shape of latent (B, C, T, H, W)\n filter_type: type of the freq filter\n n: (only for butterworth) order of the filter, larger n ~ ideal, smaller n ~ gaussian\n d_s: normalized stop frequency for spatial dimensions (0.0-1.0)\n d_t: normalized stop frequency for temporal dimension (0.0-1.0)\n \"\"\"\n if filter_type == \"gaussian\":\n return gaussian_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"ideal\":\n return ideal_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"box\":\n return box_low_pass_filter(shape=shape, d_s=d_s, d_t=d_t).to(device)\n elif filter_type == \"butterworth\":\n return butterworth_low_pass_filter(shape=shape, n=n, d_s=d_s, d_t=d_t).to(device)\n else:\n raise NotImplementedError" } ]
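The AnimationFreeInitPipeline snippet above re-diffuses the previous latents to z_T, draws fresh noise z_rand, and blends the two with freq_mix_3d using the low-pass filter produced by get_freq_filter. freq_mix_3d itself is not among this record's context snippets, so the following is only a sketch, assuming the blend is a frequency-domain mix over the temporal and spatial axes; the function name, the FFT axes, and the real-part cast are assumptions, not the repository's verbatim code.

import torch
import torch.fft as fft

def freq_mix_3d_sketch(z_T: torch.Tensor, z_rand: torch.Tensor, LPF: torch.Tensor) -> torch.Tensor:
    # Hypothetical sketch: keep the low-frequency content of the re-diffused latent z_T
    # and take the high-frequency content from the fresh noise z_rand.
    # Shapes are assumed to be (B, C, T, H, W), matching the filter built by get_freq_filter.
    z_T_freq = fft.fftshift(fft.fftn(z_T, dim=(-3, -2, -1)), dim=(-3, -2, -1))
    z_rand_freq = fft.fftshift(fft.fftn(z_rand, dim=(-3, -2, -1)), dim=(-3, -2, -1))

    mixed_freq = z_T_freq * LPF + z_rand_freq * (1 - LPF)  # low-pass from z_T, high-pass from z_rand

    mixed = fft.ifftn(fft.ifftshift(mixed_freq, dim=(-3, -2, -1)), dim=(-3, -2, -1))
    return mixed.real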
import os
import torch
import random
import gradio as gr
from glob import glob
from omegaconf import OmegaConf
from safetensors import safe_open
from diffusers import AutoencoderKL
from diffusers import EulerDiscreteScheduler, DDIMScheduler
from diffusers.utils.import_utils import is_xformers_available
from transformers import CLIPTextModel, CLIPTokenizer
from animatediff.models.unet import UNet3DConditionModel
from animatediff.pipelines.pipeline_animation import AnimationFreeInitPipeline
from animatediff.utils.util import save_videos_grid
from animatediff.utils.convert_from_ckpt import convert_ldm_unet_checkpoint, convert_ldm_clip_checkpoint, convert_ldm_vae_checkpoint
from diffusers.training_utils import set_seed
from animatediff.utils.freeinit_utils import get_freq_filter
from collections import namedtuple
14,510
# ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = 
convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor]
pretrained_model_path = "models/StableDiffusion/stable-diffusion-v1-5" inference_config_path = "configs/inference/inference-v1.yaml" css = """ .toolbutton { margin-buttom: 0em 0em 0em 0em; max-width: 2.5em; min-width: 2.5em !important; height: 2.5em; } """ examples = [ # 0-RealisticVision [ "realisticVisionV51_v20Novae.safetensors", "mm_sd_v14.ckpt", "A panda standing on a surfboard in the ocean under moonlight.", "worst quality, low quality, nsfw, logo", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 1-ToonYou [ "toonyou_beta3.safetensors", "mm_sd_v14.ckpt", "(best quality, masterpiece), 1girl, looking at viewer, blurry background, upper body, contemporary, dress", "(worst quality, low quality)", 512, 512, "478028150728261", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 2-Lyriel [ "lyriel_v16.safetensors", "mm_sd_v14.ckpt", "hypercars cyberpunk moving, muted colors, swirling color smokes, legend, cityscape, space", "3d, cartoon, anime, sketches, worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 3-RCNZ [ "rcnzCartoon3d_v10.safetensors", "mm_sd_v14.ckpt", "A cute raccoon playing guitar in a boat on the ocean", "worst quality, low quality, nsfw, logo", 512, 512, "1566149281915957", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # 4-MajicMix [ "majicmixRealistic_v5Preview.safetensors", "mm_sd_v14.ckpt", "1girl, reading book", "(ng_deepnegative_v1_75t:1.2), (badhandv4:1), (worst quality:2), (low quality:2), (normal quality:2), lowres, bad anatomy, bad hands, watermark, moles", 512, 512, "2005563494988190", "butterworth", 0.25, 0.25, 3, ["use_fp16"] ], # # 5-RealisticVision # [ # "realisticVisionV51_v20Novae.safetensors", # "mm_sd_v14.ckpt", # "A panda standing on a surfboard in the ocean in sunset.", # "worst quality, low quality, nsfw, logo", # 512, 512, "2005563494988190", # "butterworth", 0.25, 0.25, 3, # ["use_fp16"] # ] ] # clean unrelated ckpts # ckpts = [ # "realisticVisionV40_v20Novae.safetensors", # "majicmixRealistic_v5Preview.safetensors", # "rcnzCartoon3d_v10.safetensors", # "lyriel_v16.safetensors", # "toonyou_beta3.safetensors" # ] # for path in glob(os.path.join("models", "DreamBooth_LoRA", "*.safetensors")): # for ckpt in ckpts: # if path.endswith(ckpt): break # else: # print(f"### Cleaning {path} ...") # os.system(f"rm -rf {path}") # os.system(f"rm -rf {os.path.join('models', 'DreamBooth_LoRA', '*.safetensors')}") # os.system(f"bash download_bashscripts/1-ToonYou.sh") # os.system(f"bash download_bashscripts/2-Lyriel.sh") # os.system(f"bash download_bashscripts/3-RcnzCartoon.sh") # os.system(f"bash download_bashscripts/4-MajicMix.sh") # os.system(f"bash download_bashscripts/5-RealisticVision.sh") # # clean Gradio cache # print(f"### Cleaning cached examples ...") # os.system(f"rm -rf gradio_cached_examples/") class AnimateController: def __init__(self): # config dirs self.basedir = os.getcwd() self.stable_diffusion_dir = os.path.join(self.basedir, "models", "StableDiffusion") self.motion_module_dir = os.path.join(self.basedir, "models", "Motion_Module") self.personalized_model_dir = os.path.join(self.basedir, "models", "DreamBooth_LoRA") self.savedir = os.path.join(self.basedir, "samples") os.makedirs(self.savedir, exist_ok=True) self.base_model_list = [] self.motion_module_list = [] self.filter_type_list = [ "butterworth", "gaussian", "box", "ideal" ] self.selected_base_model = None self.selected_motion_module = None self.selected_filter_type = None self.set_width = None 
self.set_height = None self.set_d_s = None self.set_d_t = None self.refresh_motion_module() self.refresh_personalized_model() # config models self.inference_config = OmegaConf.load(inference_config_path) self.tokenizer = CLIPTokenizer.from_pretrained(pretrained_model_path, subfolder="tokenizer") self.text_encoder = CLIPTextModel.from_pretrained(pretrained_model_path, subfolder="text_encoder").cuda() self.vae = AutoencoderKL.from_pretrained(pretrained_model_path, subfolder="vae").cuda() self.unet = UNet3DConditionModel.from_pretrained_2d(pretrained_model_path, subfolder="unet", unet_additional_kwargs=OmegaConf.to_container(self.inference_config.unet_additional_kwargs)).cuda() self.freq_filter = None self.update_base_model(self.base_model_list[-2]) self.update_motion_module(self.motion_module_list[0]) self.update_filter(512, 512, self.filter_type_list[0], 0.25, 0.25) def refresh_motion_module(self): motion_module_list = glob(os.path.join(self.motion_module_dir, "*.ckpt")) self.motion_module_list = sorted([os.path.basename(p) for p in motion_module_list]) def refresh_personalized_model(self): base_model_list = glob(os.path.join(self.personalized_model_dir, "*.safetensors")) self.base_model_list = sorted([os.path.basename(p) for p in base_model_list]) def update_base_model(self, base_model_dropdown): self.selected_base_model = base_model_dropdown base_model_dropdown = os.path.join(self.personalized_model_dir, base_model_dropdown) base_model_state_dict = {} with safe_open(base_model_dropdown, framework="pt", device="cpu") as f: for key in f.keys(): base_model_state_dict[key] = f.get_tensor(key) converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_model_state_dict, self.vae.config) self.vae.load_state_dict(converted_vae_checkpoint) converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_model_state_dict, self.unet.config) self.unet.load_state_dict(converted_unet_checkpoint, strict=False) self.text_encoder = convert_ldm_clip_checkpoint(base_model_state_dict) return gr.Dropdown.update() def update_motion_module(self, motion_module_dropdown): self.selected_motion_module = motion_module_dropdown motion_module_dropdown = os.path.join(self.motion_module_dir, motion_module_dropdown) motion_module_state_dict = torch.load(motion_module_dropdown, map_location="cpu") _, unexpected = self.unet.load_state_dict(motion_module_state_dict, strict=False) assert len(unexpected) == 0 return gr.Dropdown.update() # def update_filter(self, shape, method, n, d_s, d_t): def update_filter(self, width_slider, height_slider, filter_type_dropdown, d_s_slider, d_t_slider): self.set_width = width_slider self.set_height = height_slider self.selected_filter_type = filter_type_dropdown self.set_d_s = d_s_slider self.set_d_t = d_t_slider vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) shape = [1, 4, 16, self.set_width//vae_scale_factor, self.set_height//vae_scale_factor]
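Both cropped_code and all_code above stop at the latent shape that update_filter assembles. As a quick worked check (a sketch assuming the standard Stable Diffusion 1.5 VAE config, whose block_out_channels has four entries and is not spelled out in this record), a 512x512 request gives a scale factor of 8 and a 64x64 latent grid:

# Worked example of the shape computed at the end of update_filter.
# block_out_channels is an assumption based on the usual SD 1.5 VAE config.
block_out_channels = [128, 256, 512, 512]
vae_scale_factor = 2 ** (len(block_out_channels) - 1)        # -> 8
width, height, video_length = 512, 512, 16
shape = [1, 4, video_length, width // vae_scale_factor, height // vae_scale_factor]
print(vae_scale_factor, shape)                               # 8 [1, 4, 16, 64, 64]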
self.freq_filter = get_freq_filter(
6
2023-12-19 21:06:32+00:00
24k
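For this record, next_line opens a call to get_freq_filter, and gold_snippet_index 6 points at that helper in the context list above, so the expected completion wires the shape and filter settings from update_filter into get_freq_filter's documented parameters. The statement below is a plausible continuation of the method body shown in all_code, not the repository's verbatim code; the device string and the butterworth order n=4 are assumptions.

# Hypothetical completion of next_line, following the get_freq_filter signature
# (shape, device, filter_type, n, d_s, d_t) shown in this record's context.
self.freq_filter = get_freq_filter(
    shape,                                  # [1, 4, 16, width // 8, height // 8]
    device="cuda",                          # assumption: filters live on the GPU with the models
    filter_type=self.selected_filter_type,  # "butterworth" | "gaussian" | "box" | "ideal"
    n=4,                                    # assumption: butterworth order (ignored by other filters)
    d_s=self.set_d_s,                       # normalized spatial stop frequency, e.g. 0.25
    d_t=self.set_d_t,                       # normalized temporal stop frequency, e.g. 0.25
)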
m-abr/FCPCodebase
world/Robot.py
[ { "identifier": "Math_Ops", "path": "math_ops/Math_Ops.py", "snippet": "class Math_Ops():\n '''\n This class provides general mathematical operations that are not directly available through numpy \n '''\n \n @staticmethod\n def deg_sph2cart(spherical_vec):\n ''' Converts SimSpark's spherical coordinates in degrees to cartesian coordinates '''\n r = spherical_vec[0]\n h = spherical_vec[1] * pi / 180\n v = spherical_vec[2] * pi / 180\n return np.array([r * cos(v) * cos(h), r * cos(v) * sin(h), r * sin(v)])\n\n @staticmethod\n def deg_sin(deg_angle):\n ''' Returns sin of degrees '''\n return sin(deg_angle * pi / 180)\n\n @staticmethod\n def deg_cos(deg_angle):\n ''' Returns cos of degrees '''\n return cos(deg_angle * pi / 180)\n\n @staticmethod\n def to_3d(vec_2d, value=0) -> np.ndarray:\n ''' Returns new 3d vector from 2d vector '''\n return np.append(vec_2d,value)\n\n @staticmethod\n def to_2d_as_3d(vec_3d) -> np.ndarray:\n ''' Returns new 3d vector where the 3rd dimension is zero '''\n vec_2d_as_3d = np.copy(vec_3d)\n vec_2d_as_3d[2] = 0\n return vec_2d_as_3d\n\n @staticmethod\n def normalize_vec(vec) -> np.ndarray:\n ''' Divides vector by its length '''\n size = np.linalg.norm(vec)\n if size == 0: return vec\n return vec / size\n\n @staticmethod\n def get_active_directory(dir:str) -> str:\n global GLOBAL_DIR\n return GLOBAL_DIR + dir\n\n @staticmethod\n def acos(val):\n ''' arccosine function that limits input '''\n return acos( np.clip(val,-1,1) )\n \n @staticmethod\n def asin(val):\n ''' arcsine function that limits input '''\n return asin( np.clip(val,-1,1) )\n\n @staticmethod\n def normalize_deg(val):\n ''' normalize val in range [-180,180[ '''\n return (val + 180.0) % 360 - 180\n\n @staticmethod\n def normalize_rad(val):\n ''' normalize val in range [-pi,pi[ '''\n return (val + pi) % (2*pi) - pi\n\n @staticmethod\n def deg_to_rad(val):\n ''' convert degrees to radians '''\n return val * 0.01745329251994330\n\n @staticmethod\n def rad_to_deg(val):\n ''' convert radians to degrees '''\n return val * 57.29577951308232\n\n @staticmethod\n def vector_angle(vector, is_rad=False):\n ''' angle (degrees or radians) of 2D vector '''\n if is_rad:\n return atan2(vector[1], vector[0])\n else:\n return atan2(vector[1], vector[0]) * 180 / pi\n\n @staticmethod\n def vectors_angle(vec1, vec2, is_rad=False):\n ''' get angle between vectors (degrees or radians) '''\n ang_rad = acos(np.dot(Math_Ops.normalize_vec(vec1),Math_Ops.normalize_vec(vec2)))\n return ang_rad if is_rad else ang_rad * 180 / pi\n\n @staticmethod\n def vector_from_angle(angle, is_rad=False):\n ''' unit vector with direction given by `angle` '''\n if is_rad:\n return np.array([cos(angle), sin(angle)], float)\n else:\n return np.array([Math_Ops.deg_cos(angle), Math_Ops.deg_sin(angle)], float)\n\n @staticmethod\n def target_abs_angle(pos2d, target, is_rad=False):\n ''' angle (degrees or radians) of vector (target-pos2d) '''\n if is_rad:\n return atan2(target[1]-pos2d[1], target[0]-pos2d[0])\n else:\n return atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi\n\n @staticmethod\n def target_rel_angle(pos2d, ori, target, is_rad=False):\n ''' relative angle (degrees or radians) of target if we're located at 'pos2d' with orientation 'ori' (degrees or radians) '''\n if is_rad:\n return Math_Ops.normalize_rad( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) - ori )\n else:\n return Math_Ops.normalize_deg( atan2(target[1]-pos2d[1], target[0]-pos2d[0]) * 180 / pi - ori )\n\n @staticmethod\n def rotate_2d_vec(vec, angle, is_rad=False):\n ''' 
rotate 2D vector anticlockwise around the origin by `angle` '''\n cos_ang = cos(angle) if is_rad else cos(angle * pi / 180)\n sin_ang = sin(angle) if is_rad else sin(angle * pi / 180)\n return np.array([cos_ang*vec[0]-sin_ang*vec[1], sin_ang*vec[0]+cos_ang*vec[1]])\n\n @staticmethod\n def distance_point_to_line(p:np.ndarray, a:np.ndarray, b:np.ndarray):\n ''' \n Distance between point p and 2d line 'ab' (and side where p is)\n\n Parameters\n ----------\n a : ndarray\n 2D point that defines line\n b : ndarray\n 2D point that defines line\n p : ndarray\n 2D point\n\n Returns\n -------\n distance : float\n distance between line and point\n side : str\n if we are at a, looking at b, p may be at our \"left\" or \"right\"\n '''\n line_len = np.linalg.norm(b-a)\n\n if line_len == 0: # assumes vertical line\n dist = sdist = np.linalg.norm(p-a)\n else:\n sdist = np.cross(b-a,p-a)/line_len\n dist = abs(sdist)\n\n return dist, \"left\" if sdist>0 else \"right\"\n\n @staticmethod\n def distance_point_to_segment(p:np.ndarray, a:np.ndarray, b:np.ndarray):\n ''' Distance from point p to 2d line segment 'ab' '''\n \n ap = p-a\n ab = b-a\n\n ad = Math_Ops.vector_projection(ap,ab)\n\n # Is d in ab? We can find k in (ad = k * ab) without computing any norm\n # we use the largest dimension of ab to avoid division by 0\n k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]\n\n if k <= 0: return np.linalg.norm(ap)\n elif k >= 1: return np.linalg.norm(p-b)\n else: return np.linalg.norm(p-(ad + a)) # p-d\n\n @staticmethod\n def distance_point_to_ray(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):\n ''' Distance from point p to 2d ray '''\n \n rp = p-ray_start\n rd = Math_Ops.vector_projection(rp,ray_direction)\n\n # Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm\n # we use the largest dimension of ray_direction to avoid division by 0\n k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]\n\n if k <= 0: return np.linalg.norm(rp)\n else: return np.linalg.norm(p-(rd + ray_start)) # p-d\n\n @staticmethod\n def closest_point_on_ray_to_point(p:np.ndarray, ray_start:np.ndarray, ray_direction:np.ndarray):\n ''' Point on ray closest to point p '''\n \n rp = p-ray_start\n rd = Math_Ops.vector_projection(rp,ray_direction)\n\n # Is d in ray? We can find k in (rd = k * ray_direction) without computing any norm\n # we use the largest dimension of ray_direction to avoid division by 0\n k = rd[0]/ray_direction[0] if abs(ray_direction[0])>abs(ray_direction[1]) else rd[1]/ray_direction[1]\n\n if k <= 0: return ray_start\n else: return rd + ray_start\n\n @staticmethod\n def does_circle_intersect_segment(p:np.ndarray, r, a:np.ndarray, b:np.ndarray):\n ''' Returns true if circle (center p, radius r) intersect 2d line segment '''\n\n ap = p-a\n ab = b-a\n\n ad = Math_Ops.vector_projection(ap,ab)\n\n # Is d in ab? 
We can find k in (ad = k * ab) without computing any norm\n # we use the largest dimension of ab to avoid division by 0\n k = ad[0]/ab[0] if abs(ab[0])>abs(ab[1]) else ad[1]/ab[1]\n\n if k <= 0: return np.dot(ap,ap) <= r*r\n elif k >= 1: return np.dot(p-b,p-b) <= r*r\n \n dp = p-(ad + a)\n return np.dot(dp,dp) <= r*r\n\n @staticmethod\n def vector_projection(a:np.ndarray, b:np.ndarray):\n ''' Vector projection of a onto b '''\n b_dot = np.dot(b,b)\n return b * np.dot(a,b) / b_dot if b_dot != 0 else b\n\n @staticmethod\n def do_noncollinear_segments_intersect(a,b,c,d):\n ''' \n Check if 2d line segment 'ab' intersects with noncollinear 2d line segment 'cd' \n Explanation: https://www.geeksforgeeks.org/check-if-two-given-line-segments-intersect/ \n '''\n\n ccw = lambda a,b,c: (c[1]-a[1]) * (b[0]-a[0]) > (b[1]-a[1]) * (c[0]-a[0])\n return ccw(a,c,d) != ccw(b,c,d) and ccw(a,b,c) != ccw(a,b,d)\n\n @staticmethod\n def intersection_segment_opp_goal(a:np.ndarray, b:np.ndarray):\n ''' Computes the intersection point of 2d segment 'ab' and the opponents' goal (front line) '''\n vec_x = b[0]-a[0]\n\n # Collinear intersections are not accepted\n if vec_x == 0: return None\n \n k = (15.01-a[0])/vec_x\n\n # No collision\n if k < 0 or k > 1: return None\n\n intersection_pt = a + (b-a) * k\n\n if -1.01 <= intersection_pt[1] <= 1.01:\n return intersection_pt\n else:\n return None\n\n @staticmethod\n def intersection_circle_opp_goal(p:np.ndarray, r):\n ''' \n Computes the intersection segment of circle (center p, radius r) and the opponents' goal (front line)\n Only the y coordinates are returned since the x coordinates are always equal to 15\n '''\n\n x_dev = abs(15-p[0])\n\n if x_dev > r:\n return None # no intersection with x=15\n\n y_dev = sqrt(r*r - x_dev*x_dev)\n\n p1 = max(p[1] - y_dev, -1.01)\n p2 = min(p[1] + y_dev, 1.01)\n\n if p1 == p2:\n return p1 # return the y coordinate of a single intersection point\n elif p2 < p1:\n return None # no intersection\n else:\n return p1, p2 # return the y coordinates of the intersection segment\n\n\n @staticmethod\n def distance_point_to_opp_goal(p:np.ndarray):\n ''' Distance between point 'p' and the opponents' goal (front line) '''\n\n if p[1] < -1.01:\n return np.linalg.norm( p-(15,-1.01) )\n elif p[1] > 1.01:\n return np.linalg.norm( p-(15, 1.01) )\n else:\n return abs(15-p[0])\n\n\n @staticmethod\n def circle_line_segment_intersection(circle_center, circle_radius, pt1, pt2, full_line=True, tangent_tol=1e-9):\n \"\"\" Find the points at which a circle intersects a line-segment. This can happen at 0, 1, or 2 points.\n\n :param circle_center: The (x, y) location of the circle center\n :param circle_radius: The radius of the circle\n :param pt1: The (x, y) location of the first point of the segment\n :param pt2: The (x, y) location of the second point of the segment\n :param full_line: True to find intersections along full line - not just in the segment. 
False will just return intersections within the segment.\n :param tangent_tol: Numerical tolerance at which we decide the intersections are close enough to consider it a tangent\n :return Sequence[Tuple[float, float]]: A list of length 0, 1, or 2, where each element is a point at which the circle intercepts a line segment.\n\n Note: We follow: http://mathworld.wolfram.com/Circle-LineIntersection.html\n \"\"\"\n\n (p1x, p1y), (p2x, p2y), (cx, cy) = pt1, pt2, circle_center\n (x1, y1), (x2, y2) = (p1x - cx, p1y - cy), (p2x - cx, p2y - cy)\n dx, dy = (x2 - x1), (y2 - y1)\n dr = (dx ** 2 + dy ** 2)**.5\n big_d = x1 * y2 - x2 * y1\n discriminant = circle_radius ** 2 * dr ** 2 - big_d ** 2\n\n if discriminant < 0: # No intersection between circle and line\n return []\n else: # There may be 0, 1, or 2 intersections with the segment\n intersections = [\n (cx + (big_d * dy + sign * (-1 if dy < 0 else 1) * dx * discriminant**.5) / dr ** 2,\n cy + (-big_d * dx + sign * abs(dy) * discriminant**.5) / dr ** 2)\n for sign in ((1, -1) if dy < 0 else (-1, 1))] # This makes sure the order along the segment is correct\n if not full_line: # If only considering the segment, filter out intersections that do not fall within the segment\n fraction_along_segment = [\n (xi - p1x) / dx if abs(dx) > abs(dy) else (yi - p1y) / dy for xi, yi in intersections]\n intersections = [pt for pt, frac in zip(\n intersections, fraction_along_segment) if 0 <= frac <= 1]\n # If line is tangent to circle, return just one point (as both intersections have same location)\n if len(intersections) == 2 and abs(discriminant) <= tangent_tol:\n return [intersections[0]]\n else:\n return intersections\n\n\n\n\n # adapted from https://stackoverflow.com/questions/3252194/numpy-and-line-intersections\n @staticmethod\n def get_line_intersection(a1, a2, b1, b2):\n \"\"\" \n Returns the point of intersection of the lines passing through a2,a1 and b2,b1.\n a1: [x, y] a point on the first line\n a2: [x, y] another point on the first line\n b1: [x, y] a point on the second line\n b2: [x, y] another point on the second line\n \"\"\"\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return np.array([float('inf'), float('inf')])\n return np.array([x/z, y/z],float)" }, { "identifier": "Matrix_3x3", "path": "math_ops/Matrix_3x3.py", "snippet": "class Matrix_3x3():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_3x3( ) # create identity matrix\n b = Matrix_3x3( [[1,1,1],[2,2,2],[3,3,3]] ) # manually initialize matrix\n c = Matrix_3x3( [1,1,1,2,2,2,3,3,3] ) # manually initialize matrix\n d = Matrix_3x3( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(3)\n elif type(matrix) == Matrix_3x3: \n self.m = np.copy(matrix.m)\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (3,3) #reshape if needed, throw error if impossible\n\n\n self.rotation_shortcuts={(1,0,0):self.rotate_x_rad, (-1, 0, 0):self._rotate_x_neg_rad,\n (0,1,0):self.rotate_y_rad, ( 0,-1, 0):self._rotate_y_neg_rad,\n (0,0,1):self.rotate_z_rad, ( 0, 0,-1):self._rotate_z_neg_rad}\n\n @classmethod\n def from_rotation_deg(cls, euler_vec):\n '''\n Create rotation matrix from Euler angles, in degrees.\n Rotation order: RotZ*RotY*RotX\n\n Parameters\n ----------\n euler_vec : array_like, length 3\n vector 
with Euler angles (x,y,z) aka (roll, pitch, yaw)\n\n Example\n ----------\n Matrix_3x3.from_rotation_deg((roll,pitch,yaw)) # Creates: RotZ(yaw)*RotY(pitch)*RotX(roll)\n '''\n mat = cls().rotate_z_deg(euler_vec[2], True).rotate_y_deg(euler_vec[1], True).rotate_x_deg(euler_vec[0], True)\n return mat\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n\n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(self.m[2,2]) * 180 / pi)\n\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: return\n\n shortcut = self.rotation_shortcuts.get(tuple(a for a in rotation_vec))\n if shortcut:\n return shortcut(rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys],\n [xyc1 + zs, yyc1 + c, yzc1 - xs],\n [xzc1 - ys, yzc1 + xs, zzc1 + c]])\n\n return self.multiply(mat, in_place)\n\n\n def _rotate_x_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_x_rad(-rotation_rad, in_place)\n\n def _rotate_y_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_y_rad(-rotation_rad, in_place)\n\n def _rotate_z_neg_rad(self, rotation_rad, in_place=False):\n self.rotate_z_rad(-rotation_rad, in_place)\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n 
result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0],\n [0, c,-s],\n [0, s, c]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s],\n [ 0, 1, 0],\n [-s, 0, c]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current rotation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_3x3(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0],\n [ s, c, 0],\n [ 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current rotation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current rotation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_3x3 \n self is returned if in_place is True\n 
'''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_3x3(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False, reverse_order=False):\n '''\n Multiplies the current rotation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_3x3 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n - True: the internal matrix is changed in-place\n - False: a new matrix is returned and the current one is not changed (default) \n reverse_order: bool, optional\n - False: self * mat\n - True: mat * self\n \n Returns\n -------\n result : Matrix_3x3 | array_like\n Matrix_3x3 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n # get array from matrix object or convert to numpy array (if needed) \n mat = mat.m if type(mat) == Matrix_3x3 else np.asarray(mat)\n\n a,b = (mat, self.m) if reverse_order else (self.m, mat)\n\n if mat.ndim == 1: \n return np.matmul(a, b) # multiplication by 3D vector\n elif in_place:\n np.matmul(a, b, self.m) # multiplication by matrix, in place\n return self\n else: # multiplication by matrix, return new Matrix_3x3\n return Matrix_3x3(np.matmul(a, b))" }, { "identifier": "Matrix_4x4", "path": "math_ops/Matrix_4x4.py", "snippet": "class Matrix_4x4():\n\n def __init__(self, matrix = None) -> None:\n '''\n Constructor examples:\n a = Matrix_4x4( ) # create identity matrix\n b = Matrix_4x4( [[1,1,1,1],[2,2,2,2],[3,3,3,3],[4,4,4,4]] ) # manually initialize matrix\n c = Matrix_4x4( [1,1,1,1,2,2,2,2,3,3,3,3,4,4,4,4] ) # manually initialize matrix\n d = Matrix_4x4( b ) # copy constructor\n '''\n if matrix is None:\n self.m = np.identity(4)\n elif type(matrix) == Matrix_4x4: \n self.m = np.copy(matrix.m)\n elif type(matrix) == Matrix_3x3: \n self.m = np.identity(4)\n self.m[0:3,0:3] = matrix.m\n else:\n self.m = np.asarray(matrix)\n self.m.shape = (4,4) #reshape if needed, throw error if impossible\n\n\n @classmethod\n def from_translation(cls, translation_vec):\n '''\n Create transformation matrix from translation_vec translation\n e.g. Matrix_4x4.from_translation((a,b,c))\n output: [[1,0,0,a],[0,1,0,b],[0,0,1,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n @classmethod\n def from_3x3_and_translation(cls, mat3x3:Matrix_3x3, translation_vec):\n '''\n Create transformation matrix from rotation matrix (3x3) and translation\n e.g. 
Matrix_4x4.from_3x3_and_translation(r,(a,b,c)) \n output: [[r00,r01,r02,a],[r10,r11,r12,b],[r20,r21,r22,c],[0,0,0,1]]\n '''\n mat = np.identity(4)\n mat[0:3,0:3] = mat3x3.m\n mat[0:3,3] = translation_vec\n return cls(mat)\n\n def translate(self, translation_vec, in_place=False):\n '''\n Translates the current transformation matrix\n\n Parameters\n ----------\n translation_vec : array_like, length 3\n translation vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place\n * False: a new matrix is returned and the current one is not changed \n\n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n vec = np.array([*translation_vec,1])# conversion to 4D vector\n np.matmul(self.m, vec, out=vec) # compute only 4th column\n\n if in_place:\n self.m[:,3] = vec\n return self\n else:\n ret = Matrix_4x4(self.m)\n ret.m[:,3] = vec\n return ret\n\n\n def get_translation(self):\n ''' Get translation vector (x,y,z) '''\n return self.m[0:3,3] # return view\n\n def get_x(self):\n return self.m[0,3]\n\n def get_y(self):\n return self.m[1,3]\n\n def get_z(self):\n return self.m[2,3]\n\n def get_rotation_4x4(self):\n ''' Get Matrix_4x4 without translation ''' \n mat = Matrix_4x4(self)\n mat.m[0:3,3] = 0\n return mat\n\n def get_rotation(self):\n ''' Get rotation Matrix_3x3 '''\n return Matrix_3x3(self.m[0:3,0:3])\n\n def get_distance(self):\n ''' Get translation vector length '''\n return np.linalg.norm(self.m[0:3,3])\n\n def get_roll_deg(self):\n ''' Get angle around the x-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[2,1] == 0 and self.m[2,2] == 0: \n return 180\n return atan2(self.m[2,1], self.m[2,2]) * 180 / pi\n\n def get_pitch_deg(self):\n ''' Get angle around the y-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n return atan2(-self.m[2,0], sqrt(self.m[2,1]*self.m[2,1] + self.m[2,2]*self.m[2,2])) * 180 / pi\n\n def get_yaw_deg(self):\n ''' Get angle around the z-axis in degrees, Rotation order: RotZ*RotY*RotX=Rot '''\n if self.m[1,0] == 0 and self.m[0,0] == 0: \n return atan2(self.m[0,1], self.m[1,1]) * 180 / pi\n return atan2(self.m[1,0], self.m[0,0]) * 180 / pi\n \n def get_inclination_deg(self):\n ''' Get inclination of z-axis in relation to reference z-axis '''\n return 90 - (asin(np.clip(self.m[2,2],-1,1)) * 180 / pi)\n\n def rotate_deg(self, rotation_vec, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_rad(rotation_vec, rotation_deg * (pi/180) , in_place)\n\n \n def rotate_rad(self, rotation_vec, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix\n\n Parameters\n ----------\n rotation_vec : array_like, length 3\n rotation vector\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n\n # shortcuts for rotation around 1 axis\n if 
rotation_vec[0]==0:\n if rotation_vec[1]==0:\n if rotation_vec[2]==1:\n return self.rotate_z_rad(rotation_rad, in_place)\n elif rotation_vec[2]==-1:\n return self.rotate_z_rad(-rotation_rad, in_place)\n elif rotation_vec[2]==0:\n if rotation_vec[1]==1:\n return self.rotate_y_rad(rotation_rad, in_place)\n elif rotation_vec[1]==-1:\n return self.rotate_y_rad(-rotation_rad, in_place)\n elif rotation_vec[1]==0 and rotation_vec[2]==0:\n if rotation_vec[0]==1:\n return self.rotate_x_rad(rotation_rad, in_place)\n elif rotation_vec[0]==-1:\n return self.rotate_x_rad(-rotation_rad, in_place)\n \n c = np.math.cos(rotation_rad)\n c1 = 1 - c\n s = np.math.sin(rotation_rad)\n x = rotation_vec[0]\n y = rotation_vec[1]\n z = rotation_vec[2]\n xxc1 = x * x * c1\n yyc1 = y * y * c1\n zzc1 = z * z * c1\n xyc1 = x * y * c1\n xzc1 = x * z * c1\n yzc1 = y * z * c1\n xs = x * s\n ys = y * s\n zs = z * s\n\n mat = np.array([\n [xxc1 + c, xyc1 - zs, xzc1 + ys, 0],\n [xyc1 + zs, yyc1 + c, yzc1 - xs, 0],\n [xzc1 - ys, yzc1 + xs, zzc1 + c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n\n def rotate_x_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [1, 0, 0, 0],\n [0, c,-s, 0],\n [0, s, c, 0],\n [0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_y_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c, 0, s, 0],\n [ 0, 1, 0, 0],\n [-s, 0, c, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_z_rad(self, rotation_rad, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in radians\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n if rotation_rad == 0: \n return self if in_place else Matrix_4x4(self)\n \n c = np.math.cos(rotation_rad)\n s = np.math.sin(rotation_rad)\n\n mat = np.array([\n [ c,-s, 0, 0],\n [ s, c, 0, 0],\n [ 0, 0, 1, 0],\n [ 0, 0, 0, 1]])\n\n return self.multiply(mat, in_place)\n\n def rotate_x_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the x-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix 
is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_x_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_y_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the y-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_y_rad(rotation_deg * (pi/180), in_place)\n\n def rotate_z_deg(self, rotation_deg, in_place=False):\n '''\n Rotates the current transformation matrix around the z-axis\n\n Parameters\n ----------\n rotation_rad : float\n rotation in degrees\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n return self.rotate_z_rad(rotation_deg * (pi/180), in_place)\n\n def invert(self, in_place=False):\n '''\n Inverts the current transformation matrix\n\n Parameters\n ----------\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed \n \n Returns\n -------\n result : Matrix_4x4 \n self is returned if in_place is True\n '''\n\n if in_place:\n self.m = np.linalg.inv(self.m)\n return self\n else:\n return Matrix_4x4(np.linalg.inv(self.m))\n\n def multiply(self,mat, in_place=False):\n '''\n Multiplies the current transformation matrix by mat\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n in_place: bool, optional\n * True: the internal matrix is changed in-place (default)\n * False: a new matrix is returned and the current one is not changed (if mat is a 4x4 matrix)\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix (self is returned if in_place is True); \n a 3D vector is returned if mat is a vector\n '''\n if type(mat) == Matrix_4x4: \n mat = mat.m\n else:\n mat = np.asarray(mat) # conversion to array, if needed\n if mat.ndim == 1: # multiplication by 3D vector\n vec = np.append(mat,1) # conversion to 4D vector\n return np.matmul(self.m, vec)[0:3] # conversion to 3D vector\n\n if in_place:\n np.matmul(self.m, mat, self.m)\n return self\n else:\n return Matrix_4x4(np.matmul(self.m, mat))\n\n def __call__(self,mat, is_spherical=False):\n '''\n Multiplies the current transformation matrix by mat and returns a new matrix or vector\n\n Parameters\n ----------\n mat : Matrix_4x4 or array_like\n multiplier matrix or 3D vector\n is_spherical : bool\n only relevant if mat is a 3D vector, True if it uses spherical coordinates\n \n Returns\n -------\n result : Matrix_4x4 | array_like\n Matrix_4x4 is returned if mat is a matrix; \n a 3D vector is returned if mat is a vector\n '''\n\n if is_spherical and mat.ndim == 1: mat = M.deg_sph2cart(mat)\n return self.multiply(mat,False)" }, { "identifier": "Body_Part", "path": "world/commons/Body_Part.py", "snippet": "class Body_Part():\n def __init__(self, mass) -> None:\n self.mass = float(mass)\n self.joints = []\n self.transform = Matrix_4x4() # body part to head transformation matrix" }, { "identifier": "Joint_Info", 
"path": "world/commons/Joint_Info.py", "snippet": "class Joint_Info():\n def __init__(self, xml_element) -> None:\n self.perceptor = xml_element.attrib['perceptor']\n self.effector = xml_element.attrib['effector']\n self.axes = np.array([\n float(xml_element.attrib['xaxis']), \n float(xml_element.attrib['yaxis']), \n float(xml_element.attrib['zaxis'])])\n self.min = int(xml_element.attrib['min'])\n self.max = int(xml_element.attrib['max'])\n\n self.anchor0_part = xml_element[0].attrib['part']\n self.anchor0_axes = np.array([\n float(xml_element[0].attrib['y']), \n float(xml_element[0].attrib['x']), \n float(xml_element[0].attrib['z'])]) #x and y axes are switched\n\n self.anchor1_part = xml_element[1].attrib['part']\n self.anchor1_axes_neg = np.array([\n -float(xml_element[1].attrib['y']), \n -float(xml_element[1].attrib['x']), \n -float(xml_element[1].attrib['z'])]) #x and y axes are switched" } ]
from collections import deque from math import atan, pi, sqrt, tan from math_ops.Math_Ops import Math_Ops as M from math_ops.Matrix_3x3 import Matrix_3x3 from math_ops.Matrix_4x4 import Matrix_4x4 from world.commons.Body_Part import Body_Part from world.commons.Joint_Info import Joint_Info import numpy as np import xml.etree.ElementTree as xmlp
14,724
self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m) # Joint variables are optimized for performance / array operations self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg) self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s) self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info) self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix # Localization variables relative to head self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head self.loc_head_position = np.zeros(3) # Absolute head position (m) self.loc_head_position_history = deque(maxlen=40)# Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 0 is the previous position) self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy) self.loc_head_orientation = 0 # Head orientation (deg) self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible self.loc_last_update = 0 # World.time_local_ms when the localization was last updated self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc) self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position) # Localization variables relative to torso self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field self.loc_torso_roll = 0 # Torso roll (deg) self.loc_torso_pitch = 0 # Torso pitch (deg) self.loc_torso_orientation = 0 # Torso orientation (deg) self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis) self.loc_torso_position = np.zeros(3) # Absolute torso position (m) self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2) # Other localization variables self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m) self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg) self.loc_CoM_position = 
np.zeros(3) # Absolute CoM position (m) self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) # Localization special variables ''' self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs. There are situations in which the rotation and translation cannot be computed, but the z-coordinate can still be found through vision, in which case: self.loc_is_up_to_date is False self.loc_head_z_is_up_to_date is True It should be used in applications which rely on z as an independent coordinate, such as detecting if the robot has fallen, or as an observation for machine learning. It should NEVER be used for 3D transformations. ''' self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m) self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s) # Localization + Gyroscope # These variables are reliable. The gyroscope is used to update the rotation when waiting for the next visual cycle self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro) self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro) self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro) self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro) self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro) self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information # Localization + Gyroscope + Accelerometer # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to # wrong acceleration -> wrong velocity -> wrong position self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc) self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc) self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc) self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc) self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc) self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc) self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc) self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc) self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc) self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc) #Using explicit variables to enable IDE suggestions self.J_HEAD_YAW = 0 self.J_HEAD_PITCH = 1 self.J_LLEG_YAW_PITCH = 2 self.J_RLEG_YAW_PITCH = 3 self.J_LLEG_ROLL = 4 self.J_RLEG_ROLL = 5 self.J_LLEG_PITCH = 6 self.J_RLEG_PITCH = 7 self.J_LKNEE = 8 self.J_RKNEE = 9 self.J_LFOOT_PITCH = 10 self.J_RFOOT_PITCH = 11 self.J_LFOOT_ROLL = 12 self.J_RFOOT_ROLL = 13 self.J_LARM_PITCH = 14 self.J_RARM_PITCH = 15 self.J_LARM_ROLL = 16 self.J_RARM_ROLL = 17 self.J_LELBOW_YAW = 18 
self.J_RELBOW_YAW = 19 self.J_LELBOW_ROLL = 20 self.J_RELBOW_ROLL = 21 self.J_LTOE_PITCH = 22 self.J_RTOE_PITCH = 23 #------------------ parse robot xml dir = M.get_active_directory("/world/commons/robots/") robot_xml_root = xmlp.parse(dir + robot_xml).getroot() joint_no = 0 for child in robot_xml_root: if child.tag == "bodypart":
class Robot(): STEPTIME = 0.02 # Fixed step time VISUALSTEP = 0.04 # Fixed visual step time SQ_STEPTIME = STEPTIME * STEPTIME GRAVITY = np.array([0,0,-9.81]) IMU_DECAY = 0.996 #IMU's velocity decay #------------------ constants to force symmetry in joints/effectors MAP_PERCEPTOR_TO_INDEX = {"hj1":0, "hj2":1, "llj1":2, "rlj1":3, "llj2":4, "rlj2":5, "llj3":6, "rlj3":7, "llj4":8, "rlj4":9, "llj5":10,"rlj5":11, "llj6":12,"rlj6":13,"laj1":14,"raj1":15, "laj2":16,"raj2":17,"laj3":18,"raj3":19, "laj4":20,"raj4":21,"llj7":22,"rlj7":23 } # Fix symmetry issues 1a/4 (identification) FIX_PERCEPTOR_SET = {'rlj2','rlj6','raj2','laj3','laj4'} FIX_INDICES_LIST = [5,13,17,18,20] # Recommended height for unofficial beam (near ground) BEAM_HEIGHTS = [0.4, 0.43, 0.4, 0.46, 0.4] def __init__(self, unum:int, robot_type:int) -> None: robot_xml = "nao"+str(robot_type)+".xml" # Typical NAO file name self.type = robot_type self.beam_height = Robot.BEAM_HEIGHTS[robot_type] self.no_of_joints = 24 if robot_type == 4 else 22 #Fix symmetry issues 1b/4 (identification) self.FIX_EFFECTOR_MASK = np.ones(self.no_of_joints) self.FIX_EFFECTOR_MASK[Robot.FIX_INDICES_LIST] = -1 self.body_parts = dict() # keys='body part names' (given by the robot's XML), values='Body_Part objects' self.unum = unum # Robot's uniform number self.gyro = np.zeros(3) # Angular velocity along the three axes of freedom of the robot's torso (deg/s) self.acc = np.zeros(3) # Proper acceleration along the three axes of freedom of the robot's torso (m/s2) self.frp = dict() # foot "lf"/"rf", toe "lf1"/"rf1" resistance perceptor (relative [p]oint of origin + [f]orce vector) e.g. {"lf":(px,py,pz,fx,fy,fz)} self.feet_toes_last_touch = {"lf":0,"rf":0,"lf1":0,"rf1":0} # foot "lf"/"rf", toe "lf1"/"rf1" World.time_local_ms when foot/toe last touched any surface self.feet_toes_are_touching = {"lf":False,"rf":False,"lf1":False,"rf1":False} # foot "lf"/"rf", toe "lf1"/"rf1" True if touching in last received server message self.fwd_kinematics_list = None # List of body parts, ordered according to dependencies self.rel_cart_CoM_position = np.zeros(3) # Center of Mass position, relative to head, in cartesian coordinates (m) # Joint variables are optimized for performance / array operations self.joints_position = np.zeros(self.no_of_joints) # Joints' angular position (deg) self.joints_speed = np.zeros(self.no_of_joints) # Joints' angular speed (rad/s) self.joints_target_speed = np.zeros(self.no_of_joints) # Joints' target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_target_last_speed = np.zeros(self.no_of_joints) # Joints' last target speed (rad/s) (max: 6.1395 rad/s, see rcssserver3d/data/rsg/agent/nao/hingejoint.rsg) self.joints_info = [None] * self.no_of_joints # Joints' constant information (see class Joint_Info) self.joints_transform = [Matrix_4x4() for _ in range(self.no_of_joints)] # Joints' transformation matrix # Localization variables relative to head self.loc_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field self.loc_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head self.loc_rotation_head_to_field = Matrix_3x3() # Rotation matrix from head to field self.loc_rotation_field_to_head = Matrix_3x3() # Rotation matrix from field to head self.loc_head_position = np.zeros(3) # Absolute head position (m) self.loc_head_position_history = deque(maxlen=40)# Absolute head position history (queue with up to 40 old positions at intervals of 0.04s, where index 
0 is the previous position) self.loc_head_velocity = np.zeros(3) # Absolute head velocity (m/s) (Warning: possibly noisy) self.loc_head_orientation = 0 # Head orientation (deg) self.loc_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible self.loc_last_update = 0 # World.time_local_ms when the localization was last updated self.loc_head_position_last_update = 0 # World.time_local_ms when loc_head_position was last updated by vision or radio self.radio_fallen_state = False # True if (radio says we fell) and (radio is significantly more recent than loc) self.radio_last_update = 0 # World.time_local_ms when radio_fallen_state was last updated (and possibly loc_head_position) # Localization variables relative to torso self.loc_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field self.loc_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field self.loc_torso_roll = 0 # Torso roll (deg) self.loc_torso_pitch = 0 # Torso pitch (deg) self.loc_torso_orientation = 0 # Torso orientation (deg) self.loc_torso_inclination = 0 # Torso inclination (deg) (inclination of z-axis in relation to field z-axis) self.loc_torso_position = np.zeros(3) # Absolute torso position (m) self.loc_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) self.loc_torso_acceleration = np.zeros(3) # Absolute Coordinate acceleration (m/s2) # Other localization variables self.cheat_abs_pos = np.zeros(3) # Absolute head position provided by the server as cheat (m) self.cheat_ori = 0.0 # Absolute head orientation provided by the server as cheat (deg) self.loc_CoM_position = np.zeros(3) # Absolute CoM position (m) self.loc_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) # Localization special variables ''' self.loc_head_z is often equivalent to self.loc_head_position[2], but sometimes it differs. There are situations in which the rotation and translation cannot be computed, but the z-coordinate can still be found through vision, in which case: self.loc_is_up_to_date is False self.loc_head_z_is_up_to_date is True It should be used in applications which rely on z as an independent coordinate, such as detecting if the robot has fallen, or as an observation for machine learning. It should NEVER be used for 3D transformations. ''' self.loc_head_z = 0 # Absolute head position (z) - see above for explanation (m) self.loc_head_z_is_up_to_date = False # False if this is not a visual step, or not enough elements are visible self.loc_head_z_last_update = 0 # World.time_local_ms when loc_head_z was last computed self.loc_head_z_vel = 0 # Absolute head velocity (z) (m/s) # Localization + Gyroscope # These variables are reliable. 
The gyroscope is used to update the rotation when waiting for the next visual cycle self.imu_torso_roll = 0 # Torso roll (deg) (src: Localization + Gyro) self.imu_torso_pitch = 0 # Torso pitch (deg) (src: Localization + Gyro) self.imu_torso_orientation = 0 # Torso orientation (deg) (src: Localization + Gyro) self.imu_torso_inclination = 0 # Torso inclination (deg) (src: Localization + Gyro) self.imu_torso_to_field_rotation = Matrix_3x3() # Rotation matrix from torso to field (src: Localization + Gyro) self.imu_last_visual_update = 0 # World.time_local_ms when the IMU data was last updated with visual information # Localization + Gyroscope + Accelerometer # Warning: these variables are unreliable, since small errors in the Localization Orientation lead to # wrong acceleration -> wrong velocity -> wrong position self.imu_weak_torso_to_field_transform = Matrix_4x4() # Transformation matrix from torso to field (src: Localization + Gyro + Acc) self.imu_weak_head_to_field_transform = Matrix_4x4() # Transformation matrix from head to field (src: Localization + Gyro + Acc) self.imu_weak_field_to_head_transform = Matrix_4x4() # Transformation matrix from field to head (src: Localization + Gyro + Acc) self.imu_weak_torso_position = np.zeros(3) # Absolute torso position (m) (src: Localization + Gyro + Acc) self.imu_weak_torso_velocity = np.zeros(3) # Absolute torso velocity (m/s) (src: Localization + Gyro + Acc) self.imu_weak_torso_acceleration = np.zeros(3) # Absolute torso acceleration (m/s2) (src: Localization + Gyro + Acc) self.imu_weak_torso_next_position = np.zeros(3) # Absolute position in next step estimate (m) (src: Localization + Gyro + Acc) self.imu_weak_torso_next_velocity = np.zeros(3) # Absolute velocity in next step estimate (m/s) (src: Localization + Gyro + Acc) self.imu_weak_CoM_position = np.zeros(3) # Absolute CoM position (m) (src: Localization + Gyro + Acc) self.imu_weak_CoM_velocity = np.zeros(3) # Absolute CoM velocity (m/s) (src: Localization + Gyro + Acc) #Using explicit variables to enable IDE suggestions self.J_HEAD_YAW = 0 self.J_HEAD_PITCH = 1 self.J_LLEG_YAW_PITCH = 2 self.J_RLEG_YAW_PITCH = 3 self.J_LLEG_ROLL = 4 self.J_RLEG_ROLL = 5 self.J_LLEG_PITCH = 6 self.J_RLEG_PITCH = 7 self.J_LKNEE = 8 self.J_RKNEE = 9 self.J_LFOOT_PITCH = 10 self.J_RFOOT_PITCH = 11 self.J_LFOOT_ROLL = 12 self.J_RFOOT_ROLL = 13 self.J_LARM_PITCH = 14 self.J_RARM_PITCH = 15 self.J_LARM_ROLL = 16 self.J_RARM_ROLL = 17 self.J_LELBOW_YAW = 18 self.J_RELBOW_YAW = 19 self.J_LELBOW_ROLL = 20 self.J_RELBOW_ROLL = 21 self.J_LTOE_PITCH = 22 self.J_RTOE_PITCH = 23 #------------------ parse robot xml dir = M.get_active_directory("/world/commons/robots/") robot_xml_root = xmlp.parse(dir + robot_xml).getroot() joint_no = 0 for child in robot_xml_root: if child.tag == "bodypart":
self.body_parts[child.attrib['name']] = Body_Part(child.attrib['mass'])
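The cropped_code for this record stops at the "bodypart" branch, and next_line shows the statement that completes it. Below is a minimal, self-contained sketch of that parsing step; the two-element XML string is a hypothetical stand-in for the real nao<type>.xml loaded via M.get_active_directory, the mass value is illustrative, and the 'name'/'mass' attribute access plus the Body_Part call mirror the record's next_line (world.commons.Body_Part is assumed importable as declared in the context):

import xml.etree.ElementTree as xmlp
from world.commons.Body_Part import Body_Part

# Hypothetical stand-in for the robot XML file parsed in __init__
robot_xml_root = xmlp.fromstring('<robot><bodypart name="torso" mass="1.2171"/></robot>')

body_parts = dict()
for child in robot_xml_root:
    if child.tag == "bodypart":
        # Same statement as the record's next_line (stored on self.body_parts there)
        body_parts[child.attrib['name']] = Body_Part(child.attrib['mass'])
print(body_parts['torso'].mass)   # -> 1.2171, stored as float by Body_Part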
3
2023-12-16 23:40:23+00:00
24k
Sam-Izdat/tinycio
src/tinycio/tonemapping.py
[ { "identifier": "applyAgX", "path": "src/tinycio/np_agx/agx.py", "snippet": "def applyAgX(array):\n # type: (numpy.ndarray) -> numpy.ndarray\n \"\"\"\n -> take linear - sRGB image data as input\n - apply custom grading if any\n - apply the AgX Punchy view-transform\n - return a display-ready array encoded for sRGB SDR monitors\n\n Args:\n array: float32 array, R-G-B format, sRGB Display\n \"\"\"\n\n # Apply Grading\n array = customLook1(array)\n array = applyAgxLog(array)\n array = applyAgxLut(array) # AgX Base\n # Ready for display.\n return array" }, { "identifier": "applyAgXPunchy", "path": "src/tinycio/np_agx/agx.py", "snippet": "def applyAgXPunchy(array):\n # type: (numpy.ndarray) -> numpy.ndarray\n \"\"\"\n -> take linear - sRGB image data as input\n - apply custom grading if any\n - apply the AgX Punchy view-transform\n - return a display-ready array encoded for sRGB SDR monitors\n\n Args:\n array: float32 array, R-G-B format, sRGB Display\n \"\"\"\n\n # Apply Grading\n array = customLook1(array)\n array = applyAgxLog(array)\n array = applyAgxLut(array) # AgX Base\n array = applyLookPunchy(array=array)\n # Ready for display.\n return array" }, { "identifier": "ColorSpace", "path": "src/tinycio/colorspace.py", "snippet": "class ColorSpace:\n \"\"\"\n Color space conversion. Applies OETFs and EOTFs as needed but omits tonemapping. Cylindrical transformations are \n treated as distinct color spaces. Example:\n\n .. highlight:: python\n .. code-block:: python\n \n cs_in = ColorSpace.Variant.SRGB_LIN\n cs_out = ColorSpace.Variant.OKLAB\n oklab_image = ColorSpace.convert(srgb_image, source=cs_in, destination=cs_out)\n \"\"\"\n class Variant(IntEnum):\n \"\"\"\n Color space enum. For a list of available options, see :ref:`ref_color_spaces`.\n \"\"\"\n UNKNOWN = 1<<0 \n NONCOLOR = 1<<1 \n CIE_XYZ = 1<<2 \n CIE_XYY = 1<<3 \n SRGB = 1<<4 \n SRGB_LIN = 1<<5 \n REC709 = 1<<6 \n REC2020 = 1<<7 \n REC2020_LIN = 1<<8 \n DCI_P3 = 1<<9 \n DCI_P3_LIN = 1<<10 \n DISPLAY_P3 = 1<<11 \n ACESCG = 1<<12 \n ACESCC = 1<<13 \n ACESCCT = 1<<14 \n ACES2065_1 = 1<<15 \n LMS = 1<<16 \n OKLAB = 1<<17 \n CIELAB = 1<<18 \n CIELUV = 1<<19 \n HSV = 1<<20 \n HSL = 1<<21 \n OKHSV = 1<<22\n OKHSL = 1<<23\n\n SCENE_LINEAR = SRGB_LIN | REC2020_LIN | DCI_P3_LIN | ACESCG | ACES2065_1 | CIE_XYZ\n PERCEPTUAL = OKLAB | CIELAB | CIELUV | OKHSL | OKHSV\n CYLINDRICAL = HSL | HSV | OKHSL | OKHSV\n\n GAMUT_SRGB = SRGB | SRGB_LIN | REC709 | HSL | HSV\n GAMUT_AP0 = ACES2065_1\n GAMUT_AP1 = ACESCG | ACESCC | ACESCCT\n GAMUT_REC2020 = REC2020 | REC2020_LIN\n GAMUT_DCI_P3 = DCI_P3 | DCI_P3_LIN\n GAMUT_DISPLAY_P3= DISPLAY_P3\n GAMUT_OKLAB = OKLAB | OKHSL | OKHSV\n GAMUT_CIE_XYZ = CIE_XYZ | CIE_XYY\n GAMUT_CIELAB = CIELAB\n GAMUT_CIELUV = CIELUV\n GAMUT_OTHER = LMS | UNKNOWN | NONCOLOR\n\n WP_D65 = SRGB | SRGB_LIN | REC709 | DISPLAY_P3 | REC2020 | REC2020_LIN | CIE_XYZ | CIE_XYY\n WP_CCT_6300 = DCI_P3 | DCI_P3_LIN\n WP_CCT_6000 = ACESCG | ACESCC | ACESCCT | ACES2065_1\n\n MODEL_RGB = SRGB | SRGB_LIN | REC709 | REC2020 | REC2020_LIN | DCI_P3 | DCI_P3_LIN | DISPLAY_P3 | \\\n ACESCG | ACESCC | ACESCCT | ACES2065_1\n MODEL_CIE = CIE_XYZ | CIE_XYY | CIELAB | CIELUV\n MODEL_CAM = 0\n MODEL_YUV = 0\n MODEL_OTHER = LMS | HSL | HSV | OKLAB # is OKLAB CAM-based?\n \n NEGATIVE = OKLAB | CIELAB | CIELUV | GAMUT_AP0\n NON_NEGATIVE = ~NEGATIVE\n\n DISABLED = CIELUV\n UNSUPPORTED = OKHSV | OKHSL # disabled doesn't go here - CS must have alternate path\n SUPPORTED = ~UNSUPPORTED \n\n # FIXME: LUV doesn't quite match expected values, needs further 
testing\n\n mat_xyz_to_srgb = [\n [3.24096994190452134, -1.53738317757009346, -0.498610760293003284],\n [-0.969243636280879826, 1.87596750150772067, 0.0415550574071756125],\n [0.0556300796969936084, -0.203976958888976564, 1.05697151424287856]]\n\n mat_srgb_to_xyz = [\n [0.412390799265959481, 0.357584339383877964, 0.180480788401834288],\n [0.212639005871510358, 0.715168678767755927, 0.072192315360733715],\n [0.0193308187155918507, 0.119194779794625988, 0.950532152249660581]]\n\n mat_srgb_to_acescg = [\n [ 0.6130974024, 0.3395231462, 0.04737945141],\n [ 0.07019372247, 0.916353879, 0.01345239847],\n [ 0.02061559288, 0.1095697729, 0.8698146341]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_acescg_to_srgb = [\n [ 1.705050993, -0.6217921206,-0.083258872],\n [-0.1302564175, 1.140804737, -0.01054831907],\n [-0.02400335681,-0.1289689761, 1.152972333]]\n\n # NOTE: Includes \"D60\"/D65 white point conversion\n mat_srgb_to_aces2065_1 = [\n [ 0.439632982, 0.382988698, 0.17737832],\n [ 0.0897764431, 0.813439429, 0.0967841284],\n [ 0.0175411704, 0.111546553, 0.870912277]]\n\n mat_aces2065_1_to_srgb = [\n [ 2.52168619, -1.13413099, -0.387555198],\n [-0.276479914, 1.37271909, -0.0962391736],\n [-0.015378065, -0.152975336, 1.1683534]]\n\n mat_srgb_to_displayp3 = [\n [ 0.822461969, 0.177538031, 1.15772692e-10],\n [ 0.0331941989, 0.966805801, 1.95085037e-11],\n [ 0.0170826307, 0.0723974405, 0.910519929]]\n\n mat_displayp3_to_srgb = [\n [ 1.22494018, -0.224940176, -4.77534979e-11],\n [-0.0420569547, 1.04205695, 3.37864801e-11],\n [-0.0196375546,-0.0786360454, 1.0982736]] \n\n # NOTE: No chromatic adaptation\n mat_srgb_to_dcip3 = [\n [0.868579739716132409, 0.128919138460847047, 0.00250112182302054368],\n [0.0345404102543194426, 0.961811386361919975, 0.0036482033837605824],\n [0.0167714290414502718, 0.0710399977868858352, 0.912188573171663893]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_srgb = [\n [ 1.15751640619975871, -0.154962378073857756, -0.00255402812590095854],\n [-0.0415000715306859699, 1.04556792307969925, -0.00406785154901328463],\n [-0.0180500389562539583,-0.0785782726530290654, 1.09662831160928302]]\n\n # NOTE: No chromatic adaptation\n mat_dcip3_to_xyz = [\n [ 0.445169815564552417, 0.277134409206777664, 0.172282669815564564],\n [ 0.209491677912730539, 0.721595254161043636, 0.0689130679262258258],\n [-3.63410131696985616e-17, 0.0470605600539811521, 0.907355394361973415]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_dcip3 = [\n [2.7253940304917328, -1.01800300622718496, -0.440163195190036463],\n [-0.795168025808764195, 1.689732054843624, 0.0226471906084774533],\n [0.0412418913957000325, -0.0876390192158623825, 1.10092937864632191]]\n\n mat_srgb_to_rec2020 = [\n [ 0.627403896, 0.329283039, 0.0433130657],\n [ 0.0690972894, 0.919540395, 0.0113623156],\n [ 0.0163914389, 0.0880133077, 0.895595253]]\n\n mat_rec2020_to_srgb = [\n [ 1.660491, -0.587641139,-0.0728498633],\n [-0.124550475, 1.1328999, -0.00834942258],\n [-0.0181507633,-0.100578898, 1.11872966]]\n\n mat_rec2020_to_xyz = [\n [0.636958048301291, 0.144616903586208, 0.168880975164172],\n [0.262700212011267, 0.677998071518871, 0.059301716469862],\n [4.99410657446607e-17, 0.0280726930490874, 1.06098505771079]]\n\n mat_xyz_to_rec2020 = [\n [1.71665118797127, -0.355670783776393, -0.25336628137366],\n [-0.666684351832489, 1.61648123663494, 0.0157685458139111],\n [0.0176398574453108, -0.0427706132578085, 0.942103121235474]]\n\n # NOTE: No chromatic adaptation\n mat_acescg_to_xyz = [\n [ 0.66245418, 0.13400421, 0.15618769],\n [ 
0.27222872, 0.67408177, 0.05368952],\n [-0.00557465, 0.00406073, 1.0103391 ]]\n\n # NOTE: No chromatic adaptation\n mat_xyz_to_acescg = [\n [ 1.64102338, -0.32480329, -0.2364247 ],\n [-0.66366286, 1.61533159, 0.01675635],\n [ 0.01172189, -0.00828444, 0.98839486]]\n\n # NOTE: For CIE XYZ color\n mat_d60_to_d65 = [\n [ 0.98722400,-0.00611327, 0.01595330],\n [-0.00759836, 1.00186000, 0.00533002],\n [ 0.00307257,-0.00509595, 1.08168000]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_d60 = [\n [ 1.01303000, 0.00610531,-0.01497100],\n [ 0.00769823, 0.99816500,-0.00503203],\n [-0.00284131, 0.00468516, 0.92450700]]\n\n # NOTE: For CIE XYZ color\n mat_d65_to_dci = [\n [0.976578896646979768, -0.0154362646984919742, -0.016686021704209866],\n [-0.0256896658505145926, 1.02853916787996963, -0.00378517365630504153],\n [-0.00570574587417104179, 0.0110778657389971485, 0.871176159390377409]]\n \n # NOTE: For CIE XYZ color\n mat_dci_to_d65 = [\n [1.02449672775257752, 0.0151635410224165156, 0.0196885223342066827],\n [0.0256121933371584198, 0.97258630562441342, 0.00471635229242730096],\n [0.0063842306500876874, -0.012268082736730219, 1.14794244517367791]]\n\n mat_xyz_to_lms = [\n [ 0.8951, 0.2664,-0.1614],\n [-0.7502, 1.7135, 0.0367],\n [ 0.0389,-0.0685, 1.0296]]\n\n mat_lms_to_xyz = [\n [ 0.986993, -0.147054, 0.159963],\n [ 0.432305, 0.51836, 0.0492912],\n [ -0.00852866, 0.0400428, 0.968487]]\n\n # OKLAB's XYZ to LMS\n mat_oklab_m1 = [\n [ 0.8189330101, 0.3618667424, -0.1288597137],\n [ 0.0329845436, 0.9293118715, 0.0361456387],\n [ 0.0482003018, 0.2643662691, 0.6338517070]]\n\n # OKLAB's non-linear L'M'S' to OKLAB\n mat_oklab_m2 = [\n [ 0.2104542553, 0.7936177850, -0.0040720468],\n [ 1.9779984951, -2.4285922050, 0.4505937099],\n [ 0.0259040371, 0.7827717662, -0.8086757660]]\n\n # Inverse of OKLAB M1\n mat_oklab_m1_inv = [\n [ 1.22701385, -0.55779998, 0.28125615],\n [-0.04058018, 1.11225687, -0.07167668],\n [-0.07638128, -0.42148198, 1.58616322]]\n\n # Inverse of OKLAB M2\n mat_oklab_m2_inv = [\n [ 1. , 0.39633779, 0.21580376],\n [ 1.00000001, -0.10556134, -0.06385417],\n [ 1.00000005, -0.08948418, -1.29148554]]\n\n @classmethod\n def convert(cls, im:Union[torch.Tensor, ColorImage], source:Variant, destination:Variant) -> torch.Tensor:\n \"\"\"\n Change the color space of an image. Cylindrical transformations HSV/HSL are \n treated as their own color spaces and assumed to be relative to sRGB linear. \n Unless otherwise noted or required by specification (e.g. ACES), we assume D65 white point.\n\n .. warning::\n\n Tone mapping is not included, so converting the color space of HDR values to \n an LDR-designated color space will not automatically reduce dynamic range. For example, \n taking an HDR image from :code:`ACESCG` (AP1) to :code:`SRGB` will yield the sRGB \n gamma curve, but values outside the required range must still be tone mapped or clamped beforehand.\n\n .. warning::\n\n Cylindrical transformations (HSL, HSV) should be given input in [0, 1] linear sRGB range \n (or equivalent). 
This is not strictly enforced but input outside this range may yield \n unpredictable results or *NaN* values.\n\n :param im: [C=3, H, W] image tensor \n :type im: torch.Tensor | ColorImage\n :param source: color space to convert from\n :param destination: color space to convert to\n :return: image tensor in designated color space\n \"\"\"\n ip, op = source, destination\n cs = cls.Variant\n tf = TransferFunction\n if ip == op: return im\n\n assert im.dim() == 3 and im.size(0) == 3, f\"expected [C=3, H, W] image tensor, got {im.size()}\"\n assert source != 0, f\"Unknown source color space\"\n assert ip & cs.SUPPORTED, f\"Source color space not supported: {source.name}\"\n assert op & cs.SUPPORTED, f\"Destination color space not supported: {destination.name}\"\n assert ip & ~cs.DISABLED, f\"Source color space disabled: {ColorSpace.Variant(ip).name}\"\n assert op & ~cs.DISABLED, f\"Destination color space disabled: {ColorSpace.Variant(op).name}\"\n\n err_not_implemented = f\"Color space conversion not implemented: {ColorSpace.Variant(ip).name} to {ColorSpace.Variant(op).name}\" \n\n # Direct path where it matters, loop-de-loop elsewhere\n if ip == cs.SRGB_LIN:\n if op == cs.SRGB: im = tf.srgb_oetf(im)\n elif op == cs.REC709: im = tf.rec709_oetf(im)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_srgb_to_rec2020))\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_srgb_to_rec2020)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.DCI_P3_LIN: im = mm(mm(mm(im, cls.mat_srgb_to_xyz), cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DISPLAY_P3: im = tf.srgb_oetf(mm(im, cls.mat_srgb_to_displayp3))\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_srgb_to_xyz)\n elif op == cs.CIE_XYY: im = cls._xyz_to_xyy(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.LMS: im = cls._xyz_to_lms(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.ACESCG: im = mm(im, cls.mat_srgb_to_acescg)\n elif op == cs.ACESCC: im = cls._acescg_to_acescc(mm(im, cls.mat_srgb_to_acescg))\n elif op == cs.ACES2065_1: im = mm(im, cls.mat_srgb_to_aces2065_1)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(mm(im, cls.mat_srgb_to_xyz))\n elif op == cs.OKLAB: im = cls._rgb_to_oklab(im)\n elif op == cs.HSL: im = cls._rgb_to_hsl(tf.srgb_oetf(im))\n elif op == cs.HSV: im = cls._rgb_to_hsv(tf.srgb_oetf(im))\n else: raise Exception(err_not_implemented)\n elif ip == cs.SRGB:\n if op == cs.HSL: im = cls._rgb_to_hsl(im)\n elif op == cs.HSV: im = cls._rgb_to_hsv(im)\n else: im = cls.convert(tf.srgb_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC709: im = cls.convert(tf.rec709_eotf(im), cs.SRGB_LIN, op)\n elif ip == cs.REC2020: \n if op == cs.REC2020_LIN: im = tf.rec2020_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(tf.rec2020_eotf(im), cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.REC2020_LIN: \n if op == cs.REC2020: im = tf.rec2020_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(im, cls.mat_rec2020_to_xyz)\n elif op == cs.SRGB_LIN: im = mm(im, cls.mat_rec2020_to_srgb)\n else: im = cls.convert(mm(im, cls.mat_rec2020_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.DCI_P3: \n if op == cs.DCI_P3_LIN: im = tf.dcip3_eotf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = 
cls.convert(mm(mm(tf.dcip3_eotf(im), cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DCI_P3_LIN: \n if op == cs.DCI_P3: im = tf.dcip3_oetf(im)\n elif op == cs.CIE_XYZ: im = mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65)\n else: im = cls.convert(mm(mm(im, cls.mat_dcip3_to_xyz), cls.mat_dci_to_d65), cs.CIE_XYZ, op)\n elif ip == cs.DISPLAY_P3: im = cls.convert(mm(tf.srgb_eotf(im), cls.mat_displayp3_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYZ:\n if op == cs.CIE_XYY: im = cls._xyz_to_xyy(im)\n elif op == cs.REC2020_LIN: im = mm(im, cls.mat_xyz_to_rec2020)\n elif op == cs.REC2020: im = tf.rec2020_oetf(mm(im, cls.mat_xyz_to_rec2020))\n elif op == cs.DCI_P3_LIN: im = mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3)\n elif op == cs.DCI_P3: im = tf.dcip3_oetf(mm(mm(im, cls.mat_d65_to_dci), cls.mat_xyz_to_dcip3))\n elif op == cs.LMS: im = cls._xyz_to_lms(im)\n elif op == cs.ACESCG: im = mm(cls._d65_to_d60(im), cls.mat_xyz_to_acescg)\n elif op == cs.CIELAB: im = cls._xyz_to_cielab(im)\n elif op == cs.CIELUV: im = cls._xyz_to_cieluv(im)\n elif op == cs.OKLAB: im = cls._xyz_to_oklab(im)\n else: im = cls.convert(mm(im, cls.mat_xyz_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.CIE_XYY: \n if op == cs.CIE_XYZ: im = cls._xyy_to_xyz(im)\n else: im = cls.convert(cls._xyy_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.LMS: \n if op == cs.CIE_XYZ: im = cls._lms_to_xyz(im)\n else: im = cls.convert(cls._lms_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.ACESCG:\n # if op == cs.CIE_XYZ: im = cls._d60_to_d65(mm(im, cls.mat_acescg_to_xyz)) # FIXME: fails unit test (?)\n if op == cs.ACESCC: im = cls._acescg_to_acescc(im)\n else: im = cls.convert(mm(im, cls.mat_acescg_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.ACESCC:\n if op == cs.ACESCG: im = cls._acescc_to_acescg(im)\n else: im = cls.convert(cls._acescc_to_acescg(im), cs.ACESCG, op)\n elif ip == cs.ACES2065_1: im = cls.convert(mm(im, cls.mat_aces2065_1_to_srgb), cs.SRGB_LIN, op)\n elif ip == cs.HSL:\n if op == cs.SRGB: im = cls._hsl_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsl_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.HSV:\n if op == cs.SRGB: im = cls._hsv_to_rgb(im)\n else: im = cls.convert(tf.srgb_eotf(cls._hsv_to_rgb(im)), cs.SRGB_LIN, op)\n elif ip == cs.CIELAB: im = cls.convert(cls._cielab_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.CIELUV: im = cls.convert(cls._cieluv_to_xyz(im), cs.CIE_XYZ, op)\n elif ip == cs.OKLAB:\n if op == cs.CIE_XYZ: im = cls._oklab_to_xyz(im)\n else: im = cls.convert(cls._oklab_to_rgb(im), cs.SRGB_LIN, op)\n else: raise Exception(err_not_implemented)\n\n return im\n\n @classmethod\n def _xyz_to_xyy(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to CIE xyY color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIE xyY color space tensor\n \"\"\"\n X = xyz[0:1]\n Y = xyz[1:2]\n Z = xyz[2:3]\n x = X / (X + Y + Z)\n y = Y / (X + Y + Z)\n return torch.cat([x, y, Y], dim=0)\n\n @classmethod\n def _xyy_to_xyz(cls, xyy:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE xyY color space to CIE XYZ color space.\n\n :param xyy: Input CIE xyY color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n x = xyy[0:1]\n y = xyy[1:2]\n Y = xyy[2:3]\n X = (Y / y) * x\n Z = (Y / y) * (1. 
- x - y)\n return torch.cat([X, Y, Z], dim=0)\n\n @classmethod\n def _xyz_to_lms(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to LMS color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: LMS color space tensor\n \"\"\"\n return mm(xyz, cls.mat_xyz_to_lms)\n\n @classmethod\n def _lms_to_xyz(cls, lms:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert LMS color space to CIE XYZ color space.\n\n :param lms: Input LMS color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n return mm(lms, cls.mat_lms_to_xyz)\n\n @classmethod\n def _acescg_to_acescc(cls, cg:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert scene-linear ACEScg to log ACEScc.\n\n :param lms: Input ACEScg color space tensor\n :return: ACEScc color space tensor\n \"\"\"\n res = torch.where(cg < 0.00003051757, \n (torch.log2(0.00001525878 + cg * 0.5) + 9.72) / 17.52, \n (torch.log2(cg) + 9.72) / 17.52)\n return res\n\n @classmethod\n def _acescc_to_acescg(cls, cc:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert log ACEScc to scene-linear ACEScg.\n\n :param lms: Input ACEScc color space tensor\n :return: ACEScg color space tensor\n \"\"\"\n res = torch.where(cc < -0.3013698630, \n (torch.exp2(cc * 17.52 - 9.72) - 0.00001525878) * 2,\n torch.exp2(cc * 17.52 - 9.72))\n return res\n\n @classmethod\n def _xyz_to_oklab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ color space to OKLAB color space.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: OKLAB color space tensor\n \"\"\" \n lms = mm(xyz, cls.mat_oklab_m1)\n lms_p = torch.pow(torch.abs(lms), 0.3333333333) * torch.sign(lms).float()\n lab = mm(lms_p, cls.mat_oklab_m2)\n return lab\n\n @classmethod\n def _oklab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert OKLAB color space to CIE XYZ color space.\n\n :param lab: Input OKLAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n lms_p = mm(lab, cls.mat_oklab_m2_inv)\n lms = torch.pow(lms_p, 3.)\n xyz = mm(lms, cls.mat_oklab_m1_inv)\n return xyz\n\n\n @classmethod\n def __pivot_xyz_to_lab(cls, val): \n return torch.where(val > 0.008856, torch.pow(val, 0.3333333333), ((val * 903.3) + 16.0) / 116.0)\n\n @classmethod\n def _xyz_to_cielab(cls, xyz:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIE XYZ to CIELAB.\n\n :param xyz: Input CIE XYZ color space tensor\n :return: CIELAB color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE. \n x = xyz[0:1] / 0.95047 \n y = xyz[1:2] / 1.00000 \n z = xyz[2:3] / 1.08883 \n\n x = cls.__pivot_xyz_to_lab(x)\n y = cls.__pivot_xyz_to_lab(y)\n z = cls.__pivot_xyz_to_lab(z)\n\n l = torch.maximum(torch.zeros_like(y).to(y.device), (116.0 * y) - 16.0)\n a = (x - y) * 500.0\n b = (y - z) * 200.0\n return torch.cat([l, a, b], dim=0)\n\n @classmethod\n def _cielab_to_xyz(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from CIELAB to CIE XYZ.\n \n .. note::\n\n Assumes D65 standard illuminant.\n\n :param lab: Input CIELAB color space tensor\n :return: CIE XYZ color space tensor\n \"\"\"\n # https://github.com/CairX/convert-colors-py/blob/master/convcolors/__init__.py\n # MIT License\n\n # Copyright (c) 2022 Thomas Cairns\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n l = lab[0:1]\n a = lab[1:2]\n b = lab[2:3]\n\n # Reminder: The y values is calculated first as it can be reused\n # for the calculation of x and z.\n y = (l + 16.0) / 116.0\n x = y + (a / 500.0)\n z = y - (b / 200.0)\n\n x3 = x * x * x\n z3 = z * z * z\n y3 = y * y * y\n\n x = torch.where(x3 > 0.008856, x3, ((x * 116.0) - 16.0) / 903.3)\n y = torch.where(l > 7.9996248, y3, l / 903.3)\n z = torch.where(z3 > 0.008856, z3, ((z * 116.0) - 16.0) / 903.3)\n\n x = x * 0.95047 \n y = y * 1.00000 \n z = z * 1.08883\n\n return torch.cat([x, y, z], dim=0)\n\n def _xyz_to_cieluv(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIE XYZ to CIELUV. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n small_L = (29. / 3) ** 3 * image[1]\n large_L = 116 * torch.pow(image[1], 1 / 3.) - 16\n L = torch.where(image[1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[0] + 15 * image[1] + 3 * image[2])\n u_prime = torch.where(denom != 0., 4 * image[0] / denom, 0.)\n v_prime = torch.where(denom != 0., 9 * image[1] / denom, 0.)\n d = 0\n elif len(image.size()) == 4:\n small_L = (29. / 3) ** 3 * image[:, 1]\n large_L = 116 * torch.pow(image[:, 1], 1 / 3.) - 16\n L = torch.where(image[:, 1] <= (6. / 29) ** 3, small_L, large_L)\n\n denom = (image[:, 0] + 15 * image[:, 1] + 3 * image[:, 2])\n u_prime = torch.where(denom > 0., 4 * image[:, 0] / denom, 0.)\n v_prime = torch.where(denom > 0., 9 * image[:, 1] / denom, 0.)\n d = 1\n\n u = 13 * L * (u_prime - .2009)\n v = 13 * L * (v_prime - .4610)\n\n luv_image = torch.stack((L, u, v), dim=d)\n\n return luv_image\n\n def _cieluv_to_xyz(image:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Converts CIELUV to CIE XYZ. \n \n .. 
note::\n\n Assumes D65 standard illuminant.\n\n :param image: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are L, U, V\n :return: A pytorch tensor of shape (3, n_pixels_x, n_pixels_y) in which the channels are X, Y, Z\n \"\"\"\n # https://github.com/stefanLeong/S2CRNet/blob/main/scripts/utils/color.py\n # MIT License\n\n # Copyright (c) 2021 StefanLeong\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n if len(image.size()) == 3:\n denom = (13 * image[0])\n u_prime = torch.where(denom != 0., image[1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[2] / denom, 0.) + .4610\n\n small_Y = image[0] * (3. / 29) ** 3\n large_Y = ((image[0] + 16.) / 116.) ** 3\n\n Y = torch.where(image[0] <= 8, small_Y, large_Y)\n d = 0\n # batch of images\n elif len(image.size()) == 4:\n denom = (13 * image[:, 0])\n u_prime = torch.where(denom != 0., image[:, 1] / denom, 0.) + .2009\n v_prime = torch.where(denom != 0., image[:, 2] / denom, 0.) + .4610\n\n small_Y = image[:, 0] * (3. / 29) ** 3\n large_Y = ((image[:, 0] + 16.) / 116.) 
** 3\n\n Y = torch.where(image[:, 0] <= 8, small_Y, large_Y)\n d = 1\n\n X = torch.where(v_prime != 0., Y * 9 * u_prime / (4 * v_prime), 0.)\n Z = torch.where(v_prime != 0., Y * (12 - 3 * u_prime - 20 * v_prime) / (4 * v_prime), 0.)\n\n xyz_image = torch.stack((X, Y, Z), dim=d)\n\n return xyz_image\n\n @classmethod\n def _rgb_to_oklab(cls, rgb:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from linear sRGB to OKLAB.\n\n :param rgb: Input linear sRGB color space tensor\n :return: OKLAB color space tensor\n \"\"\"\n cr = rgb[0:1]\n cg = rgb[1:2]\n cb = rgb[2:3]\n\n l = 0.4122214708 * cr + 0.5363325363 * cg + 0.0514459929 * cb;\n m = 0.2119034982 * cr + 0.6806995451 * cg + 0.1073969566 * cb;\n s = 0.0883024619 * cr + 0.2817188376 * cg + 0.6299787005 * cb;\n\n l_ = torch.pow(torch.abs(l), 0.3333333333) * torch.sign(l).float()\n m_ = torch.pow(torch.abs(m), 0.3333333333) * torch.sign(m).float()\n s_ = torch.pow(torch.abs(s), 0.3333333333) * torch.sign(s).float()\n\n return torch.cat([\n 0.2104542553 * l_ + 0.7936177850 * m_ - 0.0040720468 * s_,\n 1.9779984951 * l_ - 2.4285922050 * m_ + 0.4505937099 * s_,\n 0.0259040371 * l_ + 0.7827717662 * m_ - 0.8086757660 * s_], dim=0)\n\n @classmethod\n def _oklab_to_rgb(cls, lab:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert color space from OKLAB to linear sRGB.\n\n :param lab: Input OKLAB color space tensor\n :return: Linear sRGB color space tensor\n \"\"\"\n cl = lab[0:1]\n ca = lab[1:2]\n cb = lab[2:3]\n\n l_ = cl + 0.3963377774 * ca + 0.2158037573 * cb\n m_ = cl - 0.1055613458 * ca - 0.0638541728 * cb\n s_ = cl - 0.0894841775 * ca - 1.2914855480 * cb\n\n l = l_*l_*l_\n m = m_*m_*m_\n s = s_*s_*s_\n\n return torch.cat([\n +4.0767416621 * l - 3.3077115913 * m + 0.2309699292 * s,\n -1.2684380046 * l + 2.6097574011 * m - 0.3413193965 * s,\n -0.0041960863 * l - 0.7034186147 * m + 1.7076147010 * s], dim=0)\n\n @classmethod\n def _rgb_to_hsl(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSL. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n :param rgb: Input sRGB image tensor\n :return: HSL image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsl_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsl_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsl_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsl_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsl_h[cmax_idx == 3] = 0.\n hsl_h /= 6.\n\n hsl_l = (cmax + cmin) / 2.\n hsl_s = torch.empty_like(hsl_h)\n hsl_s[hsl_l == 0] = 0\n hsl_s[hsl_l == 1] = 0\n hsl_l_ma = torch.bitwise_and(hsl_l > 0, hsl_l < 1)\n hsl_l_s0_5 = torch.bitwise_and(hsl_l_ma, hsl_l <= 0.5)\n hsl_l_l0_5 = torch.bitwise_and(hsl_l_ma, hsl_l > 0.5)\n hsl_s[hsl_l_s0_5] = ((cmax - cmin) / (hsl_l * 2.))[hsl_l_s0_5]\n hsl_s[hsl_l_l0_5] = ((cmax - cmin) / (- hsl_l * 2. + 2.))[hsl_l_l0_5]\n return torch.cat([hsl_h, hsl_s, hsl_l], dim=1).squeeze(0)\n\n @classmethod\n def _hsl_to_rgb(cls, hsl: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSL image tensor to sRGB. \n \n .. note::\n\n returns non-linear sRGB w/ gamma curve as output\n\n :param hsl: Input HSL image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsl = hsl.unsqueeze(0)\n hsl_h, hsl_s, hsl_l = hsl[:, 0:1], hsl[:, 1:2], hsl[:, 2:3]\n _c = (-torch.abs(hsl_l * 2. - 1.) + 1) * hsl_s\n _x = _c * (-torch.abs(hsl_h * 6. % 2. 
- 1) + 1.)\n _m = hsl_l - _c / 2.\n idx = (hsl_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsl).to(hsl.device)\n _o = torch.zeros_like(_c).to(hsl.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _rgb_to_hsv(cls, rgb: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB image tensor to sRGB-relative HSV. \n \n .. note::\n\n expects non-linear sRGB w/ gamma curve as input\n\n .. warning::\n\n input tensor will be clamped to [0, 1] range\n\n :param rgb: Input sRGB image tensor\n :return: HSV image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n rgb = rgb.clamp(0.,1.).unsqueeze(0)\n cmax, cmax_idx = torch.max(rgb, dim=1, keepdim=True)\n cmin = torch.min(rgb, dim=1, keepdim=True)[0]\n delta = cmax - cmin\n hsv_h = torch.empty_like(rgb[:, 0:1, :, :])\n cmax_idx[delta == 0] = 3\n hsv_h[cmax_idx == 0] = (((rgb[:, 1:2] - rgb[:, 2:3]) / delta) % 6)[cmax_idx == 0]\n hsv_h[cmax_idx == 1] = (((rgb[:, 2:3] - rgb[:, 0:1]) / delta) + 2)[cmax_idx == 1]\n hsv_h[cmax_idx == 2] = (((rgb[:, 0:1] - rgb[:, 1:2]) / delta) + 4)[cmax_idx == 2]\n hsv_h[cmax_idx == 3] = 0.\n hsv_h /= 6.\n hsv_s = torch.where(cmax == 0, torch.tensor(0.).type_as(rgb), delta / cmax)\n hsv_v = cmax\n return torch.cat([hsv_h, hsv_s, hsv_v], dim=1).squeeze(0)\n\n @classmethod\n def _hsv_to_rgb(cls, hsv: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Transform sRGB-relative HSV image tensor to sRGB. \n \n .. 
note::\n \n returns non-linear sRGB w/ gamma curve as output\n\n :param hsv: Input HSV image tensor\n :return: sRGB image tensor\n \"\"\"\n # https://github.com/windingwind/seal-3d/blob/main/SealNeRF/color_utils.py\n # MIT License\n\n # Copyright (c) 2022 hawkey\n\n # Permission is hereby granted, free of charge, to any person obtaining a copy\n # of this software and associated documentation files (the \"Software\"), to deal\n # in the Software without restriction, including without limitation the rights\n # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n # copies of the Software, and to permit persons to whom the Software is\n # furnished to do so, subject to the following conditions:\n\n # The above copyright notice and this permission notice shall be included in all\n # copies or substantial portions of the Software.\n\n # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n # SOFTWARE.\n hsv = hsv.unsqueeze(0)\n hsv_h, hsv_s, hsv_l = hsv[:, 0:1], hsv[:, 1:2], hsv[:, 2:3]\n _c = hsv_l * hsv_s\n _x = _c * (- torch.abs(hsv_h * 6. % 2. - 1) + 1.)\n _m = hsv_l - _c\n _o = torch.zeros_like(_c).to(hsv.device)\n idx = (hsv_h * 6.).type(torch.uint8)\n idx = (idx % 6).expand(-1, 3, -1, -1)\n rgb = torch.empty_like(hsv).to(hsv.device)\n rgb[idx == 0] = torch.cat([_c, _x, _o], dim=1)[idx == 0]\n rgb[idx == 1] = torch.cat([_x, _c, _o], dim=1)[idx == 1]\n rgb[idx == 2] = torch.cat([_o, _c, _x], dim=1)[idx == 2]\n rgb[idx == 3] = torch.cat([_o, _x, _c], dim=1)[idx == 3]\n rgb[idx == 4] = torch.cat([_x, _o, _c], dim=1)[idx == 4]\n rgb[idx == 5] = torch.cat([_c, _o, _x], dim=1)[idx == 5]\n rgb += _m\n return rgb.squeeze(0)\n\n @classmethod\n def _d60_to_d65(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from \"D60\" to D65 white point.\n\n :param im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n # There is not really a CIE D60 white point, but that's what everyone calls what ACES uses.\n return mm(im, cls.mat_d60_to_d65)\n\n @classmethod\n def _d65_to_d60(cls, im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert CIE XYZ image from D65 to \"D60\" white point.\n\n :param torch.Tensor im: Input image tensor\n :return: Converted image tensor\n \"\"\"\n return mm(im, cls.mat_d65_to_d60)" }, { "identifier": "TransferFunction", "path": "src/tinycio/colorspace.py", "snippet": "class TransferFunction: \n \"\"\"\n Opto-electronic/electro-optical transfer functions. Example:\n\n .. highlight:: python\n .. code-block:: python\n \n im_srgb = TransferFunction.srgb_oetf(im_linear)\n\n .. note::\n These transfer functions are applied automatically by :code:`ColorSpace.convert` when appropriate, \n but can instead be used explicitly.\n\n \"\"\"\n @staticmethod\n def srgb_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n sRGB electro-optical transfer function (sRGB gamma to linear sRGB)\n\n :param im: sRGB image tensor \n :return: linear sRGB image tensor\n \"\"\"\n s1 = im / 12.92321\n s2 = torch.pow((im + 0.055) / 1.055, 12. 
/ 5)\n return torch.where(im <= 0.04045, s1, s2)\n\n @staticmethod\n def srgb_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n sRGB opto-electronic transfer function (linear sRGB to sRGB gamma)\n\n :param im: linear sRGB image tensor \n :return: sRGB image tensor\n \"\"\"\n s1 = im * 12.92321\n s2 = torch.pow(im, 1. / 2.4) * 1.055 - 0.055\n return torch.where(im <= 0.0031308, s1, s2)\n\n @staticmethod\n def rec709_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Rec. 709 electro-optical transfer function (Rec. 709 gamma to linear sRGB)\n\n :param im: Rec. 709 image tensor \n :return: linear sRGB image tensor (same primaries)\n \"\"\"\n s1 = im / 4.5\n s2 = torch.pow((im + 0.099) / 1.099, 2.2)\n return torch.where(im <= 0.081, s1, s2)\n\n @staticmethod\n def rec709_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Rec. 709 opto-electronic transfer function (linear sRGB to Rec. 709 gamma)\n\n :param im: linear sRGB image tensor (same primaries)\n :return: Rec. 709 image tensor\n \"\"\"\n s1 = im * 4.5\n s2 = torch.pow(im, .4545) * 1.099 - 0.099\n return torch.where(im <= 0.018, s1, s2)\n\n @staticmethod\n def rec2020_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n Rec. 2020 electro-optical transfer function (Rec. 2020 gamma to linear)\n\n :param im: Rec. 2020 image tensor \n :return: linear Rec. 2020 gamut image tensor\n \"\"\"\n a = 1.09929682680944\n b = 0.08124285829 \n s1 = im / 4.5\n s2 = torch.pow((im + a - 1.) / a, 1./ 0.45)\n return torch.where(im <= b, s1, s2)\n\n @staticmethod\n def rec2020_oetf(im:torch.Tensor) -> torch.Tensor: \n \"\"\"\n Rec. 2020 opto-electronic transfer function (linear to Rec. 2020 gamma)\n\n :param im: linear Rec. 2020 gamut image tensor \n :return: Rec. 2020 image tensor\n \"\"\"\n a = 1.09929682680944\n b = 0.018053968510807\n s1 = im * 4.5\n s2 = a * torch.pow(im, .45) - (a - 1.)\n return torch.where(im <= b, s1, s2)\n\n @staticmethod\n def dcip3_eotf(im:torch.Tensor) -> torch.Tensor: \n \"\"\"\n DCI P3 electro-optical transfer function (DCI P3 gamma to linear)\n\n :param im: DCI P3 image tensor \n :return: linear P3 gamut image tensor\n \"\"\"\n return torch.pow(im, 2.6)\n\n @staticmethod\n def dcip3_oetf(im:torch.Tensor) -> torch.Tensor: \n \"\"\"\n DCI P3 opto-electronic transfer function (linear to DCI P3 gamma)\n\n :param im: linear P3 gamut image tensor \n :return: DCI P3 image tensor\n \"\"\"\n return torch.pow(im, 1./2.6)\n\n @staticmethod\n def log_c_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n LogC electro-optical transfer function\n\n :param im: LogC encoded image tensor\n :return: linear image tensor \n \"\"\"\n offset = 0.00937677\n x = im.clone()\n x = torch.where(x > 0.1496582, \n torch.pow(10.0, (x - 0.385537) / 0.2471896) * 0.18 - offset,\n (x / 0.9661776 - 0.04378604) * 0.18 - offset)\n return x\n\n @staticmethod\n def log_c_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n LogC opto-electronic transfer function\n\n :param im: linear image tensor \n :return: LogC encoded image tensor\n \"\"\"\n offset = 0.00937677\n x = im.clone()\n x = torch.where(x > 0.02 - offset,\n (((torch.log10((x + offset) / 0.18)) * 0.2471896) + 0.385537),\n ((((x + offset) / 0.18) + 0.04378604) * 0.9661776))\n return x\n\n @staticmethod\n def s_log_eotf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n S-Log electro-optical transfer function\n\n :param im: S-Log encoded image tensor\n :return: linear image tensor \n \"\"\"\n x = im.clone()\n return torch.pow(10.0, ((x - 0.616596 - 0.03) / 0.432699)) - 0.037584\n\n @staticmethod\n def 
s_log_oetf(im:torch.Tensor) -> torch.Tensor:\n \"\"\"\n S-Log opto-electronic transfer function\n\n :param im: linear image tensor \n :return: S-Log encoded image tensor\n \"\"\"\n x = im.clone()\n return (0.432699 * torch.log10(x + 0.037584) + 0.616596) + 0.03" } ]
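The TransferFunction snippet above pairs each electro-optical transfer function with its opto-electronic inverse. The short standalone sketch below re-implements only the sRGB pair, using the same piecewise formulas quoted in the snippet, and checks that encode/decode round-trips; it is an illustration written for this note, not the library's own module, and the tolerance is an arbitrary choice.

import torch

def srgb_eotf(im: torch.Tensor) -> torch.Tensor:
    # sRGB gamma -> linear sRGB, same piecewise form as the quoted snippet
    return torch.where(im <= 0.04045, im / 12.92321,
                       torch.pow((im + 0.055) / 1.055, 2.4))

def srgb_oetf(im: torch.Tensor) -> torch.Tensor:
    # linear sRGB -> sRGB gamma
    return torch.where(im <= 0.0031308, im * 12.92321,
                       torch.pow(im, 1.0 / 2.4) * 1.055 - 0.055)

x = torch.rand(3, 8, 8)   # synthetic [0, 1) image, C x H x W
assert torch.allclose(srgb_oetf(srgb_eotf(x)), x, atol=1e-4)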
import torch
import numpy as np
import typing
from enum import IntEnum
from .np_agx.agx import applyAgX, applyAgXPunchy
from .colorspace import ColorSpace, TransferFunction
17,663
class ToneMapping: """ Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range. Example: .. highlight:: python .. code-block:: python tm = ToneMapping.Variant.HABLE tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm) """ class Variant(IntEnum): """ Tone mapper enum. Available options are: .. highlight:: text .. code-block:: text - NONE - CLAMP - AGX - AGX_PUNCHY - HABLE - REINHARD - ACESCG """ NONE = 1<<0 CLAMP = 1<<1 AGX = 1<<2 AGX_PUNCHY = 1<<3 HABLE = 1<<4 REINHARD = 1<<5 ACESCG = 1<<6 IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD IP_ACESCG = ACESCG OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD OP_ACESCG = ACESCG DISABLED = 0 @classmethod def apply(cls, im:torch.Tensor, tone_mapper:Variant): """ Apply tone mapping to HDR image tensor. Input data is expected to be in the correct color space for the chosen tone mapper. .. note:: :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`. The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically. :param torch.Tensor im: [C=3, H, W] sized image tensor :param ToneMapping.Variant tone_mapper: tonemapper to be used :return: image tensor :rtype: torch.Tensor """ assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}" op, tm = tone_mapper, cls.Variant err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled" if op & tm.DISABLED: raise Exception(err_disabled) if op == tm.NONE: return im elif op == tm.CLAMP: return im.clamp(0., 1.) elif op == tm.AGX: return cls._agx(im) elif op == tm.AGX_PUNCHY: return cls._agx_punchy(im) elif op == tm.HABLE: return cls._hable(im) elif op == tm.REINHARD: return cls._reinhard_extended_luminance(im) elif op == tm.ACESCG: return cls._aces_fitted(im) else: raise Exception(err_not_supported) return out @classmethod def _agx(cls, im:torch.Tensor): device = im.device out = applyAgX(im.permute(1, 2, 0).cpu().numpy()) out = torch.from_numpy(out).permute(2, 0, 1).to(device) return TransferFunction.srgb_eotf(out.clamp(0., 1.)) @classmethod def _agx_punchy(cls, im:torch.Tensor): device = im.device
class ToneMapping: """ Map high-dynamic-range values to low-dynamic-range. LDR is typically sRGB in [0, 1] range. Example: .. highlight:: python .. code-block:: python tm = ToneMapping.Variant.HABLE tonemapped_image = ToneMapping.apply(input_im, tone_mapper=tm) """ class Variant(IntEnum): """ Tone mapper enum. Available options are: .. highlight:: text .. code-block:: text - NONE - CLAMP - AGX - AGX_PUNCHY - HABLE - REINHARD - ACESCG """ NONE = 1<<0 CLAMP = 1<<1 AGX = 1<<2 AGX_PUNCHY = 1<<3 HABLE = 1<<4 REINHARD = 1<<5 ACESCG = 1<<6 IP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD IP_ACESCG = ACESCG OP_SRGB_LIN = CLAMP | AGX | AGX_PUNCHY | HABLE | REINHARD OP_ACESCG = ACESCG DISABLED = 0 @classmethod def apply(cls, im:torch.Tensor, tone_mapper:Variant): """ Apply tone mapping to HDR image tensor. Input data is expected to be in the correct color space for the chosen tone mapper. .. note:: :code:`ACESCG` tone mapping is performed on AP1 primaries and expects input in the :code:`ACESCG` color space. All other tone mappers expect :code:`SRGB_LIN`. The :code:`tone_map()` method of :class:`ColorImage` handles this conversion automatically. :param torch.Tensor im: [C=3, H, W] sized image tensor :param ToneMapping.Variant tone_mapper: tonemapper to be used :return: image tensor :rtype: torch.Tensor """ assert im.dim() == 3 and im.size(0) == 3, f"expected [C=3, H, W] image tensor, got {im.size()}" op, tm = tone_mapper, cls.Variant err_not_supported, err_disabled = f"ToneMapping {op.name} is not supported", f"ToneMapping {op.name} is disabled" if op & tm.DISABLED: raise Exception(err_disabled) if op == tm.NONE: return im elif op == tm.CLAMP: return im.clamp(0., 1.) elif op == tm.AGX: return cls._agx(im) elif op == tm.AGX_PUNCHY: return cls._agx_punchy(im) elif op == tm.HABLE: return cls._hable(im) elif op == tm.REINHARD: return cls._reinhard_extended_luminance(im) elif op == tm.ACESCG: return cls._aces_fitted(im) else: raise Exception(err_not_supported) return out @classmethod def _agx(cls, im:torch.Tensor): device = im.device out = applyAgX(im.permute(1, 2, 0).cpu().numpy()) out = torch.from_numpy(out).permute(2, 0, 1).to(device) return TransferFunction.srgb_eotf(out.clamp(0., 1.)) @classmethod def _agx_punchy(cls, im:torch.Tensor): device = im.device
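The ToneMapping class above only dispatches on the Variant enum; the bodies of most operators it calls, such as _reinhard_extended_luminance and _hable, are not included in this record. As a hedged illustration of what a luminance-based extended Reinhard operator typically looks like (a common textbook formulation, not code taken from the repository; the white_point default is an arbitrary assumption):

import torch

def reinhard_extended_luminance(im: torch.Tensor, white_point: float = 4.0) -> torch.Tensor:
    # Extended Reinhard applied to luminance, then used to rescale RGB (Reinhard et al., 2002).
    lum = 0.2126 * im[0] + 0.7152 * im[1] + 0.0722 * im[2]   # Rec. 709 luminance
    lum_mapped = lum * (1.0 + lum / (white_point ** 2)) / (1.0 + lum)
    scale = torch.where(lum > 0, lum_mapped / lum, torch.zeros_like(lum))
    return (im * scale.unsqueeze(0)).clamp(0.0, 1.0)

hdr = torch.rand(3, 4, 4) * 8.0   # synthetic HDR values
ldr = reinhard_extended_luminance(hdr)
assert ldr.min() >= 0.0 and ldr.max() <= 1.0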
out = applyAgXPunchy(im.permute(1, 2, 0).cpu().numpy())
1
2023-12-15 15:39:08+00:00
24k
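Taken together, the fields above have the shape of a repository-level code-completion example: cropped_code ends just before next_line, and gold_snippet_index appears to select the most relevant entry in context. The dump does not say how these fields are consumed, so the sketch below only shows one plausible way to turn a record into a prompt/target pair; the field semantics and the build_prompt helper are assumptions made for illustration, and the toy record uses made-up values.

def build_prompt(record: dict) -> tuple:
    # Assumed semantics: gold_snippet_index picks the retrieved snippet to prepend.
    gold = record["context"][record["gold_snippet_index"]]
    prompt = "\n".join([
        f"# repo: {record['repo_name']}",
        f"# file: {record['file_path']}",
        f"# retrieved {gold['identifier']} from {gold['path']}",
        gold["snippet"],
        record["import_statement"],
        record["cropped_code"],
    ])
    return prompt, record["next_line"]   # target line a model would be asked to predict

toy = {
    "repo_name": "example/repo",
    "file_path": "pkg/module.py",
    "context": [{"identifier": "helper", "path": "pkg/util.py", "snippet": "def helper(x):\n    return x + 1"}],
    "gold_snippet_index": 0,
    "import_statement": "from pkg.util import helper",
    "cropped_code": "value = 41",
    "next_line": "result = helper(value)",
}
prompt, target = build_prompt(toy)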
quocanh34/magic-animate-modified
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetProcessor", "path": "magicanimate/models/multicontrolnet.py", "snippet": "class ControlNetProcessor(object):\n def __init__(\n self,\n controlnet: ControlNetModel,\n # image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]],\n # controlnet_cond = torch.FloatTensor, #fix\n # conditioning_scale: float = 1.0,\n ):\n self.controlnet = controlnet\n # self.image = image\n # self.controlnet_cond = controlnet_cond #fix\n # self.conditioning_scale = conditioning_scale\n\n # def _default_height_width(self, height, width, image):\n # if isinstance(image, list):\n # image = image[0]\n\n # if height is None:\n # if isinstance(image, PIL.Image.Image):\n # height = image.height\n # elif isinstance(image, torch.Tensor):\n # height = image.shape[3]\n\n # height = (height // 8) * 8 # round down to nearest multiple of 8\n\n # if width is None:\n # if isinstance(image, PIL.Image.Image):\n # width = image.width\n # elif isinstance(image, torch.Tensor):\n # width = image.shape[2]\n\n # width = (width // 8) * 8 # round down to nearest multiple of 8\n\n # return height, width\n\n # def default_height_width(self, height, width):\n # return self._default_height_width(height, width, self.image)\n\n # def _prepare_image(self, image, width, height, batch_size, num_images_per_prompt, device, dtype):\n # if not isinstance(image, torch.Tensor):\n # if isinstance(image, PIL.Image.Image):\n # image = [image]\n\n # if isinstance(image[0], PIL.Image.Image):\n # image = [\n # np.array(i.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"]))[None, :] for i in image\n # ]\n # image = np.concatenate(image, axis=0)\n # image = np.array(image).astype(np.float32) / 255.0\n # image = image.transpose(0, 3, 1, 2)\n # image = torch.from_numpy(image)\n # elif isinstance(image[0], torch.Tensor):\n # image = torch.cat(image, dim=0)\n\n # image_batch_size = image.shape[0]\n\n # if image_batch_size == 1:\n # repeat_by = batch_size\n # else:\n # # image batch size is the same as prompt batch size\n # repeat_by = num_images_per_prompt\n\n # image = image.repeat_interleave(repeat_by, dim=0)\n\n # image = image.to(device=device, dtype=dtype)\n\n # return image\n\n # def _check_inputs(self, image, prompt, prompt_embeds):\n # image_is_pil = isinstance(image, PIL.Image.Image)\n # image_is_tensor = isinstance(image, torch.Tensor)\n # image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n # image_is_tensor_list = 
isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n # if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n # raise TypeError(\n # \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n # )\n\n # if image_is_pil:\n # image_batch_size = 1\n # elif image_is_tensor:\n # image_batch_size = image.shape[0]\n # elif image_is_pil_list:\n # image_batch_size = len(image)\n # elif image_is_tensor_list:\n # image_batch_size = len(image)\n\n # if prompt is not None and isinstance(prompt, str):\n # prompt_batch_size = 1\n # elif prompt is not None and isinstance(prompt, list):\n # prompt_batch_size = len(prompt)\n # elif prompt_embeds is not None:\n # prompt_batch_size = prompt_embeds.shape[0]\n\n # if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n # raise ValueError(\n # f\"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n # )\n\n # def check_inputs(self, prompt, prompt_embeds):\n # self._check_inputs(self.image, prompt, prompt_embeds)\n\n # def prepare_image(self, width, height, batch_size, num_images_per_prompt, device, do_classifier_free_guidance):\n # self.image = self._prepare_image(\n # self.image, width, height, batch_size, num_images_per_prompt, device, self.controlnet.dtype\n # )\n # if do_classifier_free_guidance:\n # self.image = torch.cat([self.image] * 2)\n\n def __call__(\n self,\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond, #fix\n conditioning_scale,\n return_dict,\n ) -> Tuple:\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_input,\n t,\n encoder_hidden_states,\n controlnet_cond,\n conditioning_scale, \n return_dict=False,\n )\n down_block_res_samples = [\n down_block_res_sample * conditioning_scale for down_block_res_sample in down_block_res_samples\n ]\n mid_block_res_sample *= conditioning_scale\n return (down_block_res_samples, mid_block_res_sample)" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math import numpy as np import torch import torch.distributed as dist from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from magicanimate.models.unet_controlnet import UNet3DConditionModel from magicanimate.models.multicontrolnet import ControlNetProcessor #fix from magicanimate.models.mutual_self_attention import ReferenceAttentionControl from magicanimate.pipelines.context import ( get_context_scheduler, get_total_steps ) from magicanimate.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
15,845
""" Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if 
do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]],
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ # from magicanimate.models.controlnet import ControlNetModel logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, # controlnet: ControlNetModel, # processors: List[ControlNetProcessor], scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, # controlnet1=processors[0], scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition1, condition2, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # Prepare first condition condition1 = torch.from_numpy(condition1.copy()).to(device=device, dtype=dtype) / 255.0 condition1 = torch.stack([condition1 for _ in range(num_videos_per_prompt)], dim=0) condition1 = rearrange(condition1, 'b f h w c -> (b f) c h w').clone() # Prepare second condition condition2 = torch.from_numpy(condition2.copy()).to(device=device, dtype=dtype) / 255.0 condition2 = torch.stack([condition2 for _ in range(num_videos_per_prompt)], dim=0) condition2 = rearrange(condition2, 'b f h w c -> (b f) c h w').clone() # Here, we're averaging the two conditions combined_condition = (condition1*8+condition2*2)/10 if do_classifier_free_guidance: combined_condition = torch.cat([combined_condition] * 2) #combined_condition = torch.from_numpy(combined_condition.copy()).to(device=device, dtype=dtype) return combined_condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep 
timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate: v = 
get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f) new_latents[:,:,new_index,:,:] = v.to(latents.device) new_index += 1 new_latents[:,:,new_index,:,:] = v1 new_index += 1 return new_latents def select_controlnet_res_samples(self, controlnet_res_samples_cache_dict, context, do_classifier_free_guidance, b, f): _down_block_res_samples = [] _mid_block_res_sample = [] for i in np.concatenate(np.array(context)): _down_block_res_samples.append(controlnet_res_samples_cache_dict[i][0]) _mid_block_res_sample.append(controlnet_res_samples_cache_dict[i][1]) down_block_res_samples = [[] for _ in range(len(controlnet_res_samples_cache_dict[i][0]))] for res_t in _down_block_res_samples: for i, res in enumerate(res_t): down_block_res_samples[i].append(res) down_block_res_samples = [torch.cat(res) for res in down_block_res_samples] mid_block_res_sample = torch.cat(_mid_block_res_sample) # reshape controlnet output to match the unet3d inputs b = b // 2 if do_classifier_free_guidance else b _down_block_res_samples = [] for sample in down_block_res_samples: sample = rearrange(sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: sample = sample.repeat(2, 1, 1, 1, 1) _down_block_res_samples.append(sample) down_block_res_samples = _down_block_res_samples mid_block_res_sample = rearrange(mid_block_res_sample, '(b f) c h w -> b c f h w', b=b, f=f) if do_classifier_free_guidance: mid_block_res_sample = mid_block_res_sample.repeat(2, 1, 1, 1, 1) return down_block_res_samples, mid_block_res_sample @torch.no_grad() def __call__( self, prompt: Union[str, List[str]],
processors: List[ControlNetProcessor], #fix
1
2023-12-15 01:22:37+00:00
24k
Azure-Samples/functions-python-web-crawler
.venv/Lib/site-packages/urllib3/connection.py
[ { "identifier": "HTTPHeaderDict", "path": ".venv/Lib/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "assert_header_parsing", "path": ".venv/Lib/site-packages/urllib3/util/response.py", "snippet": "def assert_header_parsing(headers: httplib.HTTPMessage) -> None:\n \"\"\"\n Asserts whether all headers have been 
successfully parsed.\n Extracts encountered errors from the result of parsing headers.\n\n Only works on Python 3.\n\n :param http.client.HTTPMessage headers: Headers to verify.\n\n :raises urllib3.exceptions.HeaderParsingError:\n If parsing errors are found.\n \"\"\"\n\n # This will fail silently if we pass in the wrong kind of parameter.\n # To make debugging easier add an explicit check.\n if not isinstance(headers, httplib.HTTPMessage):\n raise TypeError(f\"expected httplib.Message, got {type(headers)}.\")\n\n unparsed_data = None\n\n # get_payload is actually email.message.Message.get_payload;\n # we're only interested in the result if it's not a multipart message\n if not headers.is_multipart():\n payload = headers.get_payload()\n\n if isinstance(payload, (bytes, str)):\n unparsed_data = payload\n\n # httplib is assuming a response body is available\n # when parsing headers even when httplib only sends\n # header data to parse_headers() This results in\n # defects on multipart responses in particular.\n # See: https://github.com/urllib3/urllib3/issues/800\n\n # So we ignore the following defects:\n # - StartBoundaryNotFoundDefect:\n # The claimed start boundary was never found.\n # - MultipartInvariantViolationDefect:\n # A message claimed to be a multipart but no subparts were found.\n defects = [\n defect\n for defect in headers.defects\n if not isinstance(\n defect, (StartBoundaryNotFoundDefect, MultipartInvariantViolationDefect)\n )\n ]\n\n if defects or unparsed_data:\n raise HeaderParsingError(defects=defects, unparsed_data=unparsed_data)" }, { "identifier": "_DEFAULT_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_DEFAULT_TIMEOUT: Final[_TYPE_DEFAULT] = _TYPE_DEFAULT.token" }, { "identifier": "_TYPE_TIMEOUT", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "_TYPE_TIMEOUT = typing.Optional[typing.Union[float, _TYPE_DEFAULT]]" }, { "identifier": "Timeout", "path": ".venv/Lib/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. 
Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. 
It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "to_str", "path": ".venv/Lib/site-packages/urllib3/util/util.py", "snippet": "def to_str(\n x: str | bytes, encoding: str | None = None, errors: str | None = None\n) -> str:\n if isinstance(x, str):\n return x\n elif not isinstance(x, bytes):\n raise TypeError(f\"not expecting type {type(x).__name__}\")\n if encoding or errors:\n return x.decode(encoding or \"utf-8\", errors=errors or \"strict\")\n return x.decode()" }, { "identifier": "wait_for_read", "path": ".venv/Lib/site-packages/urllib3/util/wait.py", "snippet": "def wait_for_read(sock: socket.socket, timeout: float | None = None) -> bool:\n \"\"\"Waits for reading to be available on a given socket.\n Returns True if the socket is readable, or False if the timeout expired.\n \"\"\"\n return wait_for_socket(sock, read=True, timeout=timeout)" }, { "identifier": "_TYPE_BODY", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "_TYPE_BODY = typing.Union[bytes, typing.IO[typing.Any], typing.Iterable[bytes], str]" }, { "identifier": "ProxyConfig", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class ProxyConfig(typing.NamedTuple):\n ssl_context: ssl.SSLContext | None\n use_forwarding_for_https: bool\n assert_hostname: None | str | Literal[False]\n assert_fingerprint: str | None" }, { "identifier": "_ResponseOptions", "path": ".venv/Lib/site-packages/urllib3/_base_connection.py", "snippet": "class _ResponseOptions(typing.NamedTuple):\n # TODO: Remove this in favor of a better\n # HTTP request/response lifecycle tracking.\n request_method: str\n request_url: str\n preload_content: bool\n decode_content: bool\n enforce_content_length: bool" }, { "identifier": "__version__", "path": ".venv/Lib/site-packages/urllib3/_version.py", "snippet": "" }, { "identifier": "ConnectTimeoutError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ConnectTimeoutError(TimeoutError):\n \"\"\"Raised when a socket timeout occurs while connecting to a server\"\"\"" }, { "identifier": "HeaderParsingError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class HeaderParsingError(HTTPError):\n \"\"\"Raised by assert_header_parsing, but we convert it to a log.warning statement.\"\"\"\n\n def __init__(\n self, defects: list[MessageDefect], unparsed_data: bytes | str | None\n ) -> None:\n message = f\"{defects or 'Unknown'}, unparsed data: {unparsed_data!r}\"\n super().__init__(message)" }, { "identifier": "NameResolutionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NameResolutionError(NewConnectionError):\n \"\"\"Raised when host name resolution fails.\"\"\"\n\n def __init__(self, host: str, 
conn: HTTPConnection, reason: socket.gaierror):\n message = f\"Failed to resolve '{host}' ({reason})\"\n super().__init__(conn, message)" }, { "identifier": "NewConnectionError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class NewConnectionError(ConnectTimeoutError, HTTPError):\n \"\"\"Raised when we fail to establish a new connection. Usually ECONNREFUSED.\"\"\"\n\n def __init__(self, conn: HTTPConnection, message: str) -> None:\n self.conn = conn\n super().__init__(f\"{conn}: {message}\")\n\n @property\n def pool(self) -> HTTPConnection:\n warnings.warn(\n \"The 'pool' property is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Use 'conn' instead.\",\n DeprecationWarning,\n stacklevel=2,\n )\n\n return self.conn" }, { "identifier": "ProxyError", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class ProxyError(HTTPError):\n \"\"\"Raised when the connection to a proxy fails.\"\"\"\n\n # The original error is also available as __cause__.\n original_error: Exception\n\n def __init__(self, message: str, error: Exception) -> None:\n super().__init__(message, error)\n self.original_error = error" }, { "identifier": "SystemTimeWarning", "path": ".venv/Lib/site-packages/urllib3/exceptions.py", "snippet": "class SystemTimeWarning(SecurityWarning):\n \"\"\"Warned when system time is suspected to be wrong\"\"\"" }, { "identifier": "connection", "path": ".venv/Lib/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]\nHAS_IPV6 = _has_ipv6(\"::1\")\ndef is_connection_dropped(conn: BaseHTTPConnection) -> bool: # Platform-specific\ndef create_connection(\n address: tuple[str, int],\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n socket_options: _TYPE_SOCKET_OPTIONS | None = None,\n) -> socket.socket:\ndef _set_socket_options(\n sock: socket.socket, options: _TYPE_SOCKET_OPTIONS | None\n) -> None:\ndef allowed_gai_family() -> socket.AddressFamily:\ndef _has_ipv6(host: str) -> bool:" }, { "identifier": "ssl_", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "HAS_NEVER_CHECK_COMMON_NAME = False\nIS_PYOPENSSL = False\nALPN_PROTOCOLS = [\"http/1.1\"]\n_TYPE_VERSION_INFO = typing.Tuple[int, int, int, str, int]\nHASHFUNC_MAP = {32: md5, 40: sha1, 64: sha256}\n_SSL_VERSION_TO_TLS_VERSION: dict[int, int] = {}\n HAS_NEVER_CHECK_COMMON_NAME = False\n OP_NO_COMPRESSION = 0x20000 # type: ignore[assignment]\n OP_NO_TICKET = 0x4000 # type: ignore[assignment]\n PROTOCOL_TLS_CLIENT = 16 # type: ignore[assignment]\n_TYPE_PEER_CERT_RET = typing.Union[\"_TYPE_PEER_CERT_RET_DICT\", bytes, None]\ndef _is_bpo_43522_fixed(\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef _is_has_never_check_common_name_reliable(\n openssl_version: str,\n openssl_version_number: int,\n implementation_name: str,\n version_info: _TYPE_VERSION_INFO,\n pypy_version_info: _TYPE_VERSION_INFO | None,\n) -> bool:\ndef assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\ndef resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\ndef resolve_ssl_version(candidate: None | int | str) -> int:\ndef create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> 
ssl.SSLContext:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str | None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: bool = ...,\n) -> ssl.SSLSocket | SSLTransportType:\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = None,\n certfile: str | None = None,\n cert_reqs: int | None = None,\n ca_certs: str | None = None,\n server_hostname: str | None = None,\n ssl_version: int | None = None,\n ciphers: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_cert_dir: str | None = None,\n key_password: str | None = None,\n ca_cert_data: None | str | bytes = None,\n tls_in_tls: bool = False,\n) -> ssl.SSLSocket | SSLTransportType:\ndef is_ipaddress(hostname: str | bytes) -> bool:\ndef _is_key_file_encrypted(key_file: str) -> bool:\ndef _ssl_wrap_socket_impl(\n sock: socket.socket,\n ssl_context: ssl.SSLContext,\n tls_in_tls: bool,\n server_hostname: str | None = None,\n) -> ssl.SSLSocket | SSLTransportType:\n class _TYPE_PEER_CERT_RET_DICT(TypedDict, total=False):" }, { "identifier": "SKIP_HEADER", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIP_HEADER = \"@@@SKIP_HEADER@@@\"" }, { "identifier": "SKIPPABLE_HEADERS", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "SKIPPABLE_HEADERS = frozenset([\"accept-encoding\", \"host\", \"user-agent\"])" }, { "identifier": "body_to_chunks", "path": ".venv/Lib/site-packages/urllib3/util/request.py", "snippet": "def body_to_chunks(\n body: typing.Any | None, method: str, blocksize: int\n) -> ChunksAndContentLength:\n \"\"\"Takes the HTTP request method, body, and blocksize and\n transforms them into an iterable of chunks to pass to\n socket.sendall() and an optional 'Content-Length' header.\n\n A 'Content-Length' of 'None' indicates the length of the body\n can't be determined so should use 'Transfer-Encoding: chunked'\n for framing instead.\n \"\"\"\n\n chunks: typing.Iterable[bytes] | None\n content_length: int | None\n\n # No body, we need to make a recommendation on 'Content-Length'\n # based on whether that request method is expected to have\n # a body or not.\n if body is None:\n chunks = None\n if method.upper() not in _METHODS_NOT_EXPECTING_BODY:\n content_length = 0\n else:\n content_length = None\n\n # Bytes or strings become bytes\n elif isinstance(body, (str, bytes)):\n chunks = (to_bytes(body),)\n content_length = len(chunks[0])\n\n # File-like object, TODO: use seek() and tell() for length?\n elif hasattr(body, \"read\"):\n\n def chunk_readable() -> typing.Iterable[bytes]:\n nonlocal body, blocksize\n encode = isinstance(body, io.TextIOBase)\n while True:\n datablock = body.read(blocksize)\n if not datablock:\n break\n if encode:\n datablock = 
datablock.encode(\"iso-8859-1\")\n yield datablock\n\n chunks = chunk_readable()\n content_length = None\n\n # Otherwise we need to start checking via duck-typing.\n else:\n try:\n # Check if the body implements the buffer API.\n mv = memoryview(body)\n except TypeError:\n try:\n # Check if the body is an iterable\n chunks = iter(body)\n content_length = None\n except TypeError:\n raise TypeError(\n f\"'body' must be a bytes-like object, file-like \"\n f\"object, or iterable. Instead was {body!r}\"\n ) from None\n else:\n # Since it implements the buffer API can be passed directly to socket.sendall()\n chunks = (body,)\n content_length = mv.nbytes\n\n return ChunksAndContentLength(chunks=chunks, content_length=content_length)" }, { "identifier": "assert_fingerprint", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def assert_fingerprint(cert: bytes | None, fingerprint: str) -> None:\n \"\"\"\n Checks if given fingerprint matches the supplied certificate.\n\n :param cert:\n Certificate as bytes object.\n :param fingerprint:\n Fingerprint as string of hexdigits, can be interspersed by colons.\n \"\"\"\n\n if cert is None:\n raise SSLError(\"No certificate for the peer.\")\n\n fingerprint = fingerprint.replace(\":\", \"\").lower()\n digest_length = len(fingerprint)\n hashfunc = HASHFUNC_MAP.get(digest_length)\n if not hashfunc:\n raise SSLError(f\"Fingerprint of invalid length: {fingerprint}\")\n\n # We need encode() here for py32; works on py2 and p33.\n fingerprint_bytes = unhexlify(fingerprint.encode())\n\n cert_digest = hashfunc(cert).digest()\n\n if not hmac.compare_digest(cert_digest, fingerprint_bytes):\n raise SSLError(\n f'Fingerprints did not match. Expected \"{fingerprint}\", got \"{cert_digest.hex()}\"'\n )" }, { "identifier": "create_urllib3_context", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def create_urllib3_context(\n ssl_version: int | None = None,\n cert_reqs: int | None = None,\n options: int | None = None,\n ciphers: str | None = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n) -> ssl.SSLContext:\n \"\"\"Creates and configures an :class:`ssl.SSLContext` instance for use with urllib3.\n\n :param ssl_version:\n The desired protocol version to use. This will default to\n PROTOCOL_SSLv23 which will negotiate the highest protocol that both\n the server and your installation of OpenSSL support.\n\n This parameter is deprecated instead use 'ssl_minimum_version'.\n :param ssl_minimum_version:\n The minimum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n :param ssl_maximum_version:\n The maximum version of TLS to be used. Use the 'ssl.TLSVersion' enum for specifying the value.\n Not recommended to set to anything other than 'ssl.TLSVersion.MAXIMUM_SUPPORTED' which is the\n default value.\n :param cert_reqs:\n Whether to require the certificate verification. This defaults to\n ``ssl.CERT_REQUIRED``.\n :param options:\n Specific OpenSSL options. These default to ``ssl.OP_NO_SSLv2``,\n ``ssl.OP_NO_SSLv3``, ``ssl.OP_NO_COMPRESSION``, and ``ssl.OP_NO_TICKET``.\n :param ciphers:\n Which cipher suites to allow the server to select. 
Defaults to either system configured\n ciphers if OpenSSL 1.1.1+, otherwise uses a secure default set of ciphers.\n :returns:\n Constructed SSLContext object with specified options\n :rtype: SSLContext\n \"\"\"\n if SSLContext is None:\n raise TypeError(\"Can't create an SSLContext object without an ssl module\")\n\n # This means 'ssl_version' was specified as an exact value.\n if ssl_version not in (None, PROTOCOL_TLS, PROTOCOL_TLS_CLIENT):\n # Disallow setting 'ssl_version' and 'ssl_minimum|maximum_version'\n # to avoid conflicts.\n if ssl_minimum_version is not None or ssl_maximum_version is not None:\n raise ValueError(\n \"Can't specify both 'ssl_version' and either \"\n \"'ssl_minimum_version' or 'ssl_maximum_version'\"\n )\n\n # 'ssl_version' is deprecated and will be removed in the future.\n else:\n # Use 'ssl_minimum_version' and 'ssl_maximum_version' instead.\n ssl_minimum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MINIMUM_SUPPORTED\n )\n ssl_maximum_version = _SSL_VERSION_TO_TLS_VERSION.get(\n ssl_version, TLSVersion.MAXIMUM_SUPPORTED\n )\n\n # This warning message is pushing users to use 'ssl_minimum_version'\n # instead of both min/max. Best practice is to only set the minimum version and\n # keep the maximum version to be it's default value: 'TLSVersion.MAXIMUM_SUPPORTED'\n warnings.warn(\n \"'ssl_version' option is deprecated and will be \"\n \"removed in urllib3 v2.1.0. Instead use 'ssl_minimum_version'\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n\n # PROTOCOL_TLS is deprecated in Python 3.10 so we always use PROTOCOL_TLS_CLIENT\n context = SSLContext(PROTOCOL_TLS_CLIENT)\n\n if ssl_minimum_version is not None:\n context.minimum_version = ssl_minimum_version\n else: # Python <3.10 defaults to 'MINIMUM_SUPPORTED' so explicitly set TLSv1.2 here\n context.minimum_version = TLSVersion.TLSv1_2\n\n if ssl_maximum_version is not None:\n context.maximum_version = ssl_maximum_version\n\n # Unless we're given ciphers defer to either system ciphers in\n # the case of OpenSSL 1.1.1+ or use our own secure default ciphers.\n if ciphers:\n context.set_ciphers(ciphers)\n\n # Setting the default here, as we may have no ssl module on import\n cert_reqs = ssl.CERT_REQUIRED if cert_reqs is None else cert_reqs\n\n if options is None:\n options = 0\n # SSLv2 is easily broken and is considered harmful and dangerous\n options |= OP_NO_SSLv2\n # SSLv3 has several problems and is now dangerous\n options |= OP_NO_SSLv3\n # Disable compression to prevent CRIME attacks for OpenSSL 1.0+\n # (issue #309)\n options |= OP_NO_COMPRESSION\n # TLSv1.2 only. Unless set explicitly, do not request tickets.\n # This may save some bandwidth on wire, and although the ticket is encrypted,\n # there is a risk associated with it being on wire,\n # if the server is not rotating its ticketing keys properly.\n options |= OP_NO_TICKET\n\n context.options |= options\n\n # Enable post-handshake authentication for TLS 1.3, see GH #1634. PHA is\n # necessary for conditional client cert authentication with TLS 1.3.\n # The attribute is None for OpenSSL <= 1.1.0 or does not exist in older\n # versions of Python. 
We only enable if certificate verification is enabled to work\n # around Python issue #37428\n # See: https://bugs.python.org/issue37428\n if (\n cert_reqs == ssl.CERT_REQUIRED\n and getattr(context, \"post_handshake_auth\", None) is not None\n ):\n context.post_handshake_auth = True\n\n # The order of the below lines setting verify_mode and check_hostname\n # matter due to safe-guards SSLContext has to prevent an SSLContext with\n # check_hostname=True, verify_mode=NONE/OPTIONAL.\n # We always set 'check_hostname=False' for pyOpenSSL so we rely on our own\n # 'ssl.match_hostname()' implementation.\n if cert_reqs == ssl.CERT_REQUIRED and not IS_PYOPENSSL:\n context.verify_mode = cert_reqs\n context.check_hostname = True\n else:\n context.check_hostname = False\n context.verify_mode = cert_reqs\n\n try:\n context.hostname_checks_common_name = False\n except AttributeError: # Defensive: for CPython < 3.8.9 and 3.9.3; for PyPy < 7.3.8\n pass\n\n # Enable logging of TLS session keys via defacto standard environment variable\n # 'SSLKEYLOGFILE', if the feature is available (Python 3.8+). Skip empty values.\n if hasattr(context, \"keylog_filename\"):\n sslkeylogfile = os.environ.get(\"SSLKEYLOGFILE\")\n if sslkeylogfile:\n context.keylog_filename = sslkeylogfile\n\n return context" }, { "identifier": "is_ipaddress", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def is_ipaddress(hostname: str | bytes) -> bool:\n \"\"\"Detects whether the hostname given is an IPv4 or IPv6 address.\n Also detects IPv6 addresses with Zone IDs.\n\n :param str hostname: Hostname to examine.\n :return: True if the hostname is an IP address, False otherwise.\n \"\"\"\n if isinstance(hostname, bytes):\n # IDN A-label bytes are ASCII compatible.\n hostname = hostname.decode(\"ascii\")\n return bool(_IPV4_RE.match(hostname) or _BRACELESS_IPV6_ADDRZ_RE.match(hostname))" }, { "identifier": "resolve_cert_reqs", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_cert_reqs(candidate: None | int | str) -> VerifyMode:\n \"\"\"\n Resolves the argument to a numeric constant, which can be passed to\n the wrap_socket function/method from the ssl module.\n Defaults to :data:`ssl.CERT_REQUIRED`.\n If given a string it is assumed to be the name of the constant in the\n :mod:`ssl` module or its abbreviation.\n (So you can specify `REQUIRED` instead of `CERT_REQUIRED`.\n If it's neither `None` nor a string we assume it is already the numeric\n constant which can directly be passed to wrap_socket.\n \"\"\"\n if candidate is None:\n return CERT_REQUIRED\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"CERT_\" + candidate)\n return res # type: ignore[no-any-return]\n\n return candidate # type: ignore[return-value]" }, { "identifier": "resolve_ssl_version", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "def resolve_ssl_version(candidate: None | int | str) -> int:\n \"\"\"\n like resolve_cert_reqs\n \"\"\"\n if candidate is None:\n return PROTOCOL_TLS\n\n if isinstance(candidate, str):\n res = getattr(ssl, candidate, None)\n if res is None:\n res = getattr(ssl, \"PROTOCOL_\" + candidate)\n return typing.cast(int, res)\n\n return candidate" }, { "identifier": "ssl_wrap_socket", "path": ".venv/Lib/site-packages/urllib3/util/ssl_.py", "snippet": "@typing.overload\ndef ssl_wrap_socket(\n sock: socket.socket,\n keyfile: str | None = ...,\n certfile: str | None = ...,\n cert_reqs: int | None = ...,\n ca_certs: str 
| None = ...,\n server_hostname: str | None = ...,\n ssl_version: int | None = ...,\n ciphers: str | None = ...,\n ssl_context: ssl.SSLContext | None = ...,\n ca_cert_dir: str | None = ...,\n key_password: str | None = ...,\n ca_cert_data: None | str | bytes = ...,\n tls_in_tls: Literal[False] = ...,\n) -> ssl.SSLSocket:\n ..." }, { "identifier": "CertificateError", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "class CertificateError(ValueError):\n pass" }, { "identifier": "match_hostname", "path": ".venv/Lib/site-packages/urllib3/util/ssl_match_hostname.py", "snippet": "def match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\n \"\"\"Verify that *cert* (in decoded format as returned by\n SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125\n rules are followed, but IP addresses are not accepted for *hostname*.\n\n CertificateError is raised on failure. On success, the function\n returns nothing.\n \"\"\"\n if not cert:\n raise ValueError(\n \"empty or no certificate, match_hostname needs a \"\n \"SSL socket or SSL context with either \"\n \"CERT_OPTIONAL or CERT_REQUIRED\"\n )\n try:\n # Divergence from upstream: ipaddress can't handle byte str\n #\n # The ipaddress module shipped with Python < 3.9 does not support\n # scoped IPv6 addresses so we unconditionally strip the Zone IDs for\n # now. Once we drop support for Python 3.9 we can remove this branch.\n if \"%\" in hostname:\n host_ip = ipaddress.ip_address(hostname[: hostname.rfind(\"%\")])\n else:\n host_ip = ipaddress.ip_address(hostname)\n\n except ValueError:\n # Not an IP address (common case)\n host_ip = None\n dnsnames = []\n san: tuple[tuple[str, str], ...] = cert.get(\"subjectAltName\", ())\n key: str\n value: str\n for key, value in san:\n if key == \"DNS\":\n if host_ip is None and _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n elif key == \"IP Address\":\n if host_ip is not None and _ipaddress_match(value, host_ip):\n return\n dnsnames.append(value)\n\n # We only check 'commonName' if it's enabled and we're not verifying\n # an IP address. IP addresses aren't valid within 'commonName'.\n if hostname_checks_common_name and host_ip is None and not dnsnames:\n for sub in cert.get(\"subject\", ()):\n for key, value in sub:\n if key == \"commonName\":\n if _dnsname_match(value, hostname):\n return\n dnsnames.append(value)\n\n if len(dnsnames) > 1:\n raise CertificateError(\n \"hostname %r \"\n \"doesn't match either of %s\" % (hostname, \", \".join(map(repr, dnsnames)))\n )\n elif len(dnsnames) == 1:\n raise CertificateError(f\"hostname {hostname!r} doesn't match {dnsnames[0]!r}\")\n else:\n raise CertificateError(\"no appropriate subjectAltName fields were found\")" }, { "identifier": "Url", "path": ".venv/Lib/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. 
Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" } ]
import datetime import logging import os import re import socket import sys import typing import warnings import ssl from http.client import HTTPConnection as _HTTPConnection from http.client import HTTPException as HTTPException # noqa: F401 from http.client import ResponseNotReady from socket import timeout as SocketTimeout from typing import Literal from .response import HTTPResponse from .util.ssl_ import _TYPE_PEER_CERT_RET_DICT from .util.ssltransport import SSLTransport from ._collections import HTTPHeaderDict from .util.response import assert_header_parsing from .util.timeout import _DEFAULT_TIMEOUT, _TYPE_TIMEOUT, Timeout from .util.util import to_str from .util.wait import wait_for_read from ._base_connection import _TYPE_BODY from ._base_connection import ProxyConfig as ProxyConfig from ._base_connection import _ResponseOptions as _ResponseOptions from ._version import __version__ from .exceptions import ( ConnectTimeoutError, HeaderParsingError, NameResolutionError, NewConnectionError, ProxyError, SystemTimeWarning, ) from .util import SKIP_HEADER, SKIPPABLE_HEADERS, connection, ssl_ from .util.request import body_to_chunks from .util.ssl_ import assert_fingerprint as _assert_fingerprint from .util.ssl_ import ( create_urllib3_context, is_ipaddress, resolve_cert_reqs, resolve_ssl_version, ssl_wrap_socket, ) from .util.ssl_match_hostname import CertificateError, match_hostname from .util.url import Url from .response import HTTPResponse
15905
self._response_options = _ResponseOptions( request_method=method, request_url=url, preload_content=preload_content, decode_content=decode_content, enforce_content_length=enforce_content_length, ) if headers is None: headers = {} header_keys = frozenset(to_str(k.lower()) for k in headers) skip_accept_encoding = "accept-encoding" in header_keys skip_host = "host" in header_keys self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) # Transform the body into an iterable of sendall()-able chunks # and detect if an explicit Content-Length is doable. chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize) chunks = chunks_and_cl.chunks content_length = chunks_and_cl.content_length # When chunked is explicit set to 'True' we respect that. if chunked: if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") else: # Detect whether a framing mechanism is already in use. If so # we respect that value, otherwise we pick chunked vs content-length # depending on the type of 'body'. if "content-length" in header_keys: chunked = False elif "transfer-encoding" in header_keys: chunked = True # Otherwise we go off the recommendation of 'body_to_chunks()'. else: chunked = False if content_length is None: if chunks is not None: chunked = True self.putheader("Transfer-Encoding", "chunked") else: self.putheader("Content-Length", str(content_length)) # Now that framing headers are out of the way we send all the other headers. if "user-agent" not in header_keys: self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) self.endheaders() # If we're given a body we start sending that in chunks. if chunks is not None: for chunk in chunks: # Sending empty chunks isn't allowed for TE: chunked # as it indicates the end of the body. if not chunk: continue if isinstance(chunk, str): chunk = chunk.encode("utf-8") if chunked: self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk)) else: self.send(chunk) # Regardless of whether we have a body or not, if we're in # chunked mode we want to send an explicit empty chunk. if chunked: self.send(b"0\r\n\r\n") def request_chunked( self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, ) -> None: """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ warnings.warn( "HTTPConnection.request_chunked() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).", category=DeprecationWarning, stacklevel=2, ) self.request(method, url, body=body, headers=headers, chunked=True) def getresponse( # type: ignore[override] self, ) -> HTTPResponse: """ Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # Raise the same error as http.client.HTTPConnection if self._response_options is None: raise ResponseNotReady() # Reset this attribute for being used again. 
resp_options = self._response_options self._response_options = None # Since the connection's timeout value may have been updated # we need to set the timeout on the socket. self.sock.settimeout(self.timeout) # This is needed here to avoid circular import errors # Get the response from http.client.HTTPConnection httplib_response = super().getresponse() try: assert_header_parsing(httplib_response.msg)
from __future__ import annotations if typing.TYPE_CHECKING: try: # Compiled with SSL? BaseSSLError = ssl.SSLError except (ImportError, AttributeError): ssl = None # type: ignore[assignment] class BaseSSLError(BaseException): # type: ignore[no-redef] pass # Not a no-op, we're adding this to the namespace so it can be imported. ConnectionError = ConnectionError BrokenPipeError = BrokenPipeError log = logging.getLogger(__name__) port_by_scheme = {"http": 80, "https": 443} # When it comes time to update this value as a part of regular maintenance # (ie test_recent_date is failing) update it to ~6 months before the current date. RECENT_DATE = datetime.date(2022, 1, 1) _CONTAINS_CONTROL_CHAR_RE = re.compile(r"[^-!#$%&'*+.^_`|~0-9a-zA-Z]") _HAS_SYS_AUDIT = hasattr(sys, "audit") class HTTPConnection(_HTTPConnection): """ Based on :class:`http.client.HTTPConnection` but provides an extra constructor backwards-compatibility layer between older and newer Pythons. Additional keyword parameters are used to configure attributes of the connection. Accepted parameters include: - ``source_address``: Set the source address for the current connection. - ``socket_options``: Set specific options on the underlying socket. If not specified, then defaults are loaded from ``HTTPConnection.default_socket_options`` which includes disabling Nagle's algorithm (sets TCP_NODELAY to 1) unless the connection is behind a proxy. For example, if you wish to enable TCP Keep Alive in addition to the defaults, you might pass: .. code-block:: python HTTPConnection.default_socket_options + [ (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1), ] Or you may want to disable the defaults by passing an empty list (e.g., ``[]``). """ default_port: typing.ClassVar[int] = port_by_scheme["http"] # type: ignore[misc] #: Disable Nagle's algorithm by default. #: ``[(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)]`` default_socket_options: typing.ClassVar[connection._TYPE_SOCKET_OPTIONS] = [ (socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) ] #: Whether this connection verifies the host's certificate. is_verified: bool = False #: Whether this proxy connection verified the proxy host's certificate. # If no proxy is currently connected to the value will be ``None``. proxy_is_verified: bool | None = None blocksize: int source_address: tuple[str, int] | None socket_options: connection._TYPE_SOCKET_OPTIONS | None _has_connected_to_proxy: bool _response_options: _ResponseOptions | None _tunnel_host: str | None _tunnel_port: int | None _tunnel_scheme: str | None def __init__( self, host: str, port: int | None = None, *, timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT, source_address: tuple[str, int] | None = None, blocksize: int = 16384, socket_options: None | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options, proxy: Url | None = None, proxy_config: ProxyConfig | None = None, ) -> None: super().__init__( host=host, port=port, timeout=Timeout.resolve_default_timeout(timeout), source_address=source_address, blocksize=blocksize, ) self.socket_options = socket_options self.proxy = proxy self.proxy_config = proxy_config self._has_connected_to_proxy = False self._response_options = None self._tunnel_host: str | None = None self._tunnel_port: int | None = None self._tunnel_scheme: str | None = None # https://github.com/python/mypy/issues/4125 # Mypy treats this as LSP violation, which is considered a bug. # If `host` is made a property it violates LSP, because a writeable attribute is overridden with a read-only one. 
# However, there is also a `host` setter so LSP is not violated. # Potentially, a `@host.deleter` might be needed depending on how this issue will be fixed. @property def host(self) -> str: """ Getter method to remove any trailing dots that indicate the hostname is an FQDN. In general, SSL certificates don't include the trailing dot indicating a fully-qualified domain name, and thus, they don't validate properly when checked against a domain name that includes the dot. In addition, some servers may not expect to receive the trailing dot when provided. However, the hostname with trailing dot is critical to DNS resolution; doing a lookup with the trailing dot will properly only resolve the appropriate FQDN, whereas a lookup without a trailing dot will search the system's search domain list. Thus, it's important to keep the original host around for use only in those cases where it's appropriate (i.e., when doing DNS lookup to establish the actual TCP connection across which we're going to send HTTP requests). """ return self._dns_host.rstrip(".") @host.setter def host(self, value: str) -> None: """ Setter for the `host` property. We assume that only urllib3 uses the _dns_host attribute; httplib itself only uses `host`, and it seems reasonable that other libraries follow suit. """ self._dns_host = value def _new_conn(self) -> socket.socket: """Establish a socket connection and set nodelay settings on it. :return: New socket connection. """ try: sock = connection.create_connection( (self._dns_host, self.port), self.timeout, source_address=self.source_address, socket_options=self.socket_options, ) except socket.gaierror as e: raise NameResolutionError(self.host, self, e) from e except SocketTimeout as e: raise ConnectTimeoutError( self, f"Connection to {self.host} timed out. (connect timeout={self.timeout})", ) from e except OSError as e: raise NewConnectionError( self, f"Failed to establish a new connection: {e}" ) from e # Audit hooks are only available in Python 3.8+ if _HAS_SYS_AUDIT: sys.audit("http.client.connect", self, self.host, self.port) return sock def set_tunnel( self, host: str, port: int | None = None, headers: typing.Mapping[str, str] | None = None, scheme: str = "http", ) -> None: if scheme not in ("http", "https"): raise ValueError( f"Invalid proxy scheme for tunneling: {scheme!r}, must be either 'http' or 'https'" ) super().set_tunnel(host, port=port, headers=headers) self._tunnel_scheme = scheme def connect(self) -> None: self.sock = self._new_conn() if self._tunnel_host: # If we're tunneling it means we're connected to our proxy. self._has_connected_to_proxy = True # TODO: Fix tunnel so it doesn't depend on self.sock state. self._tunnel() # type: ignore[attr-defined] # If there's a proxy to be connected to we are fully connected. # This is set twice (once above and here) due to forwarding proxies # not using tunnelling. self._has_connected_to_proxy = bool(self.proxy) @property def is_closed(self) -> bool: return self.sock is None @property def is_connected(self) -> bool: if self.sock is None: return False return not wait_for_read(self.sock, timeout=0.0) @property def has_connected_to_proxy(self) -> bool: return self._has_connected_to_proxy def close(self) -> None: try: super().close() finally: # Reset all stateful properties so connection # can be re-used without leaking prior configs. 
self.sock = None self.is_verified = False self.proxy_is_verified = None self._has_connected_to_proxy = False self._response_options = None self._tunnel_host = None self._tunnel_port = None self._tunnel_scheme = None def putrequest( self, method: str, url: str, skip_host: bool = False, skip_accept_encoding: bool = False, ) -> None: """""" # Empty docstring because the indentation of CPython's implementation # is broken but we don't want this method in our documentation. match = _CONTAINS_CONTROL_CHAR_RE.search(method) if match: raise ValueError( f"Method cannot contain non-token characters {method!r} (found at least {match.group()!r})" ) return super().putrequest( method, url, skip_host=skip_host, skip_accept_encoding=skip_accept_encoding ) def putheader(self, header: str, *values: str) -> None: """""" if not any(isinstance(v, str) and v == SKIP_HEADER for v in values): super().putheader(header, *values) elif to_str(header.lower()) not in SKIPPABLE_HEADERS: skippable_headers = "', '".join( [str.title(header) for header in sorted(SKIPPABLE_HEADERS)] ) raise ValueError( f"urllib3.util.SKIP_HEADER only supports '{skippable_headers}'" ) # `request` method's signature intentionally violates LSP. # urllib3's API is different from `http.client.HTTPConnection` and the subclassing is only incidental. def request( # type: ignore[override] self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, *, chunked: bool = False, preload_content: bool = True, decode_content: bool = True, enforce_content_length: bool = True, ) -> None: # Update the inner socket's timeout value to send the request. # This only triggers if the connection is re-used. if self.sock is not None: self.sock.settimeout(self.timeout) # Store these values to be fed into the HTTPResponse # object later. TODO: Remove this in favor of a real # HTTP lifecycle mechanism. # We have to store these before we call .request() # because sometimes we can still salvage a response # off the wire even if we aren't able to completely # send the request body. self._response_options = _ResponseOptions( request_method=method, request_url=url, preload_content=preload_content, decode_content=decode_content, enforce_content_length=enforce_content_length, ) if headers is None: headers = {} header_keys = frozenset(to_str(k.lower()) for k in headers) skip_accept_encoding = "accept-encoding" in header_keys skip_host = "host" in header_keys self.putrequest( method, url, skip_accept_encoding=skip_accept_encoding, skip_host=skip_host ) # Transform the body into an iterable of sendall()-able chunks # and detect if an explicit Content-Length is doable. chunks_and_cl = body_to_chunks(body, method=method, blocksize=self.blocksize) chunks = chunks_and_cl.chunks content_length = chunks_and_cl.content_length # When chunked is explicit set to 'True' we respect that. if chunked: if "transfer-encoding" not in header_keys: self.putheader("Transfer-Encoding", "chunked") else: # Detect whether a framing mechanism is already in use. If so # we respect that value, otherwise we pick chunked vs content-length # depending on the type of 'body'. if "content-length" in header_keys: chunked = False elif "transfer-encoding" in header_keys: chunked = True # Otherwise we go off the recommendation of 'body_to_chunks()'. 
else: chunked = False if content_length is None: if chunks is not None: chunked = True self.putheader("Transfer-Encoding", "chunked") else: self.putheader("Content-Length", str(content_length)) # Now that framing headers are out of the way we send all the other headers. if "user-agent" not in header_keys: self.putheader("User-Agent", _get_default_user_agent()) for header, value in headers.items(): self.putheader(header, value) self.endheaders() # If we're given a body we start sending that in chunks. if chunks is not None: for chunk in chunks: # Sending empty chunks isn't allowed for TE: chunked # as it indicates the end of the body. if not chunk: continue if isinstance(chunk, str): chunk = chunk.encode("utf-8") if chunked: self.send(b"%x\r\n%b\r\n" % (len(chunk), chunk)) else: self.send(chunk) # Regardless of whether we have a body or not, if we're in # chunked mode we want to send an explicit empty chunk. if chunked: self.send(b"0\r\n\r\n") def request_chunked( self, method: str, url: str, body: _TYPE_BODY | None = None, headers: typing.Mapping[str, str] | None = None, ) -> None: """ Alternative to the common request method, which sends the body with chunked encoding and not as one block """ warnings.warn( "HTTPConnection.request_chunked() is deprecated and will be removed " "in urllib3 v2.1.0. Instead use HTTPConnection.request(..., chunked=True).", category=DeprecationWarning, stacklevel=2, ) self.request(method, url, body=body, headers=headers, chunked=True) def getresponse( # type: ignore[override] self, ) -> HTTPResponse: """ Get the response from the server. If the HTTPConnection is in the correct state, returns an instance of HTTPResponse or of whatever object is returned by the response_class variable. If a request has not been sent or if a previous response has not be handled, ResponseNotReady is raised. If the HTTP response indicates that the connection should be closed, then it will be closed before the response is returned. When the connection is closed, the underlying socket is closed. """ # Raise the same error as http.client.HTTPConnection if self._response_options is None: raise ResponseNotReady() # Reset this attribute for being used again. resp_options = self._response_options self._response_options = None # Since the connection's timeout value may have been updated # we need to set the timeout on the socket. self.sock.settimeout(self.timeout) # This is needed here to avoid circular import errors # Get the response from http.client.HTTPConnection httplib_response = super().getresponse() try: assert_header_parsing(httplib_response.msg)
except (HeaderParsingError, TypeError) as hpe:
12
2023-12-16 04:12:01+00:00
24k
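The lines above complete one record of this part of the dump: a list of retrieved context entries (each rendered with an "identifier", a "path", and a "snippet"), the file's import block, a numeric field, a shorter code fragment, the longer module text, a single target source line ("except (HeaderParsingError, TypeError) as hpe:"), an integer that reads as an index into the context list (12), a timestamp, and what looks like a size bucket ("24k"). The sketch below shows one way rows shaped like this might be consumed. It is a minimal sketch only: it assumes the rows are serialized as JSON lines, and the top-level key names ("context", "gold_snippet_index", "next_line") and the file name "samples.jsonl" are assumptions, since only the per-entry keys are visible in the dump itself.

import json
from typing import Dict, Iterator, Optional

def iter_rows(path: str) -> Iterator[Dict]:
    # One JSON object per non-empty line of a JSON-lines file.
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            line = line.strip()
            if line:
                yield json.loads(line)

def gold_snippet(row: Dict) -> Optional[Dict]:
    # Return the context entry selected by the row's integer index, if valid.
    entries = row.get("context") or []
    index = row.get("gold_snippet_index")
    if isinstance(index, int) and 0 <= index < len(entries):
        return entries[index]
    return None

if __name__ == "__main__":
    for row in iter_rows("samples.jsonl"):  # hypothetical file name
        entry = gold_snippet(row)
        if entry is not None:
            # Each context entry carries an "identifier", a "path", and a "snippet".
            print(entry["identifier"], "from", entry["path"])
            print("target line:", row.get("next_line"))

Pairing the selected snippet with the target line mirrors how the record above is laid out, but the key names remain an assumption about the underlying serialization rather than something the dump states.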
YaoFANGUK/video-subtitle-remover
backend/scenedetect/scene_manager.py
[ { "identifier": "SimpleTableCell", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableCell(object):\n \"\"\"A table class to create table cells.\n\n Example:\n cell = SimpleTableCell('Hello, world!')\n \"\"\"\n\n def __init__(self, text, header=False):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n text -- text to be displayed\n header -- flag to indicate this cell is a header cell.\n \"\"\"\n self.text = text\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell.\"\"\"\n if self.header:\n return '<th>%s</th>' % (self.text)\n else:\n return '<td>%s</td>' % (self.text)" }, { "identifier": "SimpleTableImage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableImage(object):\n \"\"\"A table class to create table cells with an image.\n\n Example:\n cell = SimpleTableImage('images/image_1.jpg')\n \"\"\"\n\n def __init__(self, image_file, width=None, height=None):\n \"\"\"Table cell constructor.\n\n Keyword arguments:\n image_file -- relative filepath to image file to display.\n width -- (optional) width of the image in pixels\n height -- (optional) height of the image in pixels\n \"\"\"\n self.image_file = image_file\n if width:\n self.width = round(width)\n else:\n self.width = width\n if height:\n self.height = round(height)\n else:\n self.height = height\n\n def __str__(self):\n \"\"\"Return the HTML code for the table cell with the image.\"\"\"\n safe_filename = quote(self.image_file)\n output = '<a href=\"%s\" target=\"_blank\">' % (safe_filename)\n output += '<img src=\"%s\"' % (safe_filename)\n if self.height:\n output += ' height=\"%s\"' % (self.height)\n if self.width:\n output += ' width=\"%s\"' % (self.width)\n output += '></a>'\n\n return output" }, { "identifier": "SimpleTableRow", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTableRow(object):\n \"\"\"A table class to create table rows, populated by table cells.\n\n Example:\n # Row from list\n row = SimpleTableRow(['Hello,', 'world!'])\n\n # Row from SimpleTableCell\n cell1 = SimpleTableCell('Hello,')\n cell2 = SimpleTableCell('world!')\n row = SimpleTableRow([cell1, cell2])\n \"\"\"\n\n def __init__(self, cells=None, header=False):\n \"\"\"Table row constructor.\n\n Keyword arguments:\n cells -- iterable of SimpleTableCell (default None)\n header -- flag to indicate this row is a header row.\n if the cells are SimpleTableCell, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n \"\"\"\n cells = cells or []\n if isinstance(cells[0], SimpleTableCell):\n self.cells = cells\n else:\n self.cells = [SimpleTableCell(cell, header=header) for cell in cells]\n\n self.header = header\n\n def __str__(self):\n \"\"\"Return the HTML code for the table row and its cells as a string.\"\"\"\n row = []\n\n row.append('<tr>')\n\n for cell in self.cells:\n row.append(str(cell))\n\n row.append('</tr>')\n\n return '\\n'.join(row)\n\n def __iter__(self):\n \"\"\"Iterate through row cells\"\"\"\n for cell in self.cells:\n yield cell\n\n def add_cell(self, cell):\n \"\"\"Add a SimpleTableCell object to the list of cells.\"\"\"\n self.cells.append(cell)\n\n def add_cells(self, cells):\n \"\"\"Add a list of SimpleTableCell objects to the list of cells.\"\"\"\n for cell in cells:\n self.cells.append(cell)" }, { "identifier": "SimpleTable", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class SimpleTable(object):\n 
\"\"\"A table class to create HTML tables, populated by HTML table rows.\n\n Example:\n # Table from lists\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']])\n\n # Table with header row\n table = SimpleTable([['Hello,', 'world!'], ['How', 'are', 'you?']],\n header_row=['Header1', 'Header2', 'Header3'])\n\n # Table from SimpleTableRow\n rows = SimpleTableRow(['Hello,', 'world!'])\n table = SimpleTable(rows)\n \"\"\"\n\n def __init__(self, rows=None, header_row=None, css_class=None):\n \"\"\"Table constructor.\n\n Keyword arguments:\n rows -- iterable of SimpleTableRow\n header_row -- row that will be displayed at the beginning of the table.\n if this row is SimpleTableRow, it is the programmer's\n responsibility to verify whether it was created with the\n header flag set to True.\n css_class -- table CSS class\n \"\"\"\n rows = rows or []\n if isinstance(rows[0], SimpleTableRow):\n self.rows = rows\n else:\n self.rows = [SimpleTableRow(row) for row in rows]\n\n if header_row is None:\n self.header_row = None\n elif isinstance(header_row, SimpleTableRow):\n self.header_row = header_row\n else:\n self.header_row = SimpleTableRow(header_row, header=True)\n\n self.css_class = css_class\n\n def __str__(self):\n \"\"\"Return the HTML code for the table as a string.\"\"\"\n table = []\n\n if self.css_class:\n table.append('<table class=%s>' % self.css_class)\n else:\n table.append('<table>')\n\n if self.header_row:\n table.append(str(self.header_row))\n\n for row in self.rows:\n table.append(str(row))\n\n table.append('</table>')\n\n return '\\n'.join(table)\n\n def __iter__(self):\n \"\"\"Iterate through table rows\"\"\"\n for row in self.rows:\n yield row\n\n def add_row(self, row):\n \"\"\"Add a SimpleTableRow object to the list of rows.\"\"\"\n self.rows.append(row)\n\n def add_rows(self, rows):\n \"\"\"Add a list of SimpleTableRow objects to the list of rows.\"\"\"\n for row in rows:\n self.rows.append(row)" }, { "identifier": "HTMLPage", "path": "backend/scenedetect/_thirdparty/simpletable.py", "snippet": "class HTMLPage(object):\n \"\"\"A class to create HTML pages containing CSS and tables.\"\"\"\n\n def __init__(self, tables=None, css=None, encoding=\"utf-8\"):\n \"\"\"HTML page constructor.\n\n Keyword arguments:\n tables -- List of SimpleTable objects\n css -- Cascading Style Sheet specification that is appended before the\n table string\n encoding -- Characters encoding. 
Default: UTF-8\n \"\"\"\n self.tables = tables or []\n self.css = css\n self.encoding = encoding\n\n def __str__(self):\n \"\"\"Return the HTML page as a string.\"\"\"\n page = []\n\n if self.css:\n page.append('<style type=\"text/css\">\\n%s\\n</style>' % self.css)\n\n # Set encoding\n page.append('<meta http-equiv=\"Content-Type\" content=\"text/html;'\n 'charset=%s\">' % self.encoding)\n\n for table in self.tables:\n page.append(str(table))\n page.append('<br />')\n\n return '\\n'.join(page)\n\n def __iter__(self):\n \"\"\"Iterate through tables\"\"\"\n for table in self.tables:\n yield table\n\n def save(self, filename):\n \"\"\"Save HTML page to a file using the proper encoding\"\"\"\n with codecs.open(filename, 'w', self.encoding) as outfile:\n for line in str(self):\n outfile.write(line)\n\n def add_table(self, table):\n \"\"\"Add a SimpleTable to the page list of tables\"\"\"\n self.tables.append(table)" }, { "identifier": "tqdm", "path": "backend/scenedetect/platform.py", "snippet": "class FakeTqdmObject:\nclass FakeTqdmLoggingRedirect:\nclass CommandTooLong(Exception):\nclass Template(string.Template):\n def __init__(self, **kawrgs):\n def update(self, n=1):\n def close(self):\n def set_description(self, desc=None, refresh=True):\n def __init__(self, **kawrgs):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\ndef get_cv2_imwrite_params() -> Dict[str, Union[int, None]]:\n def _get_cv2_param(param_name: str) -> Union[int, None]:\ndef get_file_name(file_path: AnyStr, include_extension=True) -> AnyStr:\ndef get_and_create_path(file_path: AnyStr, output_directory: Optional[AnyStr] = None) -> AnyStr:\ndef init_logger(log_level: int = logging.INFO,\n show_stdout: bool = False,\n log_file: Optional[str] = None):\ndef invoke_command(args: List[str]) -> int:\ndef get_ffmpeg_path() -> Optional[str]:\ndef get_ffmpeg_version() -> Optional[str]:\ndef get_mkvmerge_version() -> Optional[str]:\ndef get_system_version_info() -> str:\n INFO_TEMPLATE = '[PySceneDetect] %(message)s'\n DEBUG_TEMPLATE = '%(levelname)s: %(module)s.%(funcName)s(): %(message)s'" }, { "identifier": "FrameTimecode", "path": "backend/scenedetect/frame_timecode.py", "snippet": "class FrameTimecode:\n \"\"\"Object for frame-based timecodes, using the video framerate to compute back and\n forth between frame number and seconds/timecode.\n\n A timecode is valid only if it complies with one of the following three types/formats:\n\n 1. Timecode as `str` in the form 'HH:MM:SS[.nnn]' (`'01:23:45'` or `'01:23:45.678'`)\n 2. Number of seconds as `float`, or `str` in form 'Ss' or 'S.SSSs' (`'2s'` or `'2.3456s'`)\n 3. Exact number of frames as `int`, or `str` in form NNNNN (`123` or `'123'`)\n \"\"\"\n\n def __init__(self,\n timecode: Union[int, float, str, 'FrameTimecode'] = None,\n fps: Union[int, float, str, 'FrameTimecode'] = None):\n \"\"\"\n Arguments:\n timecode: A frame number (int), number of seconds (float), or timecode (str in\n the form `'HH:MM:SS'` or `'HH:MM:SS.nnn'`).\n fps: The framerate or FrameTimecode to use as a time base for all arithmetic.\n Raises:\n TypeError: Thrown if either `timecode` or `fps` are unsupported types.\n ValueError: Thrown when specifying a negative timecode or framerate.\n \"\"\"\n # The following two properties are what is used to keep track of time\n # in a frame-specific manner. 
Note that once the framerate is set,\n # the value should never be modified (only read if required).\n # TODO(v1.0): Make these actual @properties.\n self.framerate = None\n self.frame_num = None\n\n # Copy constructor. Only the timecode argument is used in this case.\n if isinstance(timecode, FrameTimecode):\n self.framerate = timecode.framerate\n self.frame_num = timecode.frame_num\n if fps is not None:\n raise TypeError('Framerate cannot be overwritten when copying a FrameTimecode.')\n else:\n # Ensure other arguments are consistent with API.\n if fps is None:\n raise TypeError('Framerate (fps) is a required argument.')\n if isinstance(fps, FrameTimecode):\n fps = fps.framerate\n\n # Process the given framerate, if it was not already set.\n if not isinstance(fps, (int, float)):\n raise TypeError('Framerate must be of type int/float.')\n if (isinstance(fps, int) and not fps > 0) or (isinstance(fps, float)\n and not fps >= MAX_FPS_DELTA):\n raise ValueError('Framerate must be positive and greater than zero.')\n self.framerate = float(fps)\n\n # Process the timecode value, storing it as an exact number of frames.\n if isinstance(timecode, str):\n self.frame_num = self._parse_timecode_string(timecode)\n else:\n self.frame_num = self._parse_timecode_number(timecode)\n\n # TODO(v1.0): Add a `frame` property to replace the existing one and deprecate this getter.\n def get_frames(self) -> int:\n \"\"\"Get the current time/position in number of frames. This is the\n equivalent of accessing the self.frame_num property (which, along\n with the specified framerate, forms the base for all of the other\n time measurement calculations, e.g. the :meth:`get_seconds` method).\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. ``FrameTimecode(10, 10.0) <= 10``).\n\n Returns:\n int: The current time in frames (the current frame number).\n \"\"\"\n return self.frame_num\n\n # TODO(v1.0): Add a `framerate` property to replace the existing one and deprecate this getter.\n def get_framerate(self) -> float:\n \"\"\"Get Framerate: Returns the framerate used by the FrameTimecode object.\n\n Returns:\n float: Framerate of the current FrameTimecode object, in frames per second.\n \"\"\"\n return self.framerate\n\n def equal_framerate(self, fps) -> bool:\n \"\"\"Equal Framerate: Determines if the passed framerate is equal to that of this object.\n\n Arguments:\n fps: Framerate to compare against within the precision constant defined in this module\n (see :data:`MAX_FPS_DELTA`).\n Returns:\n bool: True if passed fps matches the FrameTimecode object's framerate, False otherwise.\n\n \"\"\"\n return math.fabs(self.framerate - fps) < MAX_FPS_DELTA\n\n # TODO(v1.0): Add a `seconds` property to replace this and deprecate the existing one.\n def get_seconds(self) -> float:\n \"\"\"Get the frame's position in number of seconds.\n\n If using to compare a :class:`FrameTimecode` with a frame number,\n you can do so directly against the object (e.g. 
``FrameTimecode(10, 10.0) <= 1.0``).\n\n Returns:\n float: The current time/position in seconds.\n \"\"\"\n return float(self.frame_num) / self.framerate\n\n # TODO(v1.0): Add a `timecode` property to replace this and deprecate the existing one.\n def get_timecode(self, precision: int = 3, use_rounding: bool = True) -> str:\n \"\"\"Get a formatted timecode string of the form HH:MM:SS[.nnn].\n\n Args:\n precision: The number of decimal places to include in the output ``[.nnn]``.\n use_rounding: Rounds the output to the desired precision. If False, the value\n will be truncated to the specified precision.\n\n Returns:\n str: The current time in the form ``\"HH:MM:SS[.nnn]\"``.\n \"\"\"\n # Compute hours and minutes based off of seconds, and update seconds.\n secs = self.get_seconds()\n base = 60.0 * 60.0\n hrs = int(secs / base)\n secs -= (hrs * base)\n base = 60.0\n mins = int(secs / base)\n secs -= (mins * base)\n # Convert seconds into string based on required precision.\n if precision > 0:\n if use_rounding:\n secs = round(secs, precision)\n msec = format(secs, '.%df' % precision)[-precision:]\n secs = '%02d.%s' % (int(secs), msec)\n else:\n secs = '%02d' % int(round(secs, 0)) if use_rounding else '%02d' % int(secs)\n # Return hours, minutes, and seconds as a formatted timecode string.\n return '%02d:%02d:%s' % (hrs, mins, secs)\n\n # TODO(v1.0): Add a `previous` property to replace the existing one and deprecate this getter.\n def previous_frame(self) -> 'FrameTimecode':\n \"\"\"Return a new FrameTimecode for the previous frame (or 0 if on frame 0).\"\"\"\n new_timecode = FrameTimecode(self)\n new_timecode.frame_num = max(0, new_timecode.frame_num - 1)\n return new_timecode\n\n def _seconds_to_frames(self, seconds: float) -> int:\n \"\"\"Convert the passed value seconds to the nearest number of frames using\n the current FrameTimecode object's FPS (self.framerate).\n\n Returns:\n Integer number of frames the passed number of seconds represents using\n the current FrameTimecode's framerate property.\n \"\"\"\n return round(seconds * self.framerate)\n\n def _parse_timecode_number(self, timecode: Union[int, float]) -> int:\n \"\"\" Parse a timecode number, storing it as the exact number of frames.\n Can be passed as frame number (int), seconds (float)\n\n Raises:\n TypeError, ValueError\n \"\"\"\n # Process the timecode value, storing it as an exact number of frames.\n # Exact number of frames N\n if isinstance(timecode, int):\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive and greater than zero.')\n return timecode\n # Number of seconds S\n elif isinstance(timecode, float):\n if timecode < 0.0:\n raise ValueError('Timecode value must be positive and greater than zero.')\n return self._seconds_to_frames(timecode)\n # FrameTimecode\n elif isinstance(timecode, FrameTimecode):\n return timecode.frame_num\n elif timecode is None:\n raise TypeError('Timecode/frame number must be specified!')\n else:\n raise TypeError('Timecode format/type unrecognized.')\n\n def _parse_timecode_string(self, timecode_string: str) -> int:\n \"\"\"Parses a string based on the three possible forms (in timecode format,\n as an integer number of frames, or floating-point seconds, ending with 's').\n\n Requires that the `framerate` property is set before calling this method.\n Assuming a framerate of 30.0 FPS, the strings '00:05:00.000', '00:05:00',\n '9000', '300s', and '300.0s' are all possible valid values, all representing\n a period of time equal to 5 minutes, 300 seconds, or 9000 
frames (at 30 FPS).\n\n Raises:\n TypeError, ValueError\n \"\"\"\n if self.framerate is None:\n raise TypeError('self.framerate must be set before calling _parse_timecode_string.')\n # Number of seconds S\n if timecode_string.endswith('s'):\n secs = timecode_string[:-1]\n if not secs.replace('.', '').isdigit():\n raise ValueError('All characters in timecode seconds string must be digits.')\n secs = float(secs)\n if secs < 0.0:\n raise ValueError('Timecode seconds value must be positive.')\n return self._seconds_to_frames(secs)\n # Exact number of frames N\n elif timecode_string.isdigit():\n timecode = int(timecode_string)\n if timecode < 0:\n raise ValueError('Timecode frame number must be positive.')\n return timecode\n # Standard timecode in string format 'HH:MM:SS[.nnn]'\n else:\n tc_val = timecode_string.split(':')\n if not (len(tc_val) == 3 and tc_val[0].isdigit() and tc_val[1].isdigit()\n and tc_val[2].replace('.', '').isdigit()):\n raise ValueError('Unrecognized or improperly formatted timecode string.')\n hrs, mins = int(tc_val[0]), int(tc_val[1])\n secs = float(tc_val[2]) if '.' in tc_val[2] else int(tc_val[2])\n if not (hrs >= 0 and mins >= 0 and secs >= 0 and mins < 60 and secs < 60):\n raise ValueError('Invalid timecode range (values outside allowed range).')\n secs += (((hrs * 60.0) + mins) * 60.0)\n return self._seconds_to_frames(secs)\n\n def __iadd__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num += other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num += other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for addition.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num += self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num += self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing addition with FrameTimecode.')\n if self.frame_num < 0: # Required to allow adding negative seconds/frames.\n self.frame_num = 0\n return self\n\n def __add__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return += other\n return to_return\n\n def __isub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n self.frame_num -= other\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n self.frame_num -= other.frame_num\n else:\n raise ValueError('FrameTimecode instances require equal framerate for subtraction.')\n # Check if value to add is in number of seconds.\n elif isinstance(other, float):\n self.frame_num -= self._seconds_to_frames(other)\n elif isinstance(other, str):\n self.frame_num -= self._parse_timecode_string(other)\n else:\n raise TypeError('Unsupported type for performing subtraction with FrameTimecode: %s' %\n type(other))\n if self.frame_num < 0:\n self.frame_num = 0\n return self\n\n def __sub__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n to_return = FrameTimecode(timecode=self)\n to_return -= other\n return to_return\n\n def __eq__(self, other: Union[int, float, str, 'FrameTimecode']) -> 'FrameTimecode':\n if isinstance(other, int):\n return self.frame_num == other\n elif isinstance(other, float):\n return self.get_seconds() == other\n elif isinstance(other, str):\n return self.frame_num == 
self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num == other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n elif other is None:\n return False\n else:\n raise TypeError('Unsupported type for performing == with FrameTimecode: %s' %\n type(other))\n\n def __ne__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n return not self == other\n\n def __lt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num < other\n elif isinstance(other, float):\n return self.get_seconds() < other\n elif isinstance(other, str):\n return self.frame_num < self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num < other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing < with FrameTimecode: %s' %\n type(other))\n\n def __le__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num <= other\n elif isinstance(other, float):\n return self.get_seconds() <= other\n elif isinstance(other, str):\n return self.frame_num <= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num <= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing <= with FrameTimecode: %s' %\n type(other))\n\n def __gt__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num > other\n elif isinstance(other, float):\n return self.get_seconds() > other\n elif isinstance(other, str):\n return self.frame_num > self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num > other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing > with FrameTimecode: %s' %\n type(other))\n\n def __ge__(self, other: Union[int, float, str, 'FrameTimecode']) -> bool:\n if isinstance(other, int):\n return self.frame_num >= other\n elif isinstance(other, float):\n return self.get_seconds() >= other\n elif isinstance(other, str):\n return self.frame_num >= self._parse_timecode_string(other)\n elif isinstance(other, FrameTimecode):\n if self.equal_framerate(other.framerate):\n return self.frame_num >= other.frame_num\n else:\n raise TypeError(\n 'FrameTimecode objects must have the same framerate to be compared.')\n else:\n raise TypeError('Unsupported type for performing >= with FrameTimecode: %s' %\n type(other))\n\n # TODO(v1.0): __int__ and __float__ should be removed. 
Mark as deprecated, and indicate\n # need to use relevant property instead.\n\n def __int__(self) -> int:\n return self.frame_num\n\n def __float__(self) -> float:\n return self.get_seconds()\n\n def __str__(self) -> str:\n return self.get_timecode()\n\n def __repr__(self) -> str:\n return '%s [frame=%d, fps=%.3f]' % (self.get_timecode(), self.frame_num, self.framerate)\n\n def __hash__(self) -> int:\n return self.frame_num" }, { "identifier": "VideoStream", "path": "backend/scenedetect/video_stream.py", "snippet": "class VideoStream(ABC):\n \"\"\" Interface which all video backends must implement. \"\"\"\n\n #\n # Default Implementations\n #\n\n @property\n def base_timecode(self) -> FrameTimecode:\n \"\"\"FrameTimecode object to use as a time base.\"\"\"\n return FrameTimecode(timecode=0, fps=self.frame_rate)\n\n #\n # Abstract Static Methods\n #\n\n @staticmethod\n @abstractmethod\n def BACKEND_NAME() -> str:\n \"\"\"Unique name used to identify this backend. Should be a static property in derived\n classes (`BACKEND_NAME = 'backend_identifier'`).\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Properties\n #\n\n @property\n @abstractmethod\n def path(self) -> Union[bytes, str]:\n \"\"\"Video or device path.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def name(self) -> Union[bytes, str]:\n \"\"\"Name of the video, without extension, or device.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def is_seekable(self) -> bool:\n \"\"\"True if seek() is allowed, False otherwise.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_rate(self) -> float:\n \"\"\"Frame rate in frames/sec.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def duration(self) -> Optional[FrameTimecode]:\n \"\"\"Duration of the stream as a FrameTimecode, or None if non terminating.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_size(self) -> Tuple[int, int]:\n \"\"\"Size of each video frame in pixels as a tuple of (width, height).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def aspect_ratio(self) -> float:\n \"\"\"Pixel aspect ratio as a float (1.0 represents square pixels).\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position(self) -> FrameTimecode:\n \"\"\"Current position within stream as FrameTimecode.\n\n This can be interpreted as presentation time stamp, thus frame 1 corresponds\n to the presentation time 0. Returns 0 even if `frame_number` is 1.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def position_ms(self) -> float:\n \"\"\"Current position within stream as a float of the presentation time in\n milliseconds. The first frame has a PTS of 0.\"\"\"\n raise NotImplementedError\n\n @property\n @abstractmethod\n def frame_number(self) -> int:\n \"\"\"Current position within stream as the frame number.\n\n Will return 0 until the first frame is `read`.\"\"\"\n raise NotImplementedError\n\n #\n # Abstract Methods\n #\n\n @abstractmethod\n def read(self, decode: bool = True, advance: bool = True) -> Union[ndarray, bool]:\n \"\"\"Read and decode the next frame as a numpy.ndarray. Returns False when video ends.\n\n Arguments:\n decode: Decode and return the frame.\n advance: Seek to the next frame. 
If False, will return the current (last) frame.\n\n Returns:\n If decode = True, the decoded frame (numpy.ndarray), or False (bool) if end of video.\n If decode = False, a bool indicating if advancing to the the next frame succeeded.\n \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def reset(self) -> None:\n \"\"\" Close and re-open the VideoStream (equivalent to seeking back to beginning). \"\"\"\n raise NotImplementedError\n\n @abstractmethod\n def seek(self, target: Union[FrameTimecode, float, int]) -> None:\n \"\"\"Seek to the given timecode. If given as a frame number, represents the current seek\n pointer (e.g. if seeking to 0, the next frame decoded will be the first frame of the video).\n\n For 1-based indices (first frame is frame #1), the target frame number needs to be converted\n to 0-based by subtracting one. For example, if we want to seek to the first frame, we call\n seek(0) followed by read(). If we want to seek to the 5th frame, we call seek(4) followed\n by read(), at which point frame_number will be 5.\n\n May not be supported on all backend types or inputs (e.g. cameras).\n\n Arguments:\n target: Target position in video stream to seek to.\n If float, interpreted as time in seconds.\n If int, interpreted as frame number.\n Raises:\n SeekError: An error occurs while seeking, or seeking is not supported.\n ValueError: `target` is not a valid value (i.e. it is negative).\n \"\"\"\n raise NotImplementedError" }, { "identifier": "SceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SceneDetector:\n \"\"\" Base class to inherit from when implementing a scene detection algorithm.\n\n This API is not yet stable and subject to change.\n\n This represents a \"dense\" scene detector, which returns a list of frames where\n the next scene/shot begins in a video.\n\n Also see the implemented scene detectors in the scenedetect.detectors module\n to get an idea of how a particular detector can be created.\n \"\"\"\n # TODO(v0.7): Make this a proper abstract base class.\n\n stats_manager: Optional[StatsManager] = None\n \"\"\"Optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to\n use for caching frame metrics to and from.\"\"\"\n\n # TODO(v1.0): Remove - this is a rarely used case for what is now a neglegible performance gain.\n def is_processing_required(self, frame_num: int) -> bool:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Test if all calculations for a given frame are already done.\n\n Returns:\n False if the SceneDetector has assigned _metric_keys, and the\n stats_manager property is set to a valid StatsManager object containing\n the required frame metrics/calculations for the given frame - thus, not\n needing the frame to perform scene detection.\n\n True otherwise (i.e. 
the frame_img passed to process_frame is required\n to be passed to process_frame for the given frame_num).\n \"\"\"\n metric_keys = self.get_metrics()\n return not metric_keys or not (self.stats_manager is not None\n and self.stats_manager.metrics_exist(frame_num, metric_keys))\n\n def stats_manager_required(self) -> bool:\n \"\"\"Stats Manager Required: Prototype indicating if detector requires stats.\n\n Returns:\n True if a StatsManager is required for the detector, False otherwise.\n \"\"\"\n return False\n\n def get_metrics(self) -> List[str]:\n \"\"\"Get Metrics: Get a list of all metric names/keys used by the detector.\n\n Returns:\n List of strings of frame metric key names that will be used by\n the detector when a StatsManager is passed to process_frame.\n \"\"\"\n return []\n\n def process_frame(self, frame_num: int, frame_img: Optional[numpy.ndarray]) -> List[int]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[int]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame numbers of cuts to be added to the cutting list.\n \"\"\"\n return []\n\n @property\n def event_buffer_length(self) -> int:\n \"\"\"The amount of frames a given event can be buffered for, in time. Represents maximum\n amount any event can be behind `frame_number` in the result of :meth:`process_frame`.\n \"\"\"\n return 0" }, { "identifier": "SparseSceneDetector", "path": "backend/scenedetect/scene_detector.py", "snippet": "class SparseSceneDetector(SceneDetector):\n \"\"\"Base class to inherit from when implementing a sparse scene detection algorithm.\n\n This class will be removed in v1.0 and should not be used.\n\n Unlike dense detectors, sparse detectors scene_detect \"events\" and return a *pair* of frames,\n as opposed to just a single cut.\n\n An example of a SparseSceneDetector is the MotionDetector.\n \"\"\"\n\n def process_frame(self, frame_num: int, frame_img: numpy.ndarray) -> List[Tuple[int, int]]:\n \"\"\"Process Frame: Computes/stores metrics and detects any scene changes.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []\n\n def post_process(self, frame_num: int) -> List[Tuple[int, int]]:\n \"\"\"Post Process: Performs any processing after the last frame has been read.\n\n Prototype method, no actual detection.\n\n Returns:\n List of frame pairs representing individual scenes\n to be added to the output scene list directly.\n \"\"\"\n return []" }, { "identifier": "StatsManager", "path": "backend/scenedetect/stats_manager.py", "snippet": "class StatsManager:\n \"\"\"Provides a key-value store for frame metrics/calculations which can be used\n for two-pass detection algorithms, as well as saving stats to a CSV file.\n\n Analyzing a statistics CSV file is also very useful for finding the optimal\n algorithm parameters for certain detection methods. Additionally, the data\n may be plotted by a graphing module (e.g. matplotlib) by obtaining the\n metric of interest for a series of frames by iteratively calling get_metrics(),\n after having called the detect_scenes(...) 
method on the SceneManager object\n which owns the given StatsManager instance.\n\n Only metrics consisting of `float` or `int` should be used currently.\n \"\"\"\n\n def __init__(self, base_timecode: FrameTimecode = None):\n \"\"\"Initialize a new StatsManager.\n\n Arguments:\n base_timecode: Timecode associated with this object. Must not be None (default value\n will be removed in a future release).\n \"\"\"\n # Frame metrics is a dict of frame (int): metric_dict (Dict[str, float])\n # of each frame metric key and the value it represents (usually float).\n self._frame_metrics: Dict[FrameTimecode, Dict[str, float]] = dict()\n self._registered_metrics: Set[str] = set() # Set of frame metric keys.\n self._loaded_metrics: Set[str] = set() # Metric keys loaded from stats file.\n self._metrics_updated: bool = False # Flag indicating if metrics require saving.\n self._base_timecode: Optional[FrameTimecode] = base_timecode # Used for timing calculations.\n\n def register_metrics(self, metric_keys: Iterable[str]) -> None:\n \"\"\"Register a list of metric keys that will be used by the detector.\n\n Used to ensure that multiple detector keys don't overlap.\n\n Raises:\n FrameMetricRegistered: A particular metric_key has already been registered/added\n to the StatsManager. Only if the StatsManager is being used for read-only\n access (i.e. all frames in the video have already been processed for the given\n metric_key in the exception) is this behavior desirable.\n \"\"\"\n for metric_key in metric_keys:\n if metric_key not in self._registered_metrics:\n self._registered_metrics.add(metric_key)\n else:\n raise FrameMetricRegistered(metric_key)\n\n # TODO(v1.0): Change frame_number to a FrameTimecode now that it is just a hash and will\n # be required for VFR support.\n def get_metrics(self, frame_number: int, metric_keys: Iterable[str]) -> List[Any]:\n \"\"\"Return the requested statistics/metrics for a given frame.\n\n Arguments:\n frame_number (int): Frame number to retrieve metrics for.\n metric_keys (List[str]): A list of metric keys to look up.\n\n Returns:\n A list containing the requested frame metrics for the given frame number\n in the same order as the input list of metric keys. 
If a metric could\n not be found, None is returned for that particular metric.\n \"\"\"\n return [self._get_metric(frame_number, metric_key) for metric_key in metric_keys]\n\n def set_metrics(self, frame_number: int, metric_kv_dict: Dict[str, Any]) -> None:\n \"\"\" Set Metrics: Sets the provided statistics/metrics for a given frame.\n\n Arguments:\n frame_number: Frame number to retrieve metrics for.\n metric_kv_dict: A dict mapping metric keys to the\n respective integer/floating-point metric values to set.\n \"\"\"\n for metric_key in metric_kv_dict:\n self._set_metric(frame_number, metric_key, metric_kv_dict[metric_key])\n\n def metrics_exist(self, frame_number: int, metric_keys: Iterable[str]) -> bool:\n \"\"\" Metrics Exist: Checks if the given metrics/stats exist for the given frame.\n\n Returns:\n bool: True if the given metric keys exist for the frame, False otherwise.\n \"\"\"\n return all([self._metric_exists(frame_number, metric_key) for metric_key in metric_keys])\n\n def is_save_required(self) -> bool:\n \"\"\" Is Save Required: Checks if the stats have been updated since loading.\n\n Returns:\n bool: True if there are frame metrics/statistics not yet written to disk,\n False otherwise.\n \"\"\"\n return self._metrics_updated\n\n def save_to_csv(self,\n csv_file: Union[str, bytes, TextIO],\n base_timecode: Optional[FrameTimecode] = None,\n force_save=True) -> None:\n \"\"\" Save To CSV: Saves all frame metrics stored in the StatsManager to a CSV file.\n\n Arguments:\n csv_file: A file handle opened in write mode (e.g. open('...', 'w')) or a path as str.\n base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility.\n force_save: If True, writes metrics out even if an update is not required.\n\n Raises:\n OSError: If `path` cannot be opened or a write failure occurs.\n \"\"\"\n # TODO(v0.7): Replace with DeprecationWarning that `base_timecode` will be removed in v0.8.\n if base_timecode is not None:\n logger.error('base_timecode is deprecated.')\n\n # Ensure we need to write to the file, and that we have data to do so with.\n if not ((self.is_save_required() or force_save) and self._registered_metrics\n and self._frame_metrics):\n logger.info(\"No metrics to save.\")\n return\n\n assert self._base_timecode is not None\n\n # If we get a path instead of an open file handle, recursively call ourselves\n # again but with file handle instead of path.\n if isinstance(csv_file, (str, bytes)):\n with open(csv_file, 'w') as file:\n self.save_to_csv(csv_file=file, force_save=force_save)\n return\n\n csv_writer = csv.writer(csv_file, lineterminator='\\n')\n metric_keys = sorted(list(self._registered_metrics.union(self._loaded_metrics)))\n csv_writer.writerow([COLUMN_NAME_FRAME_NUMBER, COLUMN_NAME_TIMECODE] + metric_keys)\n frame_keys = sorted(self._frame_metrics.keys())\n logger.info(\"Writing %d frames to CSV...\", len(frame_keys))\n for frame_key in frame_keys:\n frame_timecode = self._base_timecode + frame_key\n csv_writer.writerow(\n [frame_timecode.get_frames() +\n 1, frame_timecode.get_timecode()] +\n [str(metric) for metric in self.get_metrics(frame_key, metric_keys)])\n\n @staticmethod\n def valid_header(row: List[str]) -> bool:\n \"\"\"Check that the given CSV row is a valid header for a statsfile.\n\n Arguments:\n row: A row decoded from the CSV reader.\n\n Returns:\n True if `row` is a valid statsfile header, False otherwise.\n \"\"\"\n if not row or not len(row) >= 2:\n return False\n if row[0] != COLUMN_NAME_FRAME_NUMBER or row[1] != COLUMN_NAME_TIMECODE:\n return 
False\n return True\n\n # TODO(v1.0): Remove.\n def load_from_csv(self, csv_file: Union[str, bytes, TextIO]) -> Optional[int]:\n \"\"\"[DEPRECATED] DO NOT USE\n\n Load all metrics stored in a CSV file into the StatsManager instance. Will be removed in a\n future release after becoming a no-op.\n\n Arguments:\n csv_file: A file handle opened in read mode (e.g. open('...', 'r')) or a path as str.\n\n Returns:\n int or None: Number of frames/rows read from the CSV file, or None if the\n input file was blank or could not be found.\n\n Raises:\n StatsFileCorrupt: Stats file is corrupt and can't be loaded, or wrong file\n was specified.\n \"\"\"\n # TODO: Make this an error, then make load_from_csv() a no-op, and finally, remove it.\n logger.warning(\"load_from_csv() is deprecated and will be removed in a future release.\")\n\n # If we get a path instead of an open file handle, check that it exists, and if so,\n # recursively call ourselves again but with file set instead of path.\n if isinstance(csv_file, (str, bytes)):\n if os.path.exists(csv_file):\n with open(csv_file, 'r') as file:\n return self.load_from_csv(csv_file=file)\n # Path doesn't exist.\n return None\n\n # If we get here, file is a valid file handle in read-only text mode.\n csv_reader = csv.reader(csv_file, lineterminator='\\n')\n num_cols = None\n num_metrics = None\n num_frames = None\n # First Row: Frame Num, Timecode, [metrics...]\n try:\n row = next(csv_reader)\n # Backwards compatibility for previous versions of statsfile\n # which included an additional header row.\n if not self.valid_header(row):\n row = next(csv_reader)\n except StopIteration:\n # If the file is blank or we couldn't decode anything, assume the file was empty.\n return None\n if not self.valid_header(row):\n raise StatsFileCorrupt()\n num_cols = len(row)\n num_metrics = num_cols - 2\n if not num_metrics > 0:\n raise StatsFileCorrupt('No metrics defined in CSV file.')\n self._loaded_metrics = row[2:]\n num_frames = 0\n for row in csv_reader:\n metric_dict = {}\n if not len(row) == num_cols:\n raise StatsFileCorrupt('Wrong number of columns detected in stats file row.')\n for i, metric_str in enumerate(row[2:]):\n if metric_str and metric_str != 'None':\n try:\n metric_dict[self._loaded_metrics[i]] = float(metric_str)\n except ValueError:\n raise StatsFileCorrupt('Corrupted value in stats file: %s' %\n metric_str) from ValueError\n frame_number = int(row[0])\n # Switch from 1-based to 0-based frame numbers.\n if frame_number > 0:\n frame_number -= 1\n self.set_metrics(frame_number, metric_dict)\n num_frames += 1\n logger.info('Loaded %d metrics for %d frames.', num_metrics, num_frames)\n self._metrics_updated = False\n return num_frames\n\n def _get_metric(self, frame_number: int, metric_key: str) -> Optional[Any]:\n if self._metric_exists(frame_number, metric_key):\n return self._frame_metrics[frame_number][metric_key]\n return None\n\n def _set_metric(self, frame_number: int, metric_key: str, metric_value: Any) -> None:\n self._metrics_updated = True\n if not frame_number in self._frame_metrics:\n self._frame_metrics[frame_number] = dict()\n self._frame_metrics[frame_number][metric_key] = metric_value\n\n def _metric_exists(self, frame_number: int, metric_key: str) -> bool:\n return (frame_number in self._frame_metrics\n and metric_key in self._frame_metrics[frame_number])" }, { "identifier": "FrameMetricRegistered", "path": "backend/scenedetect/stats_manager.py", "snippet": "class FrameMetricRegistered(Exception):\n \"\"\" Raised when attempting to 
register a frame metric key which has\n already been registered. \"\"\"\n\n def __init__(self,\n metric_key: str,\n message: str = \"Attempted to re-register frame metric key.\"):\n super().__init__(message)\n self.metric_key = metric_key" } ]
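The FrameTimecode snippet quoted above documents the accepted timecode forms ('HH:MM:SS[.nnn]', a bare frame count, or a '<seconds>s' suffix) together with the arithmetic and comparison operators. A minimal usage sketch, assuming the public constructor accepts the same `timecode`/`fps` arguments used elsewhere in this record and parses strings the way `_parse_timecode_string` does:

from backend.scenedetect.frame_timecode import FrameTimecode

# 2.5 seconds at 30 FPS parses to frame 75; a bare int is an exact frame count.
start = FrameTimecode(timecode='00:00:02.500', fps=30.0)
offset = FrameTimecode(timecode=15, fps=30.0)

# __add__/__iadd__ accept ints (frames), floats (seconds), strings, or another
# FrameTimecode with an equal framerate.
end = start + offset       # frame 90
end += 1.0                 # one more second -> frame 120

print(end.get_timecode())  # '00:00:04.000'
print(end.get_frames())    # 120 (accessor also used by write_scene_list later in this record)
print(end > start)         # True; comparisons require equal framerates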
import csv import threading import queue import logging import math import sys import cv2 import numpy as np from enum import Enum from typing import Iterable, List, Tuple, Optional, Dict, Callable, Union, TextIO from backend.scenedetect._thirdparty.simpletable import (SimpleTableCell, SimpleTableImage, SimpleTableRow, SimpleTable, HTMLPage) from backend.scenedetect.platform import (tqdm, get_and_create_path, get_cv2_imwrite_params, Template) from backend.scenedetect.frame_timecode import FrameTimecode from backend.scenedetect.video_stream import VideoStream from backend.scenedetect.scene_detector import SceneDetector, SparseSceneDetector from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered
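The StatsManager snippet above (register_metrics, set_metrics/get_metrics, save_to_csv) is the dependency exercised by this record's `next_line`. A small illustrative round-trip built only from the methods shown there, with a made-up metric key ('content_val') and output path:

from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.stats_manager import StatsManager

# base_timecode is required by save_to_csv, which converts frame keys to timecodes.
stats = StatsManager(base_timecode=FrameTimecode(timecode=0, fps=30.0))

stats.register_metrics(['content_val'])        # registering the same key twice raises FrameMetricRegistered
stats.set_metrics(10, {'content_val': 12.5})   # per-frame key/value store

assert stats.metrics_exist(10, ['content_val'])
print(stats.get_metrics(10, ['content_val']))  # [12.5]

stats.save_to_csv('stats.csv')                 # accepts a path or an open text handle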
14719
if not completed: logger.error('Could not generate all output images.') return image_filenames ## ## SceneManager Class Implementation ## class SceneManager: """The SceneManager facilitates detection of scenes (:meth:`detect_scenes`) on a video (:class:`VideoStream <scenedetect.video_stream.VideoStream>`) using a detector (:meth:`add_detector`). Video decoding is done in parallel in a background thread. """ def __init__( self, stats_manager: Optional[StatsManager] = None, ): """ Arguments: stats_manager: :class:`StatsManager` to bind to this `SceneManager`. Can be accessed via the `stats_manager` property of the resulting object to save to disk. """ self._cutting_list = [] self._event_list = [] self._detector_list = [] self._sparse_detector_list = [] # TODO(v1.0): This class should own a StatsManager instead of taking an optional one. # Expose a new `stats_manager` @property from the SceneManager, and either change the # `stats_manager` argument to to `store_stats: bool=False`, or lazy-init one. # TODO(v1.0): This class should own a VideoStream as well, instead of passing one # to the detect_scenes method. If concatenation is required, it can be implemented as # a generic VideoStream wrapper. self._stats_manager: Optional[StatsManager] = stats_manager # Position of video that was first passed to detect_scenes. self._start_pos: FrameTimecode = None # Position of video on the last frame processed by detect_scenes. self._last_pos: FrameTimecode = None self._base_timecode: Optional[FrameTimecode] = None self._downscale: int = 1 self._auto_downscale: bool = True # Interpolation method to use when downscaling. Defaults to linear interpolation # as a good balance between quality and performance. self._interpolation: Interpolation = Interpolation.LINEAR # Boolean indicating if we have only seen EventType.CUT events so far. self._only_cuts: bool = True # Set by decode thread when an exception occurs. self._exception_info = None self._stop = threading.Event() self._frame_buffer = [] self._frame_buffer_size = 0 @property def interpolation(self) -> Interpolation: """Interpolation method to use when downscaling frames. Must be one of cv2.INTER_*.""" return self._interpolation @interpolation.setter def interpolation(self, value: Interpolation): self._interpolation = value @property def stats_manager(self) -> Optional[StatsManager]: """Getter for the StatsManager associated with this SceneManager, if any.""" return self._stats_manager @property def downscale(self) -> int: """Factor to downscale each frame by. Will always be >= 1, where 1 indicates no scaling. Will be ignored if auto_downscale=True.""" return self._downscale @downscale.setter def downscale(self, value: int): """Set to 1 for no downscaling, 2 for 2x downscaling, 3 for 3x, etc...""" if value < 1: raise ValueError("Downscale factor must be a positive integer >= 1!") if self.auto_downscale: logger.warning("Downscale factor will be ignored because auto_downscale=True!") if value is not None and not isinstance(value, int): logger.warning("Downscale factor will be truncated to integer!") value = int(value) self._downscale = value @property def auto_downscale(self) -> bool: """If set to True, will automatically downscale based on video frame size. Overrides `downscale` if set.""" return self._auto_downscale @auto_downscale.setter def auto_downscale(self, value: bool): self._auto_downscale = value def add_detector(self, detector: SceneDetector) -> None: """Add/register a SceneDetector (e.g. 
ContentDetector, ThresholdDetector) to run when detect_scenes is called. The SceneManager owns the detector object, so a temporary may be passed. Arguments: detector (SceneDetector): Scene detector to add to the SceneManager. """ if self._stats_manager is None and detector.stats_manager_required(): # Make sure the lists are empty so that the detectors don't get # out of sync (require an explicit statsmanager instead) assert not self._detector_list and not self._sparse_detector_list self._stats_manager = StatsManager() detector.stats_manager = self._stats_manager if self._stats_manager is not None: try: self._stats_manager.register_metrics(detector.get_metrics())
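The `cropped_code` field stops inside `add_detector`, but the properties it does show (downscale, auto_downscale, stats_manager) are enough for a quick sketch; the module path below is assumed from the record's import list rather than confirmed by it:

from backend.scenedetect.scene_manager import SceneManager

scene_manager = SceneManager()

# auto_downscale=True (the default) derives a factor from the frame width at
# detection time; turn it off to pin an explicit integer factor instead.
scene_manager.auto_downscale = False
scene_manager.downscale = 2         # values < 1 raise ValueError; non-int values are truncated with a warning

print(scene_manager.downscale)      # 2
print(scene_manager.stats_manager)  # None until one is passed in or a detector requires it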
# -*- coding: utf-8 -*- # # PySceneDetect: Python-Based Video Scene Detector # ------------------------------------------------------------------- # [ Site: https://scenedetect.com ] # [ Docs: https://scenedetect.com/docs/ ] # [ Github: https://github.com/Breakthrough/PySceneDetect/ ] # # Copyright (C) 2014-2023 Brandon Castellano <http://www.bcastell.com>. # PySceneDetect is licensed under the BSD 3-Clause License; see the # included LICENSE file, or visit one of the above pages for details. # """``scenedetect.scene_manager`` Module This module implements :class:`SceneManager`, coordinates running a :mod:`SceneDetector <scenedetect.detectors>` over the frames of a video (:mod:`VideoStream <scenedetect.video_stream>`). Video decoding is done in a separate thread to improve performance. This module also contains other helper functions (e.g. :func:`save_images`) which can be used to process the resulting scene list. =============================================================== Usage =============================================================== The following example shows basic usage of a :class:`SceneManager`: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector video = open_video(video_path) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) # Detect all scenes in video from current position to end. scene_manager.detect_scenes(video) # `get_scene_list` returns a list of start/end timecode pairs # for each scene that was found. scenes = scene_manager.get_scene_list() An optional callback can also be invoked on each detected scene, for example: .. code:: python from scenedetect import open_video, SceneManager, ContentDetector # Callback to invoke on the first frame of every new scene detection. def on_new_scene(frame_img: numpy.ndarray, frame_num: int): print("New scene found at frame %d." % frame_num) video = open_video(test_video_file) scene_manager = SceneManager() scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video, callback=on_new_scene) To use a `SceneManager` with a webcam/device or existing `cv2.VideoCapture` device, use the :class:`VideoCaptureAdapter <scenedetect.backends.opencv.VideoCaptureAdapter>` instead of `open_video`. ======================================================================= Storing Per-Frame Statistics ======================================================================= `SceneManager` can use an optional :class:`StatsManager <scenedetect.stats_manager.StatsManager>` to save frame statistics to disk: .. code:: python from scenedetect import open_video, ContentDetector, SceneManager, StatsManager video = open_video(test_video_file) scene_manager = SceneManager(stats_manager=StatsManager()) scene_manager.add_detector(ContentDetector()) scene_manager.detect_scenes(video=video) scene_list = scene_manager.get_scene_list() print_scenes(scene_list=scene_list) # Save per-frame statistics to disk. scene_manager.stats_manager.save_to_csv(csv_file=STATS_FILE_PATH) The statsfile can be used to find a better threshold for certain inputs, or perform statistical analysis of the video. """ logger = logging.getLogger('pyscenedetect') # TODO: This value can and should be tuned for performance improvements as much as possible, # until accuracy falls, on a large enough dataset. This has yet to be done, but the current # value doesn't seem to have caused any issues at least. 
DEFAULT_MIN_WIDTH: int = 256 """The default minimum width a frame will be downscaled to when calculating a downscale factor.""" MAX_FRAME_QUEUE_LENGTH: int = 4 """Maximum number of decoded frames which can be buffered while waiting to be processed.""" PROGRESS_BAR_DESCRIPTION = 'Detected: %d | Progress' """Template to use for progress bar.""" class Interpolation(Enum): """Interpolation method used for image resizing. Based on constants defined in OpenCV.""" NEAREST = cv2.INTER_NEAREST """Nearest neighbor interpolation.""" LINEAR = cv2.INTER_LINEAR """Bilinear interpolation.""" CUBIC = cv2.INTER_CUBIC """Bicubic interpolation.""" AREA = cv2.INTER_AREA """Pixel area relation resampling. Provides moire'-free downscaling.""" LANCZOS4 = cv2.INTER_LANCZOS4 """Lanczos interpolation over 8x8 neighborhood.""" def compute_downscale_factor(frame_width: int, effective_width: int = DEFAULT_MIN_WIDTH) -> int: """Get the optimal default downscale factor based on a video's resolution (currently only the width in pixels is considered). The resulting effective width of the video will be between frame_width and 1.5 * frame_width pixels (e.g. if frame_width is 200, the range of effective widths will be between 200 and 300). Arguments: frame_width: Actual width of the video frame in pixels. effective_width: Desired minimum width in pixels. Returns: int: The default downscale factor to use to achieve at least the target effective_width. """ assert not (frame_width < 1 or effective_width < 1) if frame_width < effective_width: return 1 return frame_width // effective_width def get_scenes_from_cuts( cut_list: Iterable[FrameTimecode], start_pos: Union[int, FrameTimecode], end_pos: Union[int, FrameTimecode], base_timecode: Optional[FrameTimecode] = None, ) -> List[Tuple[FrameTimecode, FrameTimecode]]: """Returns a list of tuples of start/end FrameTimecodes for each scene based on a list of detected scene cuts/breaks. This function is called when using the :meth:`SceneManager.get_scene_list` method. The scene list is generated from a cutting list (:meth:`SceneManager.get_cut_list`), noting that each scene is contiguous, starting from the first to last frame of the input. If `cut_list` is empty, the resulting scene will span from `start_pos` to `end_pos`. Arguments: cut_list: List of FrameTimecode objects where scene cuts/breaks occur. base_timecode: The base_timecode of which all FrameTimecodes in the cut_list are based on. num_frames: The number of frames, or FrameTimecode representing duration, of the video that was processed (used to generate last scene's end time). start_frame: The start frame or FrameTimecode of the cut list. Used to generate the first scene's start time. base_timecode: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: List of tuples in the form (start_time, end_time), where both start_time and end_time are FrameTimecode objects representing the exact time/frame where each scene occupies based on the input cut_list. """ # TODO(v0.7): Use the warnings module to turn this into a warning. if base_timecode is not None: logger.error('`base_timecode` argument is deprecated has no effect.') # Scene list, where scenes are tuples of (Start FrameTimecode, End FrameTimecode). scene_list = [] if not cut_list: scene_list.append((start_pos, end_pos)) return scene_list # Initialize last_cut to the first frame we processed,as it will be # the start timecode for the first scene in the list. 
last_cut = start_pos for cut in cut_list: scene_list.append((last_cut, cut)) last_cut = cut # Last scene is from last cut to end of video. scene_list.append((last_cut, end_pos)) return scene_list def write_scene_list(output_csv_file: TextIO, scene_list: Iterable[Tuple[FrameTimecode, FrameTimecode]], include_cut_list: bool = True, cut_list: Optional[Iterable[FrameTimecode]] = None) -> None: """Writes the given list of scenes to an output file handle in CSV format. Arguments: output_csv_file: Handle to open file in write mode. scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. include_cut_list: Bool indicating if the first row should include the timecodes where each scene starts. Should be set to False if RFC 4180 compliant CSV output is required. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not specified, the cut list is generated using the start times of each scene following the first one. """ csv_writer = csv.writer(output_csv_file, lineterminator='\n') # If required, output the cutting list as the first row (i.e. before the header row). if include_cut_list: csv_writer.writerow( ["Timecode List:"] + cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]]) csv_writer.writerow([ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ]) for i, (start, end) in enumerate(scene_list): duration = end - start csv_writer.writerow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) def write_scene_list_html(output_html_filename, scene_list, cut_list=None, css=None, css_class='mytable', image_filenames=None, image_width=None, image_height=None): """Writes the given list of scenes to an output file handle in html format. Arguments: output_html_filename: filename of output html file scene_list: List of pairs of FrameTimecodes denoting each scene's start/end FrameTimecode. cut_list: Optional list of FrameTimecode objects denoting the cut list (i.e. the frames in the video that need to be split to generate individual scenes). If not passed, the start times of each scene (besides the 0th scene) is used instead. css: String containing all the css information for the resulting html page. 
css_class: String containing the named css class image_filenames: dict where key i contains a list with n elements (filenames of the n saved images from that scene) image_width: Optional desired width of images in table in pixels image_height: Optional desired height of images in table in pixels """ if not css: css = """ table.mytable { font-family: times; font-size:12px; color:#000000; border-width: 1px; border-color: #eeeeee; border-collapse: collapse; background-color: #ffffff; width=100%; max-width:550px; table-layout:fixed; } table.mytable th { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; background-color: #e6eed6; color:#000000; } table.mytable td { border-width: 1px; padding: 8px; border-style: solid; border-color: #eeeeee; } #code { display:inline; font-family: courier; color: #3d9400; } #string { display:inline; font-weight: bold; } """ # Output Timecode list timecode_table = SimpleTable( [["Timecode List:"] + (cut_list if cut_list else [start.get_timecode() for start, _ in scene_list[1:]])], css_class=css_class) # Output list of scenes header_row = [ "Scene Number", "Start Frame", "Start Timecode", "Start Time (seconds)", "End Frame", "End Timecode", "End Time (seconds)", "Length (frames)", "Length (timecode)", "Length (seconds)" ] for i, (start, end) in enumerate(scene_list): duration = end - start row = SimpleTableRow([ '%d' % (i + 1), '%d' % (start.get_frames() + 1), start.get_timecode(), '%.3f' % start.get_seconds(), '%d' % end.get_frames(), end.get_timecode(), '%.3f' % end.get_seconds(), '%d' % duration.get_frames(), duration.get_timecode(), '%.3f' % duration.get_seconds() ]) if image_filenames: for image in image_filenames[i]: row.add_cell( SimpleTableCell( SimpleTableImage(image, width=image_width, height=image_height))) if i == 0: scene_table = SimpleTable(rows=[row], header_row=header_row, css_class=css_class) else: scene_table.add_row(row=row) # Write html file page = HTMLPage() page.add_table(timecode_table) page.add_table(scene_table) page.css = css page.save(output_html_filename) # # TODO(v1.0): Refactor to take a SceneList object; consider moving this and save scene list # to a better spot, or just move them to scene_list.py. # def save_images(scene_list: List[Tuple[FrameTimecode, FrameTimecode]], video: VideoStream, num_images: int = 3, frame_margin: int = 1, image_extension: str = 'jpg', encoder_param: int = 95, image_name_template: str = '$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER', output_dir: Optional[str] = None, show_progress: Optional[bool] = False, scale: Optional[float] = None, height: Optional[int] = None, width: Optional[int] = None, interpolation: Interpolation = Interpolation.CUBIC, video_manager=None) -> Dict[int, List[str]]: """Save a set number of images from each scene, given a list of scenes and the associated video/frame source. Arguments: scene_list: A list of scenes (pairs of FrameTimecode objects) returned from calling a SceneManager's detect_scenes() method. video: A VideoStream object corresponding to the scene list. Note that the video will be closed/re-opened and seeked through. num_images: Number of images to generate for each scene. Minimum is 1. frame_margin: Number of frames to pad each scene around the beginning and end (e.g. moves the first/last image into the scene by N frames). Can set to 0, but will result in some video files failing to extract the very last frame. image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp'). 
encoder_param: Quality/compression efficiency, based on type of image: 'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp. 'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode. image_name_template: Template to use when creating the images on disk. Can use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image extension is applied automatically as per the argument image_extension. output_dir: Directory to output the images into. If not set, the output is created in the working directory. show_progress: If True, shows a progress bar if tqdm is installed. scale: Optional factor by which to rescale saved images. A scaling factor of 1 would not result in rescaling. A value < 1 results in a smaller saved image, while a value > 1 results in an image larger than the original. This value is ignored if either the height or width values are specified. height: Optional value for the height of the saved images. Specifying both the height and width will resize images to an exact size, regardless of aspect ratio. Specifying only height will rescale the image to that number of pixels in height while preserving the aspect ratio. width: Optional value for the width of the saved images. Specifying both the width and height will resize images to an exact size, regardless of aspect ratio. Specifying only width will rescale the image to that number of pixels wide while preserving the aspect ratio. interpolation: Type of interpolation to use when resizing images. video_manager: [DEPRECATED] DO NOT USE. For backwards compatibility only. Returns: Dictionary of the format { scene_num : [image_paths] }, where scene_num is the number of the scene in scene_list (starting from 1), and image_paths is a list of the paths to the newly saved/created images. Raises: ValueError: Raised if any arguments are invalid or out of range (e.g. if num_images is negative). """ # TODO(v0.7): Add DeprecationWarning that `video_manager` will be removed in v0.8. if video_manager is not None: logger.error('`video_manager` argument is deprecated, use `video` instead.') video = video_manager if not scene_list: return {} if num_images <= 0 or frame_margin < 0: raise ValueError() # TODO: Validate that encoder_param is within the proper range. # Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png. imwrite_param = [get_cv2_imwrite_params()[image_extension], encoder_param ] if encoder_param is not None else [] video.reset() # Setup flags and init progress bar if available. completed = True logger.info('Generating output images (%d per scene)...', num_images) progress_bar = None if show_progress: progress_bar = tqdm(total=len(scene_list) * num_images, unit='images', dynamic_ncols=True) filename_template = Template(image_name_template) scene_num_format = '%0' scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd' image_num_format = '%0' image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd' framerate = scene_list[0][0].framerate # TODO(v1.0): Split up into multiple sub-expressions so auto-formatter works correctly. 
timecode_list = [ [ FrameTimecode(int(f), fps=framerate) for f in [ # middle frames a[len(a) // 2] if (0 < j < num_images - 1) or num_images == 1 # first frame else min(a[0] + frame_margin, a[-1]) if j == 0 # last frame else max(a[-1] - frame_margin, a[0]) # for each evenly-split array of frames in the scene list for j, a in enumerate(np.array_split(r, num_images)) ] ] for i, r in enumerate([ # pad ranges to number of images r if 1 + r[-1] - r[0] >= num_images else list(r) + [r[-1]] * (num_images - len(r)) # create range of frames in scene for r in ( range( start.get_frames(), start.get_frames() + max( 1, # guard against zero length scenes end.get_frames() - start.get_frames())) # for each scene in scene list for start, end in scene_list) ]) ] image_filenames = {i: [] for i in range(len(timecode_list))} aspect_ratio = video.aspect_ratio if abs(aspect_ratio - 1.0) < 0.01: aspect_ratio = None logger.debug('Writing images with template %s', filename_template.template) for i, scene_timecodes in enumerate(timecode_list): for j, image_timecode in enumerate(scene_timecodes): video.seek(image_timecode) frame_im = video.read() if frame_im is not None: # TODO: Allow NUM to be a valid suffix in addition to NUMBER. file_path = '%s.%s' % (filename_template.safe_substitute( VIDEO_NAME=video.name, SCENE_NUMBER=scene_num_format % (i + 1), IMAGE_NUMBER=image_num_format % (j + 1), FRAME_NUMBER=image_timecode.get_frames()), image_extension) image_filenames[i].append(file_path) # TODO(0.6.3): Combine this resize with the ones below. if aspect_ratio is not None: frame_im = cv2.resize( frame_im, (0, 0), fx=aspect_ratio, fy=1.0, interpolation=interpolation.value) frame_height = frame_im.shape[0] frame_width = frame_im.shape[1] # Figure out what kind of resizing needs to be done if height or width: if height and not width: factor = height / float(frame_height) width = int(factor * frame_width) if width and not height: factor = width / float(frame_width) height = int(factor * frame_height) assert height > 0 and width > 0 frame_im = cv2.resize( frame_im, (width, height), interpolation=interpolation.value) elif scale: frame_im = cv2.resize( frame_im, (0, 0), fx=scale, fy=scale, interpolation=interpolation.value) cv2.imwrite(get_and_create_path(file_path, output_dir), frame_im, imwrite_param) else: completed = False break if progress_bar is not None: progress_bar.update(1) if progress_bar is not None: progress_bar.close() if not completed: logger.error('Could not generate all output images.') return image_filenames ## ## SceneManager Class Implementation ## class SceneManager: """The SceneManager facilitates detection of scenes (:meth:`detect_scenes`) on a video (:class:`VideoStream <scenedetect.video_stream.VideoStream>`) using a detector (:meth:`add_detector`). Video decoding is done in parallel in a background thread. """ def __init__( self, stats_manager: Optional[StatsManager] = None, ): """ Arguments: stats_manager: :class:`StatsManager` to bind to this `SceneManager`. Can be accessed via the `stats_manager` property of the resulting object to save to disk. """ self._cutting_list = [] self._event_list = [] self._detector_list = [] self._sparse_detector_list = [] # TODO(v1.0): This class should own a StatsManager instead of taking an optional one. # Expose a new `stats_manager` @property from the SceneManager, and either change the # `stats_manager` argument to to `store_stats: bool=False`, or lazy-init one. 
# TODO(v1.0): This class should own a VideoStream as well, instead of passing one # to the detect_scenes method. If concatenation is required, it can be implemented as # a generic VideoStream wrapper. self._stats_manager: Optional[StatsManager] = stats_manager # Position of video that was first passed to detect_scenes. self._start_pos: FrameTimecode = None # Position of video on the last frame processed by detect_scenes. self._last_pos: FrameTimecode = None self._base_timecode: Optional[FrameTimecode] = None self._downscale: int = 1 self._auto_downscale: bool = True # Interpolation method to use when downscaling. Defaults to linear interpolation # as a good balance between quality and performance. self._interpolation: Interpolation = Interpolation.LINEAR # Boolean indicating if we have only seen EventType.CUT events so far. self._only_cuts: bool = True # Set by decode thread when an exception occurs. self._exception_info = None self._stop = threading.Event() self._frame_buffer = [] self._frame_buffer_size = 0 @property def interpolation(self) -> Interpolation: """Interpolation method to use when downscaling frames. Must be one of cv2.INTER_*.""" return self._interpolation @interpolation.setter def interpolation(self, value: Interpolation): self._interpolation = value @property def stats_manager(self) -> Optional[StatsManager]: """Getter for the StatsManager associated with this SceneManager, if any.""" return self._stats_manager @property def downscale(self) -> int: """Factor to downscale each frame by. Will always be >= 1, where 1 indicates no scaling. Will be ignored if auto_downscale=True.""" return self._downscale @downscale.setter def downscale(self, value: int): """Set to 1 for no downscaling, 2 for 2x downscaling, 3 for 3x, etc...""" if value < 1: raise ValueError("Downscale factor must be a positive integer >= 1!") if self.auto_downscale: logger.warning("Downscale factor will be ignored because auto_downscale=True!") if value is not None and not isinstance(value, int): logger.warning("Downscale factor will be truncated to integer!") value = int(value) self._downscale = value @property def auto_downscale(self) -> bool: """If set to True, will automatically downscale based on video frame size. Overrides `downscale` if set.""" return self._auto_downscale @auto_downscale.setter def auto_downscale(self, value: bool): self._auto_downscale = value def add_detector(self, detector: SceneDetector) -> None: """Add/register a SceneDetector (e.g. ContentDetector, ThresholdDetector) to run when detect_scenes is called. The SceneManager owns the detector object, so a temporary may be passed. Arguments: detector (SceneDetector): Scene detector to add to the SceneManager. """ if self._stats_manager is None and detector.stats_manager_required(): # Make sure the lists are empty so that the detectors don't get # out of sync (require an explicit statsmanager instead) assert not self._detector_list and not self._sparse_detector_list self._stats_manager = StatsManager() detector.stats_manager = self._stats_manager if self._stats_manager is not None: try: self._stats_manager.register_metrics(detector.get_metrics())
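Besides the SceneManager class, the `all_code` field carries the helpers `get_scenes_from_cuts` and `write_scene_list`. A minimal sketch chaining the two, using only the signatures visible above (the output filename is a placeholder):

from backend.scenedetect.frame_timecode import FrameTimecode
from backend.scenedetect.scene_manager import get_scenes_from_cuts, write_scene_list

base = FrameTimecode(timecode=0, fps=30.0)

# Two cuts inside a 10-second clip yield three contiguous scenes.
cuts = [base + 90, base + 180]
scene_list = get_scenes_from_cuts(cut_list=cuts, start_pos=base, end_pos=base + 300)

with open('scenes.csv', 'w') as csv_file:
    # include_cut_list=False keeps the CSV RFC 4180 compliant, per the docstring.
    write_scene_list(csv_file, scene_list, include_cut_list=False)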
except FrameMetricRegistered:
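The `next_line` above completes the `try` block that closes both code fields. A self-contained sketch of the exception it guards against, built from the quoted StatsManager snippet (the metric key is hypothetical, and the motivation stated in the comment is an inference, not taken from the target file):

from backend.scenedetect.stats_manager import StatsManager, FrameMetricRegistered

stats = StatsManager()
stats.register_metrics(['metric_a'])       # hypothetical key
try:
    stats.register_metrics(['metric_a'])   # second registration of the same key raises
except FrameMetricRegistered:
    # Presumably add_detector swallows this so that detectors sharing metric
    # keys can coexist instead of aborting detection.
    pass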
11
2023-10-25 02:50:01+00:00
24k
EulerSearch/embedding_studio
embedding_studio/embeddings/training/embeddings_finetuner.py
[ { "identifier": "QueryRetriever", "path": "embedding_studio/embeddings/data/clickstream/query_retriever.py", "snippet": "class QueryRetriever(object):\n \"\"\"As we can't exactly predict a schema of storing queries:\n 1. As text exceptly in clickstream service\n 2. As ID of a record with a text\n 3. As a path to an image\n\n We provide an ability to use any query item. So, a user can specify any.\n\n \"\"\"\n\n def setup(self, clickstream_sessions: List[ClickstreamSession]):\n pass\n\n def __call__(self, query: QueryItem):\n return query" }, { "identifier": "ClickstreamSession", "path": "embedding_studio/embeddings/data/clickstream/raw_session.py", "snippet": "class RawClickstreamSession(BaseModel):\n class Config:\n def __init__(self, **data):\n def __len__(self) -> int:\n def from_mongo(\n cls,\n session: SessionWithEvents,\n query_item_type: type,\n search_result_type: type,\n item_type: type,\n event_type: type,\n ) -> \"RawClickstreamSession\":\n def from_dict(\n cls,\n data: dict,\n query_item_type: type,\n search_result_type: type,\n item_type: type,\n event_type: type,\n ) -> \"RawClickstreamSession\":\n def get_session(self) -> ClickstreamSession:" }, { "identifier": "dummy_confidences", "path": "embedding_studio/embeddings/features/event_confidences.py", "snippet": "@torch.no_grad()\ndef dummy_confidences(\n ranks: FloatTensor, events: Tensor\n) -> Union[Tensor, FloatTensor]:\n \"\"\"Confidence = 1.0\n\n :param ranks: list of ranks from search results\n :param events: list of 0 if it's not an event, 1 if it's an event\n :return: list of confidences\n \"\"\"\n return torch.ones(len(events))" }, { "identifier": "COSINE_SIMILARITY", "path": "embedding_studio/embeddings/features/extractor.py", "snippet": "COSINE_SIMILARITY = torch.nn.CosineSimilarity(dim=1, eps=1e-6)" }, { "identifier": "FeaturesExtractor", "path": "embedding_studio/embeddings/features/extractor.py", "snippet": "class FeaturesExtractor(pl.LightningModule):\n def __init__(\n self,\n model: EmbeddingsModelInterface,\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY,\n is_similarity: Optional[bool] = True,\n not_irrelevant_only: Optional[bool] = True,\n negative_downsampling_factor: Optional[float] = 1.0,\n min_abs_difference_threshold: Optional[float] = 0.0,\n max_abs_difference_threshold: Optional[float] = 1.0,\n confidence_calculator: Optional[Callable] = dummy_confidences,\n exmaples_order: Optional[List[ExamplesType]] = None,\n ):\n \"\"\"Logic of extracting features:\n 1. Positive and negative examples ranks\n 2. Event confidences\n 3. 
Target: 1 if is_similarity else -1\n\n and pack it in SessionFeatures object.\n\n :param model: embedding model itself\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param not_irrelevant_only: use only not irrelevant sessions (default: True)\n True - Triplet loss\n False - Contrastive-like loss\n :param negative_downsampling_factor: in real tasks amount of results is much larger than not-results,\n use this parameters to fix a balance (default: 0.5)\n :param min_abs_difference_threshold: filter out soft pairs abs(neg_dist - pos_dist) < small value\n :param max_abs_difference_threshold: filter out hard pairs abs(neg_dist - pos_dist) > huge value\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param examples_order: order of passing examples to a trainer (default: None)\n \"\"\"\n super(FeaturesExtractor, self).__init__()\n # Check model type\n if not isinstance(model, EmbeddingsModelInterface):\n raise ValueError(\n \"Model must be an instance of EmbeddingsModelInterface.\"\n )\n self.model = model\n\n # Check ranker type and value\n if not callable(ranker):\n raise ValueError(\"Ranker must be a callable function.\")\n self.ranker = ranker\n\n # Check is_similarity type\n if not isinstance(is_similarity, bool):\n raise ValueError(\"is_similarity must be a boolean.\")\n self.is_similarity = is_similarity\n\n # Check not_irrelevant_only type\n if not isinstance(not_irrelevant_only, bool):\n raise ValueError(\"not_irrelevant_only must be a boolean.\")\n self.not_irrelevant_only = not_irrelevant_only\n\n # TODO: use pydantic models here\n if (\n not isinstance(negative_downsampling_factor, float)\n or negative_downsampling_factor < 0.0\n or negative_downsampling_factor >= 1\n ):\n raise ValueError(\n \"negative downsampling factor should be un range (0.0, 1.0)\"\n )\n self.negative_donwsampling_factor = negative_downsampling_factor\n\n if (\n not isinstance(min_abs_difference_threshold, float)\n or min_abs_difference_threshold < 0.0\n ):\n raise ValueError(\n \"min_abs_difference_threshold should be positive numeric\"\n )\n self.min_abs_difference_threshold = min_abs_difference_threshold\n if (\n not isinstance(max_abs_difference_threshold, float)\n or max_abs_difference_threshold <= 0.0\n ):\n raise ValueError(\n \"max_abs_difference_threshold should be positive numeric\"\n )\n self.max_abs_difference_threshold = max_abs_difference_threshold\n self.confidence_calculator = confidence_calculator\n\n if not exmaples_order:\n exmaples_order = [ExamplesType.all_examples]\n logger.debug(\"All types of examples will be used in training\")\n\n if len({isinstance(e, ExamplesType) for e in exmaples_order}) > 1:\n raise ValueError(\n \"Some of exmaple types are not instances of ExampleType\"\n )\n self.exmaples_order = (\n exmaples_order # TODO: use different examples order\n )\n\n def _confidences(\n self, session: ClickstreamSession, not_events: List[str]\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Calculate confidences for a given clickstream session items.\n\n :param session: provided clickstream session\n :param not_events: not-results (negatives) used for ranks prediction\n :return: positive (results) confidences, negative (not-results) confidences\n \"\"\"\n only_used: List[bool] = [\n (id_ in session.events or id_ in not_events)\n for id_ in session.results\n ]\n only_used_ids: List[str] = [\n id_\n for id_ in 
session.results\n if (id_ in session.events or id_ in not_events)\n ]\n ranks: FloatTensor = FloatTensor(\n [session.ranks[i] for i in session.results]\n )\n bin_clicks: FloatTensor = FloatTensor(\n [(1 if i in session.events else 0) for i in session.results]\n )\n confidences: FloatTensor = self.confidence_calculator(\n ranks, bin_clicks\n )[only_used]\n\n # Sort confidences among positive and negative types\n positive_confidences: Tensor = torch.zeros(len(session.events))\n negative_confidences: Tensor = torch.zeros(len(not_events))\n\n for id_index, id_ in enumerate(only_used_ids):\n if id_ in session.events:\n positive_confidences[session.events.index(id_)] = confidences[\n id_index\n ]\n\n elif id_ in not_events:\n negative_confidences[not_events.index(id_)] = confidences[\n id_index\n ]\n\n return positive_confidences.to(self.device), negative_confidences.to(\n self.device\n )\n\n @torch.no_grad()\n def calculate_ranks(\n self,\n session: ClickstreamSession,\n dataset: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> Dict[str, float]:\n \"\"\"Calculate ranks for a single session\n\n :param session: given session\n :param dataset: items storage related to a given session\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :return: provided session's results ranks\n \"\"\"\n query_vector: FloatTensor = self.model.forward_query(\n query_retriever(session.query)\n )\n items_vectors: FloatTensor = self.model.forward_items(\n dataset.items_by_ids(session.results)\n )\n ranks_: FloatTensor = self.ranker(query_vector, items_vectors)\n ranks = dict()\n for id_, rank in zip(session.results, ranks_.cpu().tolist()):\n ranks[id_] = rank\n\n return ranks\n\n def _get_session_features(\n self,\n session: ClickstreamSession,\n dataset: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> SessionFeatures:\n \"\"\"Calculate features for a single session\n\n :param session: given session\n :param dataset: items storage related to a given session\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :return: provided session's features\n \"\"\"\n features = SessionFeatures()\n\n # For keep balance between results and not-results, we decrease a number of not-results\n not_events_count: int = int(\n self.negative_donwsampling_factor * len(session.not_events)\n )\n not_events_indexes: List[int] = random.choices(\n list(range(len(session.not_events))), k=not_events_count\n ) # we use indexes instead of ids to keep order\n not_events: List[str] = [\n session.not_events[i] for i in sorted(not_events_indexes)\n ]\n\n # And calculate confidences for two groups of items\n (\n features.positive_confidences,\n features.negative_confidences,\n ) = self._confidences(session, not_events)\n\n # Then we calculate query and items vectors\n query_vector: FloatTensor = self.model.forward_query(\n query_retriever(session.query)\n )\n items_vectors: FloatTensor = self.model.forward_items(\n dataset.items_by_ids(session.events + not_events)\n )\n\n positive_indexes: List[int] = [i for i in range(len(session.events))]\n negative_indexes: List[int] = [\n i + len(session.events) for i in range(len(not_events))\n ]\n\n # For each group we calculate ranks\n positive_ranks_: FloatTensor = self.ranker(\n query_vector, items_vectors[positive_indexes]\n )\n negative_ranks_: FloatTensor = self.ranker(\n query_vector, items_vectors[negative_indexes]\n )\n\n if len(positive_indexes) > 0:\n positive_idx = []\n negatie_idx = []\n for pos_i_ 
in positive_indexes:\n for neg_i_ in negative_indexes:\n pos_i = pos_i_\n neg_i = neg_i_ - len(session.events)\n positive_idx.append(pos_i)\n negatie_idx.append(neg_i)\n\n features.positive_ranks = positive_ranks_[positive_idx]\n features.negative_ranks = negative_ranks_[negatie_idx]\n\n features.positive_confidences = features.positive_confidences[\n positive_idx\n ]\n features.negative_confidences = features.negative_confidences[\n negatie_idx\n ]\n\n else:\n features.negative_distances = negative_ranks_\n\n target_value: int = 1 if self.is_similarity else -1\n features.target = target_value * torch.ones(\n features.negative_confidences.shape[0]\n ).to(self.device)\n\n # Filter out noises\n features.clamp_diff_in(\n self.min_abs_difference_threshold,\n self.max_abs_difference_threshold,\n )\n\n return features\n\n def _get_paired_sessions_features(\n self,\n not_irrelevant_session: ClickstreamSession,\n irrelevant_session: ClickstreamSession,\n dataset: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> SessionFeatures:\n \"\"\"Calculate features for a given pair: irrelevant and not irrelevant sessions\n\n :param not_irrelevant_session: not-irrelevant session\n :param irrelevant_session: irrelevant session\n :param dataset: storage of items related to clickstream sessions\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :return: features related for both irrelevant and not irrelevant sessions\n \"\"\"\n not_irrelevant_features: SessionFeatures = self._get_session_features(\n not_irrelevant_session, dataset, query_retriever\n )\n irrelevant_features: SessionFeatures = self._get_session_features(\n irrelevant_session, dataset, query_retriever\n )\n\n irrelevant_features.use_positive_from(not_irrelevant_features)\n\n not_irrelevant_features += irrelevant_features\n\n return not_irrelevant_features\n\n def forward(\n self,\n batch: List[Tuple[ClickstreamSession, ClickstreamSession]],\n dataset: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> SessionFeatures:\n \"\"\"Calculate features for a given batch of pairs: irrelevant and not irrelevant sessions\n\n :param batch: list of pairs: irrelevant and not irrelevant sessions\n :param dataset: storage of items related to clickstream sessions\n :param query_retriever: object to get item related to query, that can be used in \"forward\"\n :return: session features related to a given batch\n \"\"\"\n features = SessionFeatures()\n\n for not_irrelevant_session, irrelevant_session in batch:\n if len(not_irrelevant_session.events) == 0:\n logger.warning(\"Not irrelevant session has no results\")\n continue\n\n if (\n irrelevant_session is not None and len(irrelevant_session) > 0\n ) and not self.not_irrelevant_only:\n features += self._get_paired_sessions_features(\n not_irrelevant_session,\n irrelevant_session,\n dataset,\n query_retriever,\n )\n\n else:\n features += self._get_session_features(\n not_irrelevant_session, dataset, query_retriever\n )\n\n return features" }, { "identifier": "SessionFeatures", "path": "embedding_studio/embeddings/features/session_features.py", "snippet": "class SessionFeatures:\n def __init__(\n self,\n positive_ranks: Optional[FloatTensor] = None,\n negative_ranks: Optional[FloatTensor] = None,\n target: Optional[Tensor] = None,\n positive_confidences: Optional[FloatTensor] = None,\n negative_confidences: Optional[FloatTensor] = None,\n ):\n \"\"\"Extracted features of clickstream session using embeddings.\n\n :param positive_ranks: ranks of positive results\n 
:param negative_ranks: ranks of negative results\n :param target: if target == 1 ranks are similarities, if target == -1 ranks are distances\n :param positive_confidences: confidences of positive results (like clicks)\n :param negative_confidences: confidences of not positive results\n \"\"\"\n\n self._positive_ranks = positive_ranks\n self._negative_ranks = negative_ranks\n self._target = target\n self._positive_confidences = positive_confidences\n self._negative_confidences = negative_confidences\n self._check_types()\n self._check_lengths()\n\n def _check_types(self):\n if self.positive_ranks is not None and not isinstance(\n self.positive_ranks, torch.Tensor\n ):\n raise TypeError(\"positive_ranks must be a torch.Tensor or None\")\n if self.negative_ranks is not None and not isinstance(\n self.negative_ranks, torch.Tensor\n ):\n raise TypeError(\"negative_ranks must be a torch.Tensor or None\")\n if self.target is not None and not isinstance(\n self.target, torch.Tensor\n ):\n raise TypeError(\"target must be a torch.Tensor or None\")\n if self.positive_confidences is not None and not isinstance(\n self.positive_confidences, torch.Tensor\n ):\n raise TypeError(\n \"positive_confidences must be a torch.Tensor or None\"\n )\n if self.negative_confidences is not None and not isinstance(\n self.negative_confidences, torch.Tensor\n ):\n raise TypeError(\n \"negative_confidences must be a torch.Tensor or None\"\n )\n\n def _check_lengths(self):\n length_set = {\n len(x)\n for x in [\n self.positive_ranks,\n self.negative_ranks,\n self.target,\n self.positive_confidences,\n self.negative_confidences,\n ]\n if x is not None\n }\n if len(length_set) > 1:\n raise ValueError(\n \"All non-None parameters must have the same length\"\n )\n\n @property\n def positive_ranks(self) -> Optional[FloatTensor]:\n return self._positive_ranks\n\n @positive_ranks.setter\n def positive_ranks(self, value: Optional[FloatTensor]):\n self._positive_ranks = value\n self._check_types()\n\n @property\n def negative_ranks(self) -> Optional[FloatTensor]:\n return self._negative_ranks\n\n @negative_ranks.setter\n def negative_ranks(self, value: Optional[FloatTensor]):\n self._negative_ranks = value\n self._check_types()\n\n @property\n def target(self) -> Optional[Tensor]:\n return self._target\n\n @target.setter\n def target(self, value: Optional[Tensor]):\n self._target = value\n self._check_types()\n\n @property\n def positive_confidences(self) -> Optional[FloatTensor]:\n return self._positive_confidences\n\n @positive_confidences.setter\n def positive_confidences(self, value: Optional[FloatTensor]):\n self._positive_confidences = value\n self._check_types()\n\n @property\n def negative_confidences(self) -> Optional[FloatTensor]:\n return self._negative_confidences\n\n @negative_confidences.setter\n def negative_confidences(self, value: Optional[FloatTensor]):\n self._negative_confidences = value\n self._check_types()\n\n def _accumulate(self_var: Tensor, other_var: Tensor):\n if self_var is not None and other_var is not None:\n return torch.cat([self_var, other_var])\n elif other_var is not None:\n return other_var\n\n def __iadd__(self, other):\n \"\"\"Accumulate features from another session\n\n :param other: other session\n :return: aggregates features\n \"\"\"\n\n self._positive_ranks = SessionFeatures._accumulate(\n self._positive_ranks, other._positive_ranks\n )\n self._negative_ranks = SessionFeatures._accumulate(\n self._negative_ranks, other._negative_ranks\n )\n self._target = 
SessionFeatures._accumulate(self._target, other._target)\n self._positive_confidences = SessionFeatures._accumulate(\n self._positive_confidences, other._positive_confidences\n )\n self._negative_confidences = SessionFeatures._accumulate(\n self._negative_confidences, other._negative_confidences\n )\n\n self._check_types()\n self._check_lengths()\n return self\n\n def clamp_diff_in(self, min: float, max: float):\n \"\"\"Filter min < |positive_ranks - negative_ranks| < max examples.\n\n :param min: minimal difference between pos and neg ranks\n :param max: maximal difference between pos and neg ranks\n \"\"\"\n if (\n self._positive_ranks is not None\n and self._negative_ranks is not None\n ):\n hard_examples: Tensor = (\n torch.abs(self._positive_ranks - self._negative_ranks) > min\n )\n smooth_examples: Tensor = (\n torch.abs(self._positive_ranks - self._negative_ranks) < max\n )\n examples: Tensor = torch.logical_and(\n hard_examples, smooth_examples\n )\n\n self._positive_ranks = self._positive_ranks[examples]\n self._negative_ranks = self._negative_ranks[examples]\n self._target = self._target[examples]\n self._positive_confidences = self._positive_confidences[examples]\n self._negative_confidences = self._negative_confidences[examples]\n self._check_lengths()\n\n def use_positive_from(self, other):\n \"\"\"If session is fully irrelevant, to use positive pairs from another session.\n This way \"triple loss\" becomes \"contrastive\"\n\n :param other: not irrelevant session with positive evidences\n \"\"\"\n other._check_types()\n other._check_lengths()\n\n if self._negative_ranks.shape[0] < other._positive_ranks.shape[0]:\n positive_ranks_: FloatTensor = other._positive_ranks[\n : self._negative_ranks.shape[0]\n ]\n elif self._negative_ranks.shape[0] > other._positive_ranks.shape[0]:\n self._negative_ranks = self._negative_ranks[\n : other._positive_ranks.shape[0]\n ]\n self._target = self._target[: other._positive_ranks.shape[0]]\n self._negative_confidences = self._negative_confidences[\n : other._positive_ranks.shape[0]\n ]\n positive_ranks_: FloatTensor = other._positive_ranks\n else:\n positive_ranks_: FloatTensor = other._positive_ranks\n\n self._positive_confidences = other._positive_ranks\n self._positive_ranks = positive_ranks_\n\n self._check_types()\n self._check_lengths()" }, { "identifier": "RankingLossInterface", "path": "embedding_studio/embeddings/losses/ranking_loss_interface.py", "snippet": "class RankingLossInterface(pl.LightningModule):\n @abstractmethod\n def __call__(self, features: SessionFeatures) -> FloatTensor:\n raise NotImplemented()" }, { "identifier": "DistanceShift", "path": "embedding_studio/embeddings/metrics/distance_shift.py", "snippet": "class DistanceShift(MetricCalculator):\n def _calc_dist_shift(\n self,\n session: ClickstreamSession,\n extractor: FeaturesExtractor,\n items_storage: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> float:\n # TODO: encapsulate inference in one class / object\n query_vector: FloatTensor = extractor.model.forward_query(\n query_retriever(session.query)\n )\n items_vectors: FloatTensor = extractor.model.forward_items(\n items_storage.items_by_ids(session.events + session.not_events)\n )\n\n ranks: FloatTensor = (\n extractor.ranker(query_vector, items_vectors).cpu().tolist()\n )\n\n # for similarity ranks should be higher for results of not irrelevant sessions,\n # for distances should be vice versa\n target: int = 1 if extractor.is_similarity else -1\n compare = lambda prev, new: target * float(new - prev)\n 
results: List[str] = session.events\n if session.is_irrelevant:\n results = session.results\n compare = lambda prev, new: target * float(prev - new)\n\n return float(\n np.mean(\n [\n compare(session.ranks[id_], new_rank)\n for id_, new_rank in zip(results, ranks)\n ]\n )\n )\n\n @torch.no_grad()\n def __call__(\n self,\n batch: List[Tuple[ClickstreamSession, ClickstreamSession]],\n extractor: FeaturesExtractor,\n items_storage: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> List[MetricValue]:\n \"\"\"Calculate metric, that means how ranks of provided sessions were changed .\n\n :param batch: batch of pairs clickstream sessions (not_irrelevant, irrelevant)\n :param extractor: object to extract SessionFeatures out of provided sessions\n :param items_storage: items dataset\n :param query_retriever: how to retrieve a value related to session query\n :return: list of calculated not_irrelevant_dist_shift and irrelevant_dist_shift metrics\n \"\"\"\n not_irrelevenat_shifts: List[float] = []\n irrelevenat_shifts: List[float] = []\n for index, (not_irrelevenat_session, irrelevant_session) in enumerate(\n batch\n ):\n if not_irrelevenat_session is not None:\n not_irrelevenat_shifts.append(\n self._calc_dist_shift(\n not_irrelevenat_session,\n extractor,\n items_storage,\n query_retriever,\n )\n )\n\n if irrelevant_session is not None:\n irrelevenat_shifts.append(\n self._calc_dist_shift(\n irrelevant_session,\n extractor,\n items_storage,\n query_retriever,\n )\n )\n\n return [\n MetricValue(\n \"not_irrelevant_dist_shift\",\n float(np.mean(not_irrelevenat_shifts))\n if len(not_irrelevenat_shifts) > 0\n else 0.0,\n ),\n MetricValue(\n \"irrelevant_dist_shift\",\n float(np.mean(irrelevenat_shifts))\n if len(irrelevenat_shifts) > 0\n else 0.0,\n ),\n ]" }, { "identifier": "MetricCalculator", "path": "embedding_studio/embeddings/metrics/metric.py", "snippet": "class MetricCalculator(ABC):\n \"\"\"Interface of metrics calculator\"\"\"\n\n @abstractmethod\n @torch.no_grad()\n def __call__(\n self,\n batch: List[Tuple[ClickstreamSession, ClickstreamSession]],\n extractor: FeaturesExtractor,\n items_storage: ItemsStorage,\n query_retriever: QueryRetriever,\n ) -> List[MetricValue]:\n \"\"\"Calculate abstract metric value over provided batch of items.\n\n :param batch: batch of pairs clickstream sessions (not_irrelevant, irrelevant)\n :param extractor: object to extract SessionFeatures out of provided sessions\n :param items_storage: items dataset\n :param query_retriever: how to retrieve a value related to session query\n :return: list of calculated metrics\n \"\"\"" }, { "identifier": "EmbeddingsModelInterface", "path": "embedding_studio/embeddings/models/interface.py", "snippet": "class EmbeddingsModelInterface(pl.LightningModule):\n def __init__(self, same_query_and_items: bool = False):\n \"\"\"In search we have two entities, which could be multi domain: query and search result (item).\n This is the interface we used in fine-tuning procedure.\n\n :param same_query_and_items: are query and items models acutally the same model (default: False)\n \"\"\"\n super(EmbeddingsModelInterface, self).__init__()\n self.same_query_and_items = same_query_and_items\n\n @abstractmethod\n def get_query_model_params(self) -> Iterator[Parameter]:\n pass\n\n @abstractmethod\n def get_items_model_params(self) -> Iterator[Parameter]:\n pass\n\n @abstractmethod\n def fix_query_model(self, num_fixed_layers: int):\n \"\"\"One of fine-tuning hyperparams is num of fixed layers at a query model\n\n :param 
num_fixed_layers: how many layers to fix\n \"\"\"\n\n @abstractmethod\n def unfix_query_model(self):\n \"\"\"Unfix all layers of a query model.\"\"\"\n\n @abstractmethod\n def fix_item_model(self, num_fixed_layers: int):\n \"\"\"One of fine-tuning hyperparams is num of fixed layers at an item model\n\n :param num_fixed_layers: how many layers to fix\n \"\"\"\n\n @abstractmethod\n def unfix_item_model(self):\n \"\"\"Unfix all layers of an item model.\"\"\"\n\n @abstractmethod\n def forward_query(self, query: Any) -> FloatTensor:\n pass\n\n @abstractmethod\n def forward_items(self, items: List[Any]) -> FloatTensor:\n pass" }, { "identifier": "ExperimentsManager", "path": "embedding_studio/workers/fine_tuning/experiments/experiments_tracker.py", "snippet": "class ExperimentsManager:\n def __init__(\n self,\n tracking_uri: str,\n main_metric: str,\n accumulators: List[MetricsAccumulator],\n is_loss: bool = False,\n n_top_runs: int = 10,\n requirements: Optional[str] = None,\n retry_config: Optional[RetryConfig] = None,\n ):\n \"\"\"Wrapper over mlflow package to manage certain fine-tuning experiments.\n\n :param tracking_uri: url of MLFlow server\n :param main_metric: name of main metric that will be used to find best model\n :param accumulators: accumulators of metrics to be logged\n :param is_loss: is main metric loss (if True, then best quality is minimal) (default: False)\n :param n_top_runs: how many hyper params group consider to be used in following tuning steps (default: 10)\n :param requirements: extra requirements to be passed to mlflow.pytorch.log_model (default: None)\n :param retry_config: retry policy (default: None)\n \"\"\"\n if not isinstance(tracking_uri, str) or len(tracking_uri) == 0:\n raise ValueError(\n f\"MLFlow tracking URI value should be a not empty string\"\n )\n mlflow.set_tracking_uri(tracking_uri)\n self._tracking_uri = tracking_uri\n if self._tracking_uri.endswith(\"/\"):\n self._tracking_uri = self._tracking_uri[:-1]\n\n self.retry_config = (\n retry_config\n if retry_config\n else ExperimentsManager._get_default_retry_config()\n )\n self.attempt_exception_types = [RestException]\n\n if not isinstance(main_metric, str) or len(main_metric) == 0:\n raise ValueError(f\"main_metric value should be a not empty string\")\n self.main_metric = main_metric\n self._metric_field = f\"metrics.{self.main_metric}\"\n\n self._n_top_runs = n_top_runs\n self._is_loss = is_loss\n\n if len(accumulators) == 0:\n logger.warning(\n \"No accumulators were provided, there will be no metrics logged except loss\"\n )\n self._accumulators = accumulators\n\n self._requirements: List[str] = (\n _get_base_requirements() if requirements is None else requirements\n )\n\n self._iteration_experiment = None\n self._tuning_iteration = None\n self._tuning_iteration_id = None\n\n self._run = None\n self._run_params = None\n self._run_id = None\n\n def _check_artifact_exists(self, run_id, artifact_path):\n client = mlflow.MlflowClient()\n artifacts = client.list_artifacts(run_id, path=artifact_path)\n return any(artifact.path == artifact_path for artifact in artifacts)\n\n @staticmethod\n def _get_default_retry_config() -> RetryConfig:\n default_retry_params = RetryParams(\n max_attempts=settings.DEFAULT_MAX_ATTEMPTS,\n wait_time_seconds=settings.DEFAULT_WAIT_TIME_SECONDS,\n )\n\n config = RetryConfig(default_params=default_retry_params)\n config[\"log_metric\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_METRIC_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_METRIC_WAIT_TIME_SECONDS,\n )\n 
config[\"log_param\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_PARAM_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_PARAM_WAIT_TIME_SECONDS,\n )\n config[\"log_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOG_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOG_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"load_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_LOAD_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_LOAD_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"delete_model\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_MODEL_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_MODEL_WAIT_TIME_SECONDS,\n )\n config[\"search_runs\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_RUNS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_RUNS_WAIT_TIME_SECONDS,\n )\n config[\"end_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_END_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_END_RUN_WAIT_TIME_SECONDS,\n )\n config[\"get_run\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_RUN_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_RUN_WAIT_TIME_SECONDS,\n )\n config[\"search_experiments\"] = RetryParams(\n max_attempts=settings.MLFLOW_SEARCH_EXPERIMENTS_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_SEARCH_EXPERIMENTS_WAIT_TIME_SECONDS,\n )\n config[\"delete_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_DELETE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_DELETE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"create_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_CREATE_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_CREATE_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n config[\"get_experiment\"] = RetryParams(\n max_attempts=settings.MLFLOW_GET_EXPERIMENT_ATTEMPTS,\n wait_time_seconds=settings.MLFLOW_GET_EXPERIMENT_WAIT_TIME_SECONDS,\n )\n\n return config\n\n @property\n def is_loss(self) -> bool:\n return self._is_loss\n\n def __del__(self):\n self.finish_run()\n self.finish_iteration()\n\n def is_retryable_error(self, e: Exception) -> bool:\n return False\n\n def _get_model_exists_filter(self) -> str:\n return \"metrics.model_uploaded = 1\"\n\n def _get_artifact_url(self, run_id: str, artifact_path: str) -> str:\n return (\n f\"{self._tracking_uri}/get-artifact?path=\"\n f'{urllib.parse.quote(artifact_path, safe=\"\")}&run_uuid={run_id}'\n )\n\n @retry_method(name=\"log_model\")\n def upload_initial_model(self, model: EmbeddingsModelInterface):\n \"\"\"Upload the very first, initial model to the mlflow server\n\n :param model: model to be uploaded\n \"\"\"\n self.finish_iteration()\n experiment_id = get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME)\n if experiment_id is None:\n logger.info(\n f\"Can't find any active iteration with name: {INITIAL_EXPERIMENT_NAME}\"\n )\n try:\n logger.info(\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n except MlflowException as e:\n if \"Cannot set a deleted experiment\" in str(e):\n logger.error(\n f\"Creation of initial experiment is failed: experiment with the same name {INITIAL_EXPERIMENT_NAME} is deleted, but not archived\"\n )\n experiments = mlflow.search_experiments(\n view_type=mlflow.entities.ViewType.ALL\n )\n deleted_experiment_id = None\n\n for exp in experiments:\n if exp.name == INITIAL_EXPERIMENT_NAME:\n deleted_experiment_id = exp.experiment_id\n break\n\n logger.info(\n f\"Restore deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().restore_experiment(\n 
deleted_experiment_id\n )\n logger.info(\n f\"Archive deleted experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n deleted_experiment_id,\n INITIAL_EXPERIMENT_NAME + \"_archive\",\n )\n logger.info(\n f\"Delete archived experiment with the same name: {INITIAL_EXPERIMENT_NAME}\"\n )\n mlflow.delete_experiment(deleted_experiment_id)\n logger.info(f\"Create initial experiment\")\n mlflow.create_experiment(INITIAL_EXPERIMENT_NAME)\n else:\n raise e\n\n with mlflow.start_run(\n experiment_id=get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n run_name=INITIAL_RUN_NAME,\n ) as run:\n logger.info(\n f\"Upload initial model to {INITIAL_EXPERIMENT_NAME} / {INITIAL_RUN_NAME}\"\n )\n if self._check_artifact_exists(\n get_run_id_by_name(\n get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME),\n INITIAL_RUN_NAME,\n ),\n \"model\",\n ):\n logger.info(\"Model is already uploaded\")\n return\n\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n logger.info(\"Uploading is finished\")\n\n @retry_method(name=\"load_model\")\n def download_initial_model(self) -> EmbeddingsModelInterface:\n \"\"\"Download initial model.\n\n :return: initial embeddings model\n \"\"\"\n model_uri: str = f\"runs:/{get_run_id_by_name(get_experiment_id_by_name(INITIAL_EXPERIMENT_NAME), INITIAL_RUN_NAME)}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_runs\")\n def get_top_params(self) -> Optional[List[FineTuningParams]]:\n \"\"\"Get top N previous fine-tuning iteration best params\n\n :return: fine-tuning iteration params\n \"\"\"\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id:\n logger.warning(\n \"Can't retrieve top params, no previous iteration in history\"\n )\n return None\n\n else:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[last_session_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and only finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"Can't retrieve top params, no previous iteration's finished runs with uploaded model in history\"\n )\n return None\n\n # Get the indices that would sort the DataFrame based on the specified parameter\n sorted_indices: np.ndarray = np.argsort(\n runs[self._metric_field].values\n )\n if not self.is_loss:\n sorted_indices = sorted_indices[\n ::-1\n ] # Use [::-1] to sort in descending order\n\n # Extract the top N rows based on the sorted indices\n top_n_rows: np.ndarray = runs.iloc[\n sorted_indices[: self._n_top_runs]\n ]\n\n # Define a mapping dictionary to remove the \"params.\" prefix\n column_mapping: Dict[str, str] = {\n col: col.replace(\"params.\", \"\") for col in top_n_rows.columns\n }\n\n # Rename the columns\n top_n_rows: np.ndarray = top_n_rows.rename(\n columns=column_mapping\n ).to_dict(orient=\"records\")\n\n return [FineTuningParams(**row) for row in top_n_rows]\n\n def _get_best_previous_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n last_session_id: Optional[str] = self.get_previous_iteration_id()\n if initial_id == last_session_id or last_session_id is None:\n return None, True\n else:\n run_id, _ = 
self._get_best_quality(last_session_id)\n return run_id, False\n\n def _get_best_current_run_id(self) -> Tuple[Optional[str], bool]:\n initial_id: Optional[str] = get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n if (\n initial_id == self._tuning_iteration_id\n or self._tuning_iteration_id is None\n ):\n return None, True\n else:\n run_id, _ = self._get_best_quality(self._tuning_iteration_id)\n return run_id, False\n\n @retry_method(name=\"load_model\")\n def get_last_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, no previous iteration in history\"\n )\n return None\n else:\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no previous iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_current_model_url(self) -> Optional[str]:\n run_id, is_initial = self._get_best_current_run_id()\n if is_initial:\n logger.warning(\n \"Can't get the best model URL, current run is initial\"\n )\n return None\n\n if run_id is None:\n logger.warning(\n \"Can't get the best model URL, no iterations \"\n \"finished runs with uploaded model in history\"\n )\n return None\n path = MODEL_ARTIFACT_PATH\n return self._get_artifact_url(run_id, path)\n\n @retry_method(name=\"load_model\")\n def get_last_model(self) -> EmbeddingsModelInterface:\n \"\"\"Get previous iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n run_id, is_initial = self._get_best_previous_run_id()\n if is_initial:\n logger.warning(\n \"Download initial model, no previous iteration in history\"\n )\n return self.download_initial_model()\n\n else:\n if run_id is None:\n logger.warning(\n \"Download initial model, no previous iteration's \"\n \"finished runs with uploaded model in history\"\n )\n return self.download_initial_model()\n else:\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"load_model\")\n def get_current_model(self) -> Optional[EmbeddingsModelInterface]:\n \"\"\"Get current iteration best embedding model.\n\n :return: best embedding model\n \"\"\"\n if self._tuning_iteration is None:\n logger.error(\"No current iteration, can't get any model\")\n return\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n logger.info(\"Download initial model\")\n return self.download_initial_model()\n\n run_id, is_initial = self._get_best_current_run_id()\n model_uri: str = f\"runs:/{run_id}/model\"\n logger.info(f\"Download the model from {model_uri}\")\n model = mlflow.pytorch.load_model(model_uri)\n logger.info(\"Downloading is finished\")\n return model\n\n @retry_method(name=\"search_experiments\")\n def get_previous_iteration_id(self) -> Optional[str]:\n if (\n self._tuning_iteration == INITIAL_EXPERIMENT_NAME\n or self._tuning_iteration is None\n ):\n logger.warning(\n f\"Can't find previous iteration - no current iteration was setup\"\n )\n return None\n\n plugin_name = f\"{self._tuning_iteration.plugin_name}\"\n experiments: List[Experiment] = [\n e\n for e in mlflow.search_experiments()\n if (\n e.name.startswith(EXPERIMENT_PREFIX)\n and e.name.find(plugin_name) != -1\n and e.name != str(self._tuning_iteration)\n )\n ]\n if 
len(experiments) == 0:\n logger.warning(\"No iteration found\")\n return None\n else:\n return max(\n experiments, key=lambda exp: exp.creation_time\n ).experiment_id\n\n @retry_method(name=\"delete_experiment\")\n def delete_previous_iteration(self):\n experiment_id: Optional[str] = self.get_previous_iteration_id()\n\n logger.info(\"Delete models of previous iteration.\")\n runs = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"]\n run_ids = runs[\"run_id\"].tolist()\n\n for run_id in run_ids:\n self.delete_model(run_id, experiment_id)\n\n if experiment_id is not None:\n logger.info(\n f\"Iteration with ID {experiment_id} is going to be deleted\"\n )\n mlflow.tracking.MlflowClient().rename_experiment(\n experiment_id, INITIAL_EXPERIMENT_NAME + \"_archive\"\n )\n mlflow.delete_experiment(experiment_id)\n else:\n logger.warning(\n \"Can't delete a previous iteration, no previous iteration in history\"\n )\n\n @retry_method(name=\"create_experiment\")\n def set_iteration(self, iteration: FineTuningIteration):\n \"\"\"Start a new fine-tuning session.\n\n :param iteration: fine-tuning iteration info\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n self.finish_iteration()\n\n logger.info(\"Start a new fine-tuning iterations\")\n\n self._tuning_iteration = iteration\n self._tuning_iteration_id = get_experiment_id_by_name(str(iteration))\n if self._tuning_iteration_id is None:\n self._tuning_iteration_id = mlflow.create_experiment(\n str(iteration)\n )\n\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n @retry_method(name=\"start_run\")\n def set_run(self, params: FineTuningParams) -> bool:\n \"\"\"Start a new run with provided fine-tuning params\n\n :param params: provided fine-tuning params\n :return: True if it's a finished run (otherwise False)\n \"\"\"\n convert_value = (\n lambda value: \", \".join(map(str, value))\n if isinstance(value, list)\n else value\n )\n\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n # TODO: implement exception\n raise ValueError(\"You can't start run for initial iteration\")\n\n if self._run is not None:\n self.finish_run()\n\n logger.info(\n f\"Start a new run for iteration {self._tuning_iteration_id} with params:\\n\\t{str(params)}\"\n )\n\n self._run_params = params\n run_name: str = self._run_params.id\n self._run_id = get_run_id_by_name(self._tuning_iteration_id, run_name)\n\n self._run = mlflow.start_run(\n self._run_id, self._tuning_iteration_id, run_name\n )\n if self._run_id is None:\n self._run_id = self._run.info.run_id\n for key, value in dict(self._tuning_iteration).items():\n mlflow.log_param(key, convert_value(value))\n\n for key, value in dict(self._run_params).items():\n mlflow.log_param(key, convert_value(value))\n\n mlflow.log_metric(\"model_uploaded\", 0)\n\n return False\n else:\n return self._run.info.status == \"FINISHED\"\n\n @retry_method(name=\"search_runs\")\n def model_is_uploaded(self) -> bool:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs[\"run_id\"] == self._run_id]\n return runs.shape[0] > 0\n\n @retry_method(name=\"get_experiment\")\n def finish_iteration(self):\n logger.info(f\"Finish current iteration {self._tuning_iteration_id}\")\n self._tuning_iteration = INITIAL_EXPERIMENT_NAME\n self._tuning_iteration_id = 
get_experiment_id_by_name(\n INITIAL_EXPERIMENT_NAME\n )\n\n if self._tuning_iteration_id is None:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_name=INITIAL_EXPERIMENT_NAME\n )\n self._tuning_iteration_id = (\n self._iteration_experiment.experiment_id\n )\n else:\n self._iteration_experiment = mlflow.set_experiment(\n experiment_id=self._tuning_iteration_id\n )\n\n logger.info(f\"Current iteration is finished\")\n\n @retry_method(name=\"end_run\")\n def finish_run(self):\n logger.info(\n f\"Finish current run {self._tuning_iteration_id} / {self._run_id}\"\n )\n for accumulator in self._accumulators:\n accumulator.clear()\n\n mlflow.end_run()\n\n # Set params to default None\n self._run = None\n self._run_params = None\n self._run_id = None\n\n logger.info(f\"Current run is finished\")\n\n @retry_method(name=\"log_param\")\n def _set_model_as_deleted(self, run_id: str, experiment_id: str):\n with mlflow.start_run(\n run_id=run_id, experiment_id=experiment_id\n ) as run:\n mlflow.log_metric(\"model_deleted\", 1)\n mlflow.log_metric(\"model_uploaded\", 0)\n\n @retry_method(name=\"delete_model\")\n def _delete_model(self, run_id: str, experiment_id: str) -> bool:\n logger.warning(\n f\"Unable to delete a model for run {run_id}, MLFlow has no such functionality, please implement on your own.\"\n )\n return False\n\n @retry_method(name=\"get_run\")\n def delete_model(self, run_id: str, experiment_id: Optional[str] = None):\n experiment_id = (\n self._tuning_iteration_id\n if experiment_id is None\n else experiment_id\n )\n if experiment_id is None:\n raise ValueError(\n f\"No iteration was initialized, unable to delete model.\"\n )\n\n if experiment_id == INITIAL_EXPERIMENT_NAME:\n raise ValueError(f\"Initial model can't be deleted.\")\n\n run_info = None\n try:\n run_info = mlflow.get_run(run_id=run_id)\n except RestException as e:\n if e.get_http_status_code() == 404:\n logger.exception(f\"Run with ID {run_id} doesn't exist.\")\n else:\n raise e\n\n if run_info is not None:\n runs: pd.DataFrame = mlflow.search_runs(\n filter_string=self._get_model_exists_filter()\n )\n runs = runs[runs[\"run_id\"] == run_id]\n if runs.shape[0] == 0:\n logger.warning(\n f\"Run {run_id} has no model being uploaded. 
Nothing to delete\"\n )\n\n else:\n deleted = None\n try:\n deleted = self._delete_model(run_id, experiment_id)\n except MaxAttemptsReachedException:\n pass\n\n if deleted:\n self._set_model_as_deleted(run_id, experiment_id)\n\n @retry_method(name=\"log_model\")\n def save_model(\n self, model: EmbeddingsModelInterface, best_only: bool = True\n ):\n \"\"\"Save fine-tuned embedding model\n\n :param model: model to be saved\n :param best_only: save only if it's the best (default: True)\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"Can't save not initial model for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n logger.info(\n f\"Save model for {self._tuning_iteration_id} / {self._run_id}\"\n )\n if not best_only:\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n else:\n current_quality = self.get_quality()\n best_run_id, best_quality = self.get_best_quality()\n\n if best_run_id is None or (\n current_quality <= best_quality\n if self.is_loss\n else current_quality >= best_quality\n ):\n mlflow.pytorch.log_model(\n model, \"model\", pip_requirements=self._requirements\n )\n mlflow.log_metric(\"model_uploaded\", 1)\n logger.info(\"Upload is finished\")\n\n if best_run_id is not None:\n self.delete_model(best_run_id)\n else:\n logger.info(\"Not the best run - ignore saving\")\n\n @retry_method(name=\"log_metric\")\n def save_metric(self, metric_value: MetricValue):\n \"\"\"Accumulate and save metric value\n\n :param metric_value: value to be logged\n \"\"\"\n for accumulator in self._accumulators:\n for name, value in accumulator.accumulate(metric_value):\n mlflow.log_metric(name, value)\n\n @retry_method(name=\"search_runs\")\n def get_quality(self) -> float:\n \"\"\"Current run quality value\n\n :return: quality value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n if self._run_id is None:\n raise ValueError(\"There is no current Run\")\n\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[self._tuning_iteration_id]\n )\n quality: np.ndarray = runs[runs.run_id == self._run_id][\n self._metric_field\n ]\n return float(quality) if quality.shape[0] == 1 else float(quality[0])\n\n @retry_method(name=\"search_runs\")\n def _get_best_quality(\n self, experiment_id: str\n ) -> Tuple[Optional[str], float]:\n runs: pd.DataFrame = mlflow.search_runs(\n experiment_ids=[experiment_id],\n filter_string=self._get_model_exists_filter(),\n )\n runs = runs[runs.status == \"FINISHED\"] # and not finished ones\n if runs.shape[0] == 0:\n logger.warning(\n \"No finished experiments found with model uploaded, except initial\"\n )\n return None, 0.0\n\n else:\n value: float = (\n runs[self._metric_field].min()\n if self.is_loss\n else runs[self._metric_field].max()\n )\n best: pd.DataFrame = runs[runs[self._metric_field] == value][\n [\"run_id\", self._metric_field]\n ]\n return list(best.itertuples(index=False, name=None))[0]\n\n def get_best_quality(self) -> Tuple[str, float]:\n \"\"\"Get current fine-tuning iteration best quality\n\n :return: run_id and best metric value\n \"\"\"\n if self._tuning_iteration == INITIAL_EXPERIMENT_NAME:\n raise ValueError(\n f\"No metrics for {INITIAL_EXPERIMENT_NAME} experiment\"\n )\n\n return 
self._get_best_quality(self._tuning_iteration_id)" }, { "identifier": "FineTuningParams", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_params.py", "snippet": "class FineTuningParams(BaseModel):\n \"\"\"Params of fine-tuning procedure\n\n :param num_fixed_layers: number of fixed embeddings layers\n :param query_lr: learning rate of query model optimizer\n :param items_lr: learning rate of items model optimizer\n :param query_weight_decay: weight decay of query model optimizer\n :param items_weight_decay: weight decay of items model optimizer\n :param margin: margin from MarginRankingLoss\n :param not_irrelevant_only: use only not irrelevant sessions\n :param negative_downsampling: ratio of negative samples to be used\n :param min_abs_difference_threshold: filter out soft pairs abs(neg_dist - pos_dist) < small value (default: 0.0)\n :param max_abs_difference_threshold: filter out hard pairs abs(neg_dist - pos_dist) > huge value (default: 1.0)\n :param examples_order: order of passing examples to a trainer (default: None)\n \"\"\"\n\n num_fixed_layers: int\n query_lr: float\n items_lr: float\n query_weight_decay: float\n items_weight_decay: float\n margin: float\n not_irrelevant_only: bool\n negative_downsampling: float\n min_abs_difference_threshold: float = 0.0\n max_abs_difference_threshold: float = 1.0\n examples_order: List[ExamplesType] = [ExamplesType.all_examples]\n\n class Config:\n arbitrary_types_allowed = True\n\n @validator(\"examples_order\", pre=True, always=True)\n def validate_examples_order(cls, value):\n if isinstance(value, str):\n value = list(map(int, value.split(\",\")))\n elif isinstance(value, tuple):\n value = list(value)\n return [ExamplesType(v) for v in value]\n\n @validator(\"items_lr\", \"query_lr\", pre=True, always=True)\n def validate_positive_float(cls, value):\n if not (isinstance(value, float) and value > 0):\n raise ValueError(f\"{value} must be a positive float\")\n return value\n\n @validator(\n \"items_weight_decay\", \"query_weight_decay\", pre=True, always=True\n )\n def validate_non_negative_float(cls, value):\n if not (isinstance(value, float) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative float\")\n return value\n\n @validator(\"margin\", pre=True, always=True)\n def validate_non_negative_float_margin(cls, value):\n if not (isinstance(value, float) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative float\")\n return value\n\n @validator(\"num_fixed_layers\", pre=True, always=True)\n def validate_non_negative_int(cls, value):\n if not (isinstance(value, int) and value >= 0):\n raise ValueError(f\"{value} must be a non-negative integer\")\n return value\n\n @root_validator(skip_on_failure=True)\n def validate_example_order(cls, values):\n examples_order = values.get(\"examples_order\")\n if examples_order:\n if isinstance(examples_order, str):\n examples_order = list(map(int, examples_order.split(\",\")))\n elif isinstance(examples_order, tuple):\n examples_order = list(examples_order)\n values[\"examples_order\"] = [\n ExamplesType(v) for v in examples_order\n ]\n return values\n\n @property\n def id(self) -> str:\n # Convert the value to bytes (assuming it's a string)\n value_bytes: bytes = str(self).encode(\"utf-8\")\n\n # Create a hash object\n hash_object = hashlib.sha256()\n\n # Update the hash object with the value\n hash_object.update(value_bytes)\n\n # Get the hexadecimal representation of the hash\n unique_id: str = hash_object.hexdigest()\n\n return unique_id\n\n def 
__str__(self) -> str:\n vals: List[str] = []\n for key, value in sorted(dict(self).items()):\n value = (\n \",\".join(map(str, value)) if isinstance(value, list) else value\n )\n vals.append(f\"{key}: {value}\")\n\n return \" / \".join(vals)" }, { "identifier": "FineTuningSettings", "path": "embedding_studio/workers/fine_tuning/experiments/finetuning_settings.py", "snippet": "class FineTuningSettings(BaseModel):\n \"\"\"\n\n :param loss_func: loss object for a ranking task\n :param metric_calculators: list of trackable metrics calculators (default: None)\n by default only DistanceShift metric\n :param ranker: ranking function (query, items) -> ranks (defult: cosine similarity)\n :param is_similarity: is ranking function similarity like or distance (default: True)\n :param confidence_calculator: function to calculate results confidences (default: dummy_confidences)\n :param step_size: optimizer steps (default: 500)\n :param gamma: optimizers gamma (default: 0.9)\n :param num_epochs: num of training epochs (default: 10)\n :param batch_size: count of sessions in a batch (default: 1)\n :param test_each_n_sessions: frequency of validation, if value in range [0, 1] - used as ratio (default: -1)\n \"\"\"\n\n loss_func: RankingLossInterface\n metric_calculators: Optional[List[MetricCalculator]] = None\n ranker: Optional[\n Callable[[FloatTensor, FloatTensor], FloatTensor]\n ] = COSINE_SIMILARITY\n is_similarity: Optional[bool] = True\n confidence_calculator: Optional[Callable] = dummy_confidences\n step_size: Optional[int] = 500\n gamma: Optional[float] = 0.9\n num_epochs: Optional[int] = 10\n batch_size: Optional[int] = 1\n test_each_n_sessions: Optional[Union[float, int]] = -1\n\n class Config:\n arbitrary_types_allowed = True" }, { "identifier": "MetricValue", "path": "embedding_studio/workers/fine_tuning/experiments/metrics_accumulator.py", "snippet": "class MetricValue:\n def __init__(self, name: str, value: float):\n if not isinstance(name, str) or len(name) == 0:\n raise ValueError(\"MetricValue's name should not be empty\")\n self._name = name\n\n if not isinstance(value, float):\n raise ValueError(\"MetricValue's value should not be numeric\")\n self._value = value\n\n @property\n def name(self) -> str:\n return self._name\n\n @property\n def value(self) -> float:\n return self._value\n\n def add_prefix(self, prefix: str):\n self._name = f\"{prefix}_{self._name}\"\n return self" } ]
import logging
import numpy as np
import pytorch_lightning as pl
import torch
from collections import defaultdict
from typing import Callable, List, Optional, Tuple, Union
from datasets import DatasetDict
from torch import FloatTensor, Tensor
from torch.optim import SGD, Optimizer
from torch.optim.lr_scheduler import LRScheduler, StepLR
from embedding_studio.embeddings.data.clickstream.query_retriever import (
    QueryRetriever,
)
from embedding_studio.embeddings.data.clickstream.raw_session import (
    ClickstreamSession,
)
from embedding_studio.embeddings.features.event_confidences import (
    dummy_confidences,
)
from embedding_studio.embeddings.features.extractor import (
    COSINE_SIMILARITY,
    FeaturesExtractor,
)
from embedding_studio.embeddings.features.session_features import (
    SessionFeatures,
)
from embedding_studio.embeddings.losses.ranking_loss_interface import (
    RankingLossInterface,
)
from embedding_studio.embeddings.metrics.distance_shift import DistanceShift
from embedding_studio.embeddings.metrics.metric import MetricCalculator
from embedding_studio.embeddings.models.interface import (
    EmbeddingsModelInterface,
)
from embedding_studio.workers.fine_tuning.experiments.experiments_tracker import (
    ExperimentsManager,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_params import (
    FineTuningParams,
)
from embedding_studio.workers.fine_tuning.experiments.finetuning_settings import (
    FineTuningSettings,
)
from embedding_studio.workers.fine_tuning.experiments.metrics_accumulator import (
    MetricValue,
)
15,373
logger = logging.getLogger(__name__)


class EmbeddingsFineTuner(pl.LightningModule):
    def __init__(
        self,
        model: EmbeddingsModelInterface,
        items_storages: DatasetDict,
        query_retriever: QueryRetriever,
logger = logging.getLogger(__name__)


class EmbeddingsFineTuner(pl.LightningModule):
    def __init__(
        self,
        model: EmbeddingsModelInterface,
        items_storages: DatasetDict,
        query_retriever: QueryRetriever,
loss_func: RankingLossInterface,
6
2023-10-31 00:33:13+00:00
24k
facebookresearch/minimax
src/minimax/runners/xp_runner.py
[ { "identifier": "EvalRunner", "path": "src/minimax/runners/eval_runner.py", "snippet": "class EvalRunner:\n def __init__(\n self,\n pop,\n env_names,\n env_kwargs=None,\n n_episodes=10,\n agent_idxs='*',\n render_mode=None):\n\n self.pop = pop\n\n if isinstance(agent_idxs, str):\n if \"*\" in agent_idxs:\n self.agent_idxs = np.arange(pop.n_agents)\n else:\n self.agent_idxs = \\\n np.array([int(x) for x in agent_idxs.split(',')])\n else:\n self.agent_idxs = agent_idxs # assume array\n\n assert np.max(self.agent_idxs) < pop.n_agents, \\\n 'Agent index is out of bounds.'\n\n if isinstance(env_names, str):\n env_names = [\n x.strip() for x in env_names.split(',')\n ]\n\n self.n_episodes = n_episodes\n env_infos = create_envs_for_kwargs(env_names, env_kwargs)\n env_names = []\n self.ext_env_names = []\n env_kwargs = []\n for (name, ext_name, kwargs) in env_infos:\n env_names.append(name)\n self.ext_env_names.append(ext_name)\n env_kwargs.append(kwargs)\n self.n_envs = len(env_names)\n\n self.benvs = []\n self.env_params = []\n self.env_has_solved_rate = []\n for env_name, kwargs in zip(env_names, env_kwargs):\n benv = envs.BatchEnv(\n env_name=env_name,\n n_parallel=n_episodes,\n n_eval=1,\n env_kwargs=kwargs,\n wrappers=['monitor_return', 'monitor_ep_metrics']\n )\n self.benvs.append(benv)\n self.env_params.append(benv.env.params)\n self.env_has_solved_rate.append(benv.env.eval_solved_rate is not None)\n\n self.action_dtype = self.benvs[0].env.action_space().dtype\n\n monitored_metrics = self.benvs[0].env.get_monitored_metrics()\n self.rolling_stats = RollingStats(names=monitored_metrics, window=1)\n self._update_ep_stats = jax.vmap(\n jax.vmap(\n self.rolling_stats.update_stats, in_axes=(0,0,0,None)),\n in_axes=(0,0,0,None))\n\n self.test_return_pre = 'test_return'\n self.test_solved_rate_pre = 'test_solved_rate'\n\n self.render_mode = render_mode\n if render_mode:\n from minimax.envs.viz.grid_viz import GridVisualizer\n self.viz = GridVisualizer()\n self.viz.show()\n\n if render_mode == 'ipython':\n from IPython import display\n self.ipython_display = display\n\n def load_checkpoint_state(self, runner_state, state):\n runner_state = list(runner_state)\n runner_state[1] = runner_state[1].load_state_dict(state[1])\n\n return tuple(runner_state)\n\n @partial(jax.jit, static_argnums=(0,2))\n def _get_transition(\n self,\n rng,\n benv,\n params,\n state,\n obs,\n carry,\n zero_carry,\n extra):\n value, pi_params, next_carry = self.pop.act(params, obs, carry)\n pi = self.pop.get_action_dist(pi_params, dtype=self.action_dtype)\n rng, subrng = jax.random.split(rng)\n action = pi.sample(seed=subrng)\n log_pi = pi.log_prob(action)\n\n rng, *vrngs = jax.random.split(rng, self.pop.n_agents+1)\n\n step_args = (jnp.array(vrngs), state, action, extra)\n (next_obs, \n next_state, \n reward, \n done, \n info, \n extra) = benv.step(*step_args)\n\n # Add transition to storage\n step = (obs, action, reward, done, log_pi, value)\n if carry is not None:\n step += (carry,)\n\n # Zero carry if needed\n if carry is not None:\n next_carry = jax.vmap(_tree_util.pytree_select)(\n done, zero_carry, next_carry)\n\n if self.render_mode:\n self.viz.render(\n benv.env.params, \n jax.tree_util.tree_map(lambda x: x[0][0], state))\n if self.render_mode == 'ipython':\n self.ipython_display.display(self.viz.window.fig)\n self.ipython_display.clear_output(wait=True)\n\n return next_state, next_obs, next_carry, done, info, extra\n\n @partial(jax.jit, static_argnums=(0, 2))\n def _rollout_benv(\n self, \n rng, \n benv,\n 
params,\n env_params,\n state,\n obs,\n carry,\n zero_carry,\n extra,\n ep_stats):\n\n def _scan_rollout(scan_carry, rng):\n (state, \n obs, \n carry,\n extra, \n ep_stats) = scan_carry\n \n step = \\\n self._get_transition(\n rng,\n benv,\n params, \n state, \n obs, \n carry, \n zero_carry,\n extra)\n\n (next_state, \n next_obs, \n next_carry, \n done, \n info, \n extra) = step\n\n ep_stats = self._update_ep_stats(ep_stats, done, info, 1)\n\n return (next_state, next_obs, next_carry, extra, ep_stats), None\n\n n_steps = benv.env.max_episode_steps()\n rngs = jax.random.split(rng, n_steps)\n (state, \n obs, \n carry, \n extra,\n ep_stats),_ = jax.lax.scan(\n _scan_rollout,\n (state, obs, carry, extra, ep_stats),\n rngs,\n length=n_steps)\n\n return ep_stats\n\n @partial(jax.jit, static_argnums=(0,))\n def run(self, rng, params):\n \"\"\"\n Rollout agents on each env. \n\n For each env, run n_eval episodes in parallel, \n where each is indexed to return in order.\n \"\"\"\n eval_stats = self.fake_run(rng, params)\n rng, *rollout_rngs = jax.random.split(rng, self.n_envs+1)\n for i, (benv, env_param) in enumerate(zip(self.benvs, self.env_params)):\n rng, *reset_rngs = jax.random.split(rng, self.pop.n_agents+1)\n obs, state, extra = benv.reset(jnp.array(reset_rngs))\n\n if self.pop.agent.is_recurrent:\n rng, subrng = jax.random.split(rng)\n zero_carry = self.pop.init_carry(subrng, obs)\n else:\n zero_carry = None\n\n # Reset episodic stats\n ep_stats = self.rolling_stats.reset_stats(\n batch_shape=(self.pop.n_agents, self.n_episodes))\n\n ep_stats = self._rollout_benv(\n rollout_rngs[i],\n benv,\n jax.lax.stop_gradient(params), \n env_param, \n state, \n obs,\n zero_carry,\n zero_carry,\n extra,\n ep_stats)\n \n env_name = self.ext_env_names[i]\n mean_return = ep_stats['return'].mean(1)\n\n if self.env_has_solved_rate[i]:\n mean_solved_rate = jax.vmap(jax.vmap(benv.env.eval_solved_rate))(ep_stats).mean(1)\n\n for idx in self.agent_idxs:\n eval_stats[f'eval/a{idx}:{self.test_return_pre}:{env_name}'] = mean_return[idx].squeeze()\n if self.env_has_solved_rate[i]:\n eval_stats[f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}'] = mean_solved_rate[idx].squeeze()\n\n return eval_stats\n\n def fake_run(self, rng, params):\n eval_stats = {}\n for i, env_name in enumerate(self.ext_env_names):\n for idx in self.agent_idxs:\n eval_stats.update({\n f'eval/a{idx}:{self.test_return_pre}:{env_name}':0.\n })\n if self.env_has_solved_rate[i]:\n eval_stats.update({\n f'eval/a{idx}:{self.test_solved_rate_pre}:{env_name}':0.,\n })\n\n return eval_stats" }, { "identifier": "DRRunner", "path": "src/minimax/runners/dr_runner.py", "snippet": "class DRRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students. 
\n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchEnv: Manages environment step and reset logic, using a \n\t\tpopulaton of agents.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tstudent_agents,\n\t\tn_students=1,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=256,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tnormalize_return=False,\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\tn_devices=1,\n\t\trender=False):\n\n\t\tassert len(student_agents) == 1, 'Only one type of student supported.'\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.normalize_return = normalize_return\n\t\tself.track_env_metrics = track_env_metrics\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tself.env, self.env_params = envs.make(\n\t\t\tenv_name, \n\t\t\tenv_kwargs=env_kwargs\n\t\t)\n\t\tself._action_shape = self.env.action_space().shape\n\n\t\tself.benv = envs.BatchEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics']\n\t\t)\n\t\tself.action_dtype = self.benv.env.action_space().dtype\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.env.action_space(),\n\t\t\tobs_space=self.env.observation_space(),\n\t\t\tagent=self.student_pop.agent,\n\t\t)\n\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trngs, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), n_parallel=n_parallel)\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.student_pop.init_carry(subrng, obs)\n\t\t\tself.zero_carry = jax.tree_map(lambda x: x.at[:,:self.n_parallel].get(), carry)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = self.student_pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(self.lr),\n\t\t\tend_value=-float(self.lr_final),\n\t\t\ttransition_steps=self.lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.adam(learning_rate=float(self.lr), eps=self.adam_eps)\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=self.student_pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, n_parallel*self.n_eval))\n\n\t\tstart_state = state\n\n\t\treturn (\n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate,\n\t\t\tstart_state, # Used to track metrics from starting state\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2))\n\tdef _get_transition(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tparams, \n\t\trollout, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\n\t\tpi = pop.get_action_dist(pi_params, dtype=self.action_dtype)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\t(next_obs, \n\t\t next_state, \n\t\t reward, \n\t\t done, \n\t\t info, \n\t\t extra) = self.benv.step(jnp.array(vrngs), state, action, extra)\n\n\t\tnext_start_state = jax.vmap(_tree_util.pytree_select)(\n\t\t\tdone, next_state, start_state\n\t\t)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = self.student_rollout.append(rollout, *step)\n\n\t\tif self.render:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn (\n\t\t\trollout, \n\t\t\tnext_state,\n\t\t\tnext_start_state, \n\t\t\tnext_obs, \n\t\t\tnext_carry, \n\t\t\tdone, \n\t\t\tinfo, \n\t\t\textra\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _rollout_students(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\textra=None, \n\t\tep_stats=None):\n\t\trollout = self.student_rollout.reset()\n\n\t\trngs = jax.random.split(rng, self.n_rollout_steps)\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\trollout, state, start_state, obs, carry, done, extra, ep_stats, train_state = scan_carry \n\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng, \n\t\t\t\t\tself.student_pop, \n\t\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\t\trollout, \n\t\t\t\t\tstate,\n\t\t\t\t\tstart_state, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry,\n\t\t\t\t\tdone, \n\t\t\t\t\textra)\n\t\t\t(rollout, \n\t\t\t next_state,\n\t\t\t next_start_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tep_stats = self._update_ep_stats(ep_stats, done, info)\n\n\t\t\treturn (\n\t\t\t\trollout, \n\t\t\t\tnext_state,\n\t\t\t\tnext_start_state,\n\t\t\t\tnext_obs, \n\t\t\t\tnext_carry,\n\t\t\t\tdone,\n\t\t\t\textra, 
\n\t\t\t\tep_stats,\n\t\t\t\ttrain_state), None\n\n\t\t(rollout, \n\t\t state, \n\t\t start_state, \n\t\t obs, \n\t\t carry, \n\t\t done,\n\t\t extra, \n\t\t ep_stats,\n\t\t train_state), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t start_state,\n\t\t\t obs, \n\t\t\t carry, \n\t\t\t done,\n\t\t\t extra, \n\t\t\t ep_stats,\n\t\t\t train_state),\n\t\t\trngs,\n\t\t\tlength=self.n_rollout_steps,\n\t\t)\n\n\t\treturn rollout, state, start_state, obs, carry, extra, ep_stats, train_state\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None):\n\t\tstats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tstats.update(update_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], stats) # for agent0\n\t\t\t\t_stats.update({f'a{i}/{k}':v for k,v in _student_stats.items()})\n\t\t\tstats = _stats\n\n\t\tif self.track_env_metrics:\n\t\t\tmean_env_metrics = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(env_metrics)\n\t\t\tmean_env_metrics = {f'env/{k}':v for k,v in mean_env_metrics.items()}\n\n\t\t\tif self.n_students > 1:\n\t\t\t\t_env_metrics = {}\n\t\t\t\tfor i in range(self.n_students):\n\t\t\t\t\t_student_env_metrics = jax.tree_util.tree_map(lambda x: x[i], mean_env_metrics) # for agent0\n\t\t\t\t\t_env_metrics.update({f'{k}_a{i}':v for k,v in _student_env_metrics.items()})\n\t\t\t\tmean_env_metrics = _env_metrics\n\n\t\t\tstats.update(mean_env_metrics)\n\n\t\tif self.n_students == 1:\n\t\t\tstats = jax.tree_map(lambda x: x[0], stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\t\"\"\"\n\t\tPerform one update step: rollout all students and teachers + update with PPO\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\t# PPOAgent vmaps over 
the train state and batch. Batch must be N x EM\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# Collect env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(rollout_start_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats\n\t\t)" }, { "identifier": "PAIREDRunner", "path": "src/minimax/runners/paired_runner.py", "snippet": "class PAIREDRunner:\n\t\"\"\"\n\tOrchestrates rollouts across one or more students and teachers. \n\tThe main components at play:\n\t- AgentPop: Manages train state and batched inference logic \n\t\tfor a population of agents.\n\t- BatchUEDEnv: Manages environment step and reset logic for a \n\t\tpopulation of agents batched over a pair of student and \n\t\tteacher MDPs.\n\t- RolloutStorage: Manages the storing and sampling of collected txns.\n\t- PPO: Handles PPO updates, which take a train state + batch of txns.\n\t\"\"\"\n\tdef __init__(\n\t\tself, \n\t\tenv_name,\n\t\tenv_kwargs,\n\t\tued_env_kwargs,\n\t\tstudent_agents,\n\t\tn_students=2,\n\t\tn_parallel=1,\n\t\tn_eval=1,\n\t\tn_rollout_steps=250,\n\t\tlr=1e-4,\n\t\tlr_final=None,\n\t\tlr_anneal_steps=0,\n\t\tmax_grad_norm=0.5,\n\t\tdiscount=0.99,\n\t\tgae_lambda=0.95,\n\t\tadam_eps=1e-5,\n\t\tteacher_lr=None,\n\t\tteacher_lr_final=None,\n\t\tteacher_lr_anneal_steps=None,\n\t\tteacher_discount=0.99,\n\t\tteacher_gae_lambda=0.95,\n\t\tteacher_agents=None,\n\t\tued_score='relative_regret',\n\t\ttrack_env_metrics=False,\n\t\tn_unroll_rollout=1,\n\t\trender=False,\n\t\tn_devices=1):\n\t\tassert n_parallel % n_devices == 0, 'Num envs must be divisible by num devices.'\n\n\t\tued_score = UEDScore[ued_score.upper()]\n\n\t\tassert len(student_agents) == 1, \\\n\t\t\t'Only one type of student supported.'\n\t\tassert not (n_students > 2 and ued_score in [UEDScore.RELATIVE_REGRET, UEDScore.MEAN_RELATIVE_REGRET]), \\\n\t\t\t'Standard PAIRED uses only 2 students.'\n\t\tassert teacher_agents is None or len(teacher_agents) == 1, \\\n\t\t\t'Only one type of teacher supported.'\n\n\t\tself.n_students = n_students\n\t\tself.n_parallel = n_parallel // n_devices\n\t\tself.n_eval = n_eval\n\t\tself.n_devices = n_devices\n\t\tself.step_batch_size = n_students*n_eval*n_parallel\n\t\tself.n_rollout_steps = n_rollout_steps\n\t\tself.n_updates = 0\n\t\tself.lr = lr\n\t\tself.lr_final = lr if lr_final is None else lr_final\n\t\tself.lr_anneal_steps = lr_anneal_steps\n\t\tself.teacher_lr = \\\n\t\t\tlr if teacher_lr is None else lr\n\t\tself.teacher_lr_final = \\\n\t\t\tself.lr_final if teacher_lr_final is None else teacher_lr_final\n\t\tself.teacher_lr_anneal_steps = \\\n\t\t\tlr_anneal_steps if teacher_lr_anneal_steps is None else teacher_lr_anneal_steps\n\t\tself.max_grad_norm = max_grad_norm\n\t\tself.adam_eps = adam_eps\n\t\tself.ued_score = ued_score\n\t\tself.track_env_metrics = track_env_metrics\n\n\t\tself.n_unroll_rollout = n_unroll_rollout\n\t\tself.render = render\n\n\t\tself.student_pop = AgentPop(student_agents[0], n_agents=n_students)\n\n\t\tif teacher_agents is not None:\n\t\t\tself.teacher_pop = AgentPop(teacher_agents[0], n_agents=1)\n\n\t\t# This 
ensures correct partial-episodic bootstrapping by avoiding\n\t\t# any termination purely due to timeouts.\n\t\t# env_kwargs.max_episode_steps = self.n_rollout_steps + 1\n\t\tself.benv = envs.BatchUEDEnv(\n\t\t\tenv_name=env_name,\n\t\t\tn_parallel=self.n_parallel,\n\t\t\tn_eval=n_eval,\n\t\t\tenv_kwargs=env_kwargs,\n\t\t\tued_env_kwargs=ued_env_kwargs,\n\t\t\twrappers=['monitor_return', 'monitor_ep_metrics'],\n\t\t\tued_wrappers=[]\n\t\t)\n\t\tself.teacher_n_rollout_steps = \\\n\t\t\tself.benv.env.ued_max_episode_steps()\n\n\t\tself.student_rollout = RolloutStorage(\n\t\t\tdiscount=discount,\n\t\t\tgae_lambda=gae_lambda,\n\t\t\tn_steps=n_rollout_steps,\n\t\t\tn_agents=n_students,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=self.n_eval,\n\t\t\taction_space=self.benv.env.action_space(),\n\t\t\tobs_space=self.benv.env.observation_space(),\n\t\t\tagent=self.student_pop.agent\n\t\t)\n\n\t\tself.teacher_rollout = RolloutStorage(\n\t\t\tdiscount=teacher_discount,\n\t\t\tgae_lambda=teacher_gae_lambda,\n\t\t\tn_steps=self.teacher_n_rollout_steps,\n\t\t\tn_agents=1,\n\t\t\tn_envs=self.n_parallel,\n\t\t\tn_eval=1,\n\t\t\taction_space=self.benv.env.ued_action_space(),\n\t\t\tobs_space=self.benv.env.ued_observation_space(),\n\t\t\tagent=self.teacher_pop.agent,\n\t\t)\n\n\t\tued_monitored_metrics = ('return',)\n\t\tself.ued_rolling_stats = RollingStats(\t\n\t\t\tnames=ued_monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\t\t\n\t\tmonitored_metrics = self.benv.env.get_monitored_metrics()\n\t\tself.rolling_stats = RollingStats(\n\t\t\tnames=monitored_metrics,\n\t\t\twindow=10,\n\t\t)\n\n\t\tself._update_ep_stats = jax.vmap(jax.vmap(self.rolling_stats.update_stats))\n\t\tself._update_ued_ep_stats = jax.vmap(jax.vmap(self.ued_rolling_stats.update_stats))\n\n\t\tif self.render:\n\t\t\tfrom envs.viz.grid_viz import GridVisualizer\n\t\t\tself.viz = GridVisualizer()\n\t\t\tself.viz.show()\n\n\tdef reset(self, rng):\n\t\tself.n_updates = 0\n\n\t\tn_parallel = self.n_parallel*self.n_devices\n\n\t\trng, student_rng, teacher_rng = jax.random.split(rng,3)\n\t\tstudent_info = self._reset_pop(\n\t\t\t\tstudent_rng, \n\t\t\t\tself.student_pop, \n\t\t\t\tpartial(self.benv.reset, sub_batch_size=n_parallel*self.n_eval),\n\t\t\t\tn_parallel_ep=n_parallel*self.n_eval,\n\t\t\t\tlr_init=self.lr,\n\t\t\t\tlr_final=self.lr_final,\n\t\t\t\tlr_anneal_steps=self.lr_anneal_steps)\n\n\t\tteacher_info = self._reset_pop(\n\t\t\tteacher_rng, \n\t\t\tself.teacher_pop, \n\t\t\tpartial(self.benv.reset_teacher, n_parallel=n_parallel),\n\t\t\tn_parallel_ep=n_parallel,\n\t\t\tlr_init=self.teacher_lr,\n\t\t\tlr_final=self.teacher_lr_final,\n\t\t\tlr_anneal_steps=self.teacher_lr_anneal_steps)\n\n\t\treturn (\n\t\t\trng,\n\t\t\t*student_info,\n\t\t\t*teacher_info\n\t\t)\n\n\tdef _reset_pop(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\tenv_reset_fn, \n\t\tn_parallel_ep=1,\n\t\tlr_init=3e-4,\n\t\tlr_final=3e-4,\n\t\tlr_anneal_steps=0):\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\t\treset_out = env_reset_fn(jnp.array(vrngs))\n\t\tif len(reset_out) == 2:\n\t\t\tobs, state = reset_out\n\t\telse:\n\t\t\tobs, state, extra = reset_out\n\t\tdummy_obs = jax.tree_util.tree_map(lambda x: x[0], obs) # for one agent only\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tif pop.agent.is_recurrent:\n\t\t\tcarry = pop.init_carry(subrng, obs)\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tparams = pop.init_params(subrng, dummy_obs)\n\n\t\tschedule_fn = 
optax.linear_schedule(\n\t\t\tinit_value=-float(lr_init),\n\t\t\tend_value=-float(lr_final),\n\t\t\ttransition_steps=lr_anneal_steps,\n\t\t)\n\n\t\ttx = optax.chain(\n\t\t\toptax.clip_by_global_norm(self.max_grad_norm),\n\t\t\toptax.scale_by_adam(eps=self.adam_eps),\n\t\t\toptax.scale_by_schedule(schedule_fn),\n\t\t)\n\n\t\ttrain_state = VmapTrainState.create(\n\t\t\tapply_fn=pop.agent.evaluate,\n\t\t\tparams=params,\n\t\t\ttx=tx\n\t\t)\n\t\t\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(pop.n_agents,n_parallel_ep))\n\n\t\treturn train_state, state, obs, carry, ep_stats\n\n\tdef get_checkpoint_state(self, state):\n\t\t_state = list(state)\n\t\t_state[1] = state[1].state_dict\n\t\t_state[6] = state[6].state_dict\n\n\t\treturn _state\n\n\tdef load_checkpoint_state(self, runner_state, state):\n\t\trunner_state = list(runner_state)\n\t\trunner_state[1] = runner_state[1].load_state_dict(state[1])\n\t\trunner_state[6] = runner_state[6].load_state_dict(state[6])\n\n\t\treturn tuple(runner_state)\n\n\t@partial(jax.jit, static_argnums=(0,2,3))\n\tdef _get_transition(\n\t\tself,\n\t\trng, \n\t\tpop, \n\t\trollout_mgr, \n\t\trollout, \n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None,\n\t\textra=None):\n\t\t# Sample action\n\t\tvalue, pi_params, next_carry = pop.act(params, obs, carry, done)\n\t\tpi = pop.get_action_dist(pi_params)\n\t\trng, subrng = jax.random.split(rng)\n\t\taction = pi.sample(seed=subrng)\n\t\tlog_pi = pi.log_prob(action)\n\n\t\trng, *vrngs = jax.random.split(rng, pop.n_agents+1)\n\n\t\tif pop is self.student_pop:\n\t\t\tstep_fn = self.benv.step_student\n\t\telse:\n\t\t\tstep_fn = self.benv.step_teacher\n\t\tstep_args = (jnp.array(vrngs), state, action)\n\n\t\tif reset_state is not None: # Needed for student to reset to same instance\n\t\t\tstep_args += (reset_state,)\n\n\t\tif extra is not None:\n\t\t\tstep_args += (extra,)\n\t\t\tnext_obs, next_state, reward, done, info, extra = step_fn(*step_args)\n\t\telse:\n\t\t\tnext_obs, next_state, reward, done, info = step_fn(*step_args)\n\n\t\t# Add transition to storage\n\t\tstep = (obs, action, reward, done, log_pi, value)\n\t\tif carry is not None:\n\t\t\tstep += (carry,)\n\n\t\trollout = rollout_mgr.append(rollout, *step)\n\n\t\tif self.render and pop is self.student_pop:\n\t\t\tself.viz.render(\n\t\t\t\tself.benv.env.env.params, \n\t\t\t\tjax.tree_util.tree_map(lambda x: x[0][0], state))\n\n\t\treturn rollout, next_state, next_obs, next_carry, done, info, extra\n\n\t@partial(jax.jit, static_argnums=(0,2,3,4))\n\tdef _rollout(\n\t\tself, \n\t\trng, \n\t\tpop, \n\t\trollout_mgr,\n\t\tn_steps,\n\t\tparams, \n\t\tstate, \n\t\tobs, \n\t\tcarry, \n\t\tdone,\n\t\treset_state=None, \n\t\textra=None, \n\t\tep_stats=None):\n\t\trngs = jax.random.split(rng, n_steps)\n\n\t\trollout = rollout_mgr.reset()\n\n\t\tdef _scan_rollout(scan_carry, rng):\n\t\t\t(rollout, \n\t\t\t state, \n\t\t\t obs, \n\t\t\t carry,\n\t\t\t done, \n\t\t\t extra, \n\t\t\t ep_stats) = scan_carry\n\t\t\t\n\t\t\tnext_scan_carry = \\\n\t\t\t\tself._get_transition(\n\t\t\t\t\trng,\n\t\t\t\t\tpop, \n\t\t\t\t\trollout_mgr,\n\t\t\t\t\trollout,\n\t\t\t\t\tparams, \n\t\t\t\t\tstate, \n\t\t\t\t\tobs, \n\t\t\t\t\tcarry, \n\t\t\t\t\tdone,\n\t\t\t\t\treset_state, \n\t\t\t\t\textra)\n\n\t\t\t(rollout, \n\t\t\t next_state, \n\t\t\t next_obs, \n\t\t\t next_carry, \n\t\t\t done, \n\t\t\t info, \n\t\t\t extra) = next_scan_carry\n\n\t\t\tif ep_stats is not None:\n\t\t\t\t_ep_stats_update_fn = self._update_ep_stats \\\n\t\t\t\t\tif pop 
is self.student_pop else self._update_ued_ep_stats\n\n\t\t\t\tep_stats = _ep_stats_update_fn(ep_stats, done, info)\n\n\t\t\treturn (rollout, next_state, next_obs, next_carry, done, extra, ep_stats), None\n\n\t\t(rollout, state, obs, carry, done, extra, ep_stats), _ = jax.lax.scan(\n\t\t\t_scan_rollout,\n\t\t\t(rollout, state, obs, carry, done, extra, ep_stats),\n\t\t\trngs,\n\t\t\tlength=n_steps,\n\t\t\tunroll=self.n_unroll_rollout\n\t\t)\n\n\t\treturn rollout, state, obs, carry, extra, ep_stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, \n\t\tupdate_stats, ep_stats, \n\t\tued_update_stats, ued_ep_stats,\n\t\tenv_metrics=None,\n\t\tgrad_stats=None, ued_grad_stats=None):\n\t\tmean_returns_by_student = jax.vmap(lambda x: x.mean())(ep_stats['return'])\n\t\tmean_returns_by_teacher = jax.vmap(lambda x: x.mean())(ued_ep_stats['return'])\n\n\t\tmean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ep_stats[k] for k in self.rolling_stats.names}\n\t\t)\n\t\tued_mean_ep_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(\n\t\t\t{k:ued_ep_stats[k] for k in self.ued_rolling_stats.names}\n\t\t)\n\n\t\tstudent_stats = {\n\t\t\tf'mean_{k}':v for k,v in mean_ep_stats.items()\n\t\t}\n\t\tstudent_stats.update(update_stats)\n\n\t\tstats = {}\n\t\tfor i in range(self.n_students):\n\t\t\t_student_stats = jax.tree_util.tree_map(lambda x: x[i], student_stats) # for agent0\n\t\t\tstats.update({f'{k}_a{i}':v for k,v in _student_stats.items()})\n\n\t\tteacher_stats = {\n\t\t\tf'mean_{k}_tch':v for k,v in ued_mean_ep_stats.items()\n\t\t}\n\t\tteacher_stats.update({\n\t\t\tf'{k}_tch':v[0] for k,v in ued_update_stats.items()\n\t\t})\n\t\tstats.update(teacher_stats)\n\n\t\tif self.track_env_metrics:\n\t\t\tpassable_mask = env_metrics.pop('passable')\n\t\t\tmean_env_metrics = jax.tree_util.tree_map(\n\t\t\t\tlambda x: (x*passable_mask).sum()/passable_mask.sum(), \n\t\t\t\tenv_metrics\n\t\t\t)\n\t\t\tmean_env_metrics.update({'passable_ratio': passable_mask.mean()})\n\t\t\tstats.update({\n\t\t\t\tf'env/{k}':v for k,v in mean_env_metrics.items()\n\t\t\t})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\tdef get_shmap_spec(self):\n\t\trunner_state_size = len(inspect.signature(self.run).parameters)\n\t\tin_spec = [P(None,'device'),]*(runner_state_size)\n\t\tout_spec = [P(None,'device'),]*(runner_state_size)\n\n\t\tin_spec[:2] = [P(None),]*2\n\t\tin_spec[6] = P(None)\n\t\tin_spec = tuple(in_spec)\n\t\tout_spec = (P(None),) + in_spec\n\n\t\treturn in_spec, out_spec\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate,\n\t\tobs,\n\t\tcarry,\n\t\tep_stats,\n\t\tued_train_state,\n\t\tued_state,\n\t\tued_obs,\n\t\tued_carry,\n\t\tued_ep_stats):\n\t\t\"\"\"\n\t\tPerform one update step: rollout teacher + students\n\t\t\"\"\"\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\n\t\t# === Reset teacher env + rollout teacher\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tued_reset_out = self.benv.reset_teacher(jnp.array(vrngs))\n\t\tif len(ued_reset_out) > 2:\n\t\t\tued_obs, ued_state, ued_extra = ued_reset_out\n\t\telse:\n\t\t\tued_obs, ued_state = ued_reset_out\n\t\t\tued_extra = None\n\n\t\t# Reset UED ep_stats\n\t\tif self.ued_rolling_stats is not None:\n\t\t\tued_ep_stats = 
self.ued_rolling_stats.reset_stats(\n\t\t\t\tbatch_shape=(1,self.n_parallel))\n\t\telse:\n\t\t\tued_ep_stats = None\n\n\t\ttch_rollout_batch_shape = (1,self.n_parallel*self.n_eval)\n\t\tdone = jnp.zeros(tch_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_rollout, ued_state, ued_obs, ued_carry, _, ued_ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng,\n\t\t\t\tself.teacher_pop,\n\t\t\t\tself.teacher_rollout,\n\t\t\t\tself.teacher_n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(ued_train_state.params), \n\t\t\t\tued_state, \n\t\t\t\tued_obs, \n\t\t\t\tued_carry,\n\t\t\t\tdone, \n\t\t\t\textra=ued_extra, \n\t\t\t\tep_stats=ued_ep_stats\n\t\t\t)\n\n\t\t# === Reset student to new envs + rollout students\n\t\trng, *vrngs = jax.random.split(rng, self.teacher_pop.n_agents+1)\n\t\tobs, state, extra = jax.tree_util.tree_map(\n\t\t\tlambda x:x.squeeze(0), self.benv.reset_student(\n\t\t\t\tjnp.array(vrngs),\n\t\t\t\tued_state, \n\t\t\t\tself.student_pop.n_agents))\n\t\treset_state = state\n\n\t\t# Reset student ep_stats\n\t\tst_rollout_batch_shape = (self.n_students,self.n_parallel*self.n_eval)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=st_rollout_batch_shape)\n\n\t\tdone = jnp.zeros(st_rollout_batch_shape, dtype=jnp.bool_)\n\t\trng, subrng = jax.random.split(rng)\n\t\trollout, state, obs, carry, extra, ep_stats = \\\n\t\t\tself._rollout(\n\t\t\t\tsubrng, \n\t\t\t\tself.student_pop,\n\t\t\t\tself.student_rollout,\n\t\t\t\tself.n_rollout_steps,\n\t\t\t\tjax.lax.stop_gradient(train_state.params),\n\t\t\t\tstate, \n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\treset_state=reset_state, \n\t\t\t\textra=extra, \n\t\t\t\tep_stats=ep_stats)\n\n\t\t# === Update student with PPO\n\t\t# PPOAgent vmaps over the train state and batch. 
Batch must be N x EM\n\t\tstudent_rollout_last_value = self.student_pop.get_value(\n\t\t\tjax.lax.stop_gradient(train_state.params), obs, carry\n\t\t)\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tstudent_rollout_last_value\n\t\t)\n\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self.student_pop.update(subrng, train_state, train_batch)\n\n\t\t# === Update teacher with PPO\n\t\t# - Compute returns per env per agent\n\t\t# - Compute batched returns based on returns per env per agent\n\t\tued_score, _ = compute_ued_scores(self.ued_score, train_batch, self.n_eval)\n\t\tued_rollout = self.teacher_rollout.set_final_reward(ued_rollout, ued_score)\n\t\tued_train_batch = self.teacher_rollout.get_batch(\n\t\t\tued_rollout, \n\t\t\tjnp.zeros((1, self.n_parallel)) # Last step terminates episode\n\t\t)\n\n\t\tued_ep_stats = self._update_ued_ep_stats(\n\t\t\tued_ep_stats, \n\t\t\tjnp.ones((1,len(ued_score),1), dtype=jnp.bool_),\n\t\t\t{'return': jnp.expand_dims(ued_score, (0,-1))}\n\t\t)\n\n\t\t# Update teacher, batch must be 1 x Ex1\n\t\trng, subrng = jax.random.split(rng)\n\t\tued_train_state, ued_update_stats = self.teacher_pop.update(subrng, ued_train_state, ued_train_batch)\n\n\t\t# --------------------------------------------------\n\t\t# Collect metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(reset_state)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tgrad_stats, ued_grad_stats = None, None\n\n\t\tstats = self._compile_stats(\n\t\t\tupdate_stats, ep_stats, \n\t\t\tued_update_stats, ued_ep_stats,\n\t\t\tenv_metrics,\n\t\t\tgrad_stats, ued_grad_stats)\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\ttrain_state = train_state.increment()\n\t\tued_train_state = ued_train_state.increment()\n\t\tself.n_updates += 1\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng,\n\t\t\ttrain_state, state, obs, carry, ep_stats,\n\t\t\tued_train_state, ued_state, ued_obs, ued_carry, ued_ep_stats\n\t\t)" }, { "identifier": "PLRRunner", "path": "src/minimax/runners/plr_runner.py", "snippet": "class PLRRunner(DRRunner):\n\tdef __init__(\n\t\tself, \n\t\t*,\n\t\treplay_prob=0.5,\n\t\tbuffer_size=100,\n\t\tstaleness_coef=0.3,\n\t\tuse_score_ranks=True,\n\t\ttemp=1.0,\n\t\tmin_fill_ratio=0.5,\n\t\tuse_robust_plr=False,\n\t\tuse_parallel_eval=False,\n\t\tued_score='l1_value_loss',\n\t\tforce_unique=False, # Slower if True\n\t\tmutation_fn=None,\n\t\tn_mutations=0,\n\t\tmutation_criterion='batch',\n\t\tmutation_subsample_size=1,\n\t\t**kwargs):\n\t\tuse_mutations = mutation_fn is not None\n\t\tif use_parallel_eval:\n\t\t\treplay_prob = 1.0 # Replay every rollout cycle\n\t\t\tmutation_criterion = 'batch' # Force batch mutations (no UED scores)\n\t\t\tself._n_parallel_batches = 3 if use_mutations else 2\n\t\t\tkwargs['n_parallel'] *= self._n_parallel_batches\n\n\t\tsuper().__init__(**kwargs)\n\n\t\tself.replay_prob = replay_prob\n\t\tself.buffer_size = buffer_size\n\t\tself.staleness_coef = staleness_coef\n\t\tself.temp = temp\n\t\tself.use_score_ranks = use_score_ranks\n\t\tself.min_fill_ratio = min_fill_ratio\n\t\tself.use_robust_plr = use_robust_plr\n\t\tself.use_parallel_eval = use_parallel_eval\n\t\tself.ued_score = UEDScore[ued_score.upper()]\n\n\t\tself.use_mutations = use_mutations\n\t\tif self.use_mutations:\n\t\t\tself.mutation_fn = envs.get_mutator(self.benv.env_name, mutation_fn)\n\t\telse:\n\t\t\tself.mutation_fn = None\n\t\tself.n_mutations = n_mutations\n\t\tself.mutation_criterion = 
MutationCriterion[mutation_criterion.upper()]\n\t\tself.mutation_subsample_size = mutation_subsample_size\n\n\t\tself.force_unique = force_unique\n\t\tif force_unique:\n\t\t\tself.comparator_fn = envs.get_comparator(self.benv.env_name)\n\t\telse:\n\t\t\tself.comparator_fn = None\n\n\t\tif mutation_fn is not None and mutation_criterion != 'batch':\n\t\t\tassert self.n_parallel % self.mutation_subsample_size == 0, \\\n\t\t\t\t'Number of parallel envs must be divisible by mutation subsample size.'\n\n\tdef reset(self, rng):\n\t\trunner_state = list(super().reset(rng))\n\t\trng = runner_state[0]\n\t\trunner_state[0], subrng = jax.random.split(rng)\n\t\texample_state = self.benv.env.reset(rng)[1]\n\n\t\tself.plr_mgr = PopPLRManager(\n\t\t\tn_agents=self.n_students,\n\t\t\texample_level=example_state,\n\t\t\tued_score=self.ued_score,\n\t\t\treplay_prob=self.replay_prob,\n\t\t\tbuffer_size=self.buffer_size,\n\t\t\tstaleness_coef=self.staleness_coef,\n\t\t\ttemp=self.temp,\n\t\t\tuse_score_ranks=self.use_score_ranks,\n\t\t\tmin_fill_ratio=self.min_fill_ratio,\n\t\t\tuse_robust_plr=self.use_robust_plr,\n\t\t\tuse_parallel_eval=self.use_parallel_eval,\n\t\t\tcomparator_fn=self.comparator_fn,\n\t\t\tn_devices=self.n_devices\n\t\t)\n\t\tplr_buffer = self.plr_mgr.reset(self.n_students)\n\n\t\ttrain_state = runner_state[1]\n\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\t\tif self.n_devices == 1:\n\t\t\trunner_state[1] = train_state\n\t\telse:\n\t\t\tplr_buffer = jax.tree_map(lambda x: x.repeat(self.n_devices, 1), plr_buffer) # replicate plr buffer\n\t\t\trunner_state += (plr_buffer,) # Return PLR buffer directly to make shmap easier\n\n\t\tself.dummy_eval_output = self._create_dummy_eval_output(train_state)\n\n\t\treturn tuple(runner_state)\n\n\tdef _create_dummy_eval_output(self, train_state):\n\t\trng, *vrngs = jax.random.split(jax.random.PRNGKey(0), self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs))\n\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=(self.n_students, self.n_parallel*self.n_eval))\n\n\t\tued_scores = jnp.zeros((self.n_students, self.n_parallel))\n\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\t\trollout = self.student_rollout.reset()\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry,\n\t\t\t)\n\t\t)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstate,\n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tstate,\n\t\t\ttrain_batch,\n\t\t\tued_scores\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,8))\n\tdef _eval_and_update_plr(\n\t\t\tself,\n\t\t\trng,\n\t\t\tlevels,\n\t\t\tlevel_idxs, \n\t\t\ttrain_state, \n\t\t\tupdate_plr,\n\t\t\tparent_idxs=None,\n\t\t\tdupe_mask=None,\n\t\t\tfake=False):\n\t\t# Collect rollout and optionally update plr buffer\n\t\t# Returns train_batch and ued_scores\n\t\t# Perform rollout: @todo: pmap this\n\t\tif fake:\n\t\t\tdummy_eval_output = list(self.dummy_eval_output)\n\t\t\tdummy_eval_output[1] = train_state\n\t\t\treturn tuple(dummy_eval_output)\n\n\t\trollout_batch_shape = (self.n_students, self.n_parallel*self.n_eval)\n\t\tobs, state, extra = self.benv.set_state(levels)\n\t\tep_stats = self.rolling_stats.reset_stats(\n\t\t\tbatch_shape=rollout_batch_shape)\n\n\t\trollout_start_state = state\n\n\t\tdone = jnp.zeros(rollout_batch_shape, 
dtype=jnp.bool_)\n\t\tif self.student_pop.agent.is_recurrent:\n\t\t\tcarry = self.zero_carry\n\t\telse:\n\t\t\tcarry = None\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tstart_state = state\n\t\trollout, state, start_state, obs, carry, extra, ep_stats, train_state = \\\n\t\t\tself._rollout_students(\n\t\t\t\tsubrng, \n\t\t\t\ttrain_state, \n\t\t\t\tstate, \n\t\t\t\tstart_state,\n\t\t\t\tobs, \n\t\t\t\tcarry, \n\t\t\t\tdone,\n\t\t\t\textra, \n\t\t\t\tep_stats\n\t\t\t)\n\n\t\ttrain_batch = self.student_rollout.get_batch(\n\t\t\trollout, \n\t\t\tself.student_pop.get_value(\n\t\t\t\tjax.lax.stop_gradient(train_state.params), \n\t\t\t\tobs, \n\t\t\t\tcarry\n\t\t\t)\n\t\t)\n\n\t\t# Update PLR buffer\n\t\tif self.ued_score == UEDScore.MAX_MC:\n\t\t\tmax_returns = jax.vmap(lambda x,y: x.at[y].get())(train_state.plr_buffer.max_returns, level_idxs)\n\t\t\tmax_returns = jnp.where(\n\t\t\t\tjnp.greater_equal(level_idxs, 0),\n\t\t\t\tmax_returns,\n\t\t\t\tjnp.full_like(max_returns, -jnp.inf)\n\t\t\t)\n\t\t\tued_info = {'max_returns': max_returns}\n\t\telse:\n\t\t\tued_info = None\n\t\tued_scores, ued_score_info = compute_ued_scores(\n\t\t\tself.ued_score, train_batch, self.n_eval, info=ued_info, ignore_val=-jnp.inf, per_agent=True)\n\t\tnext_plr_buffer = self.plr_mgr.update(\n\t\t\ttrain_state.plr_buffer, \n\t\t\tlevels=levels, \n\t\t\tlevel_idxs=level_idxs, \n\t\t\tued_scores=ued_scores,\n\t\t\tdupe_mask=dupe_mask, \n\t\t\tinfo=ued_score_info, \n\t\t\tignore_val=-jnp.inf,\n\t\t\tparent_idxs=parent_idxs)\n\n\t\tnext_plr_buffer = jax.vmap(\n\t\t\tlambda update, new, prev: jax.tree_map(\n\t\t\t\tlambda x, y: jax.lax.select(update, x, y), new, prev)\n\t\t)(update_plr, next_plr_buffer, train_state.plr_buffer)\n\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\treturn (\n\t\t\trng,\n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\trollout_start_state,\n\t\t\ttrain_batch,\n\t\t\tued_scores,\n\t\t)\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _mutate_levels(self, rng, levels, level_idxs, ued_scores=None):\n\t\tif not self.use_mutations:\n\t\t\treturn levels, level_idxs, jnp.full_like(level_idxs, -1)\n\n\t\tdef upsample_levels(levels, level_idxs, subsample_idxs):\n\t\t\tsubsample_idxs = subsample_idxs.repeat(self.n_parallel//self.mutation_subsample_size, -1)\n\t\t\tparent_idxs = level_idxs.take(subsample_idxs)\n\t\t\tlevels = jax.vmap(\n\t\t\t\tlambda x, y: jax.tree_map(lambda _x: jnp.array(_x).take(y, 0), x)\n\t\t\t)(levels, parent_idxs)\n\t\t\t\n\t\t\treturn levels, parent_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.BATCH:\n\t\t\tparent_idxs = level_idxs\n\n\t\tif self.mutation_criterion == MutationCriterion.EASY:\n\t\t\t_, top_level_idxs = jax.lax.approx_min_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\telif self.mutation_criterion == MutationCriterion.HARD:\n\t\t\t_, top_level_idxs = jax.lax.approx_max_k(ued_scores, self.mutation_subsample_size)\n\t\t\tlevels, parent_idxs = upsample_levels(levels, level_idxs, top_level_idxs)\n\n\t\tn_parallel = level_idxs.shape[-1]\n\t\tvrngs = jax.vmap(lambda subrng: jax.random.split(subrng, n_parallel))(\n\t\t\tjax.random.split(rng, self.n_students)\n\t\t)\n\n\t\tmutated_levels = jax.vmap(\n\t\t\tlambda *args: jax.vmap(self.mutation_fn, in_axes=(0,None,0,None))(*args),\n\t\t\tin_axes=(0,None,0,None)\n\t\t)(vrngs, self.benv.env_params, levels, 
self.n_mutations)\n\n\t\t# Mutated levels do not have existing idxs in the PLR buffer.\n\t\tmutated_level_idxs = jnp.full((self.n_students, n_parallel), -1)\n\n\t\treturn mutated_levels, mutated_level_idxs, parent_idxs\n\n\tdef _efficient_grad_update(self, rng, train_state, train_batch, is_replay):\n\t\t# PPOAgent vmaps over the train state and batch. Batch must be N x EM\n\t\tskip_grad_update = jnp.logical_and(self.use_robust_plr, ~is_replay)\n\n\t\tif self.n_students == 1:\n\t\t\ttrain_state, stats = jax.lax.cond(\n\t\t\t\tskip_grad_update[0],\n\t\t\t\tpartial(self.student_pop.update, fake=True),\n\t\t\t\tself.student_pop.update,\n\t\t\t\t*(rng, train_state, train_batch)\n\t\t\t)\n\t\telif self.n_students > 1: # Have to vmap all students + take only students that need updates\n\t\t\t_, dummy_stats = jax.vmap(lambda *_: self.student_pop.agent.get_empty_update_stats())(np.arange(self.n_students))\n\t\t\t_train_state, stats = self.student.update(rng, train_state, train_batch)\n\t\t\ttrain_state, stats = jax.vmap(lambda cond,x,y: \\\n\t\t\t\t\tjax.tree_map(lambda _cond,_x,_y: jax.lax.select(_cond,_x,_y), cond, x, y))(\n\t\t\t\t\t\tis_replay, (train_state, stats), (_train_state, dummy_stats)\n\t\t\t\t\t)\n\n\t\treturn train_state, stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef _compile_stats(self, update_stats, ep_stats, env_metrics=None, plr_stats=None):\n\t\tstats = super()._compile_stats(update_stats, ep_stats, env_metrics)\n\n\t\tif plr_stats is not None:\n\t\t\tplr_stats = jax.vmap(lambda info: jax.tree_map(lambda x: x.mean(), info))(plr_stats)\n\n\t\tif self.n_students > 1:\n\t\t\t_plr_stats = {}\n\t\t\tfor i in range(self.n_students):\n\t\t\t\t_student_plr_stats = jax.tree_util.tree_map(lambda x: x[i], plr_stats) # for agent0\n\t\t\t\t_plr_stats.update({f'{k}_a{i}':v for k,v in _student_plr_stats.items()})\n\t\t\tplr_stats = _plr_stats\n\t\telse:\n\t\t\tplr_stats = jax.tree_map(lambda x: x[0], plr_stats) \n\n\t\tstats.update({f'plr_{k}':v for k,v in plr_stats.items()})\n\n\t\tif self.n_devices > 1:\n\t\t\tstats = jax.tree_map(lambda x: jax.lax.pmean(x, 'device'), stats)\n\n\t\treturn stats\n\n\t@partial(jax.jit, static_argnums=(0,))\n\tdef run(\n\t\tself, \n\t\trng, \n\t\ttrain_state, \n\t\tstate, \n\t\tstart_state,\n\t\tobs, \n\t\tcarry=None, \n\t\textra=None, \n\t\tep_stats=None,\n\t\tplr_buffer=None):\n\t\t# If device sharded, load sharded PLR buffer into train state\n\t\tif self.n_devices > 1:\n\t\t\trng = jax.random.fold_in(rng, jax.lax.axis_index('device'))\n\t\t\ttrain_state = train_state.replace(plr_buffer=plr_buffer)\n\n\t\t# Sample next training levels via PLR\n\t\trng, *vrngs = jax.random.split(rng, self.n_students+1)\n\t\tobs, state, extra = self.benv.reset(jnp.array(vrngs), self.n_parallel, 1)\n\n\t\tif self.use_parallel_eval:\n\t\t\tn_level_samples = self.n_parallel//self._n_parallel_batches\n\t\t\tnew_levels = jax.tree_map(lambda x: x.at[:,n_level_samples:2*n_level_samples].get(), state)\n\t\telse:\n\t\t\tn_level_samples = self.n_parallel\n\t\t\tnew_levels = state\n\n\t\trng, subrng = jax.random.split(rng)\n\t\tlevels, level_idxs, is_replay, next_plr_buffer = \\\n\t\t\tself.plr_mgr.sample(subrng, train_state.plr_buffer, new_levels, n_level_samples)\n\t\ttrain_state = train_state.replace(plr_buffer=next_plr_buffer)\n\n\t\t# If use_parallel_eval=True, need to combine replay and non-replay levels together\n\t\t# Need to mutate levels as well\n\t\tparent_idxs = jnp.full((self.n_students, self.n_parallel), -1)\n\t\tif self.use_parallel_eval: # Parallel 
ACCEL\n\t\t\tnew_level_idxs = jnp.full_like(parent_idxs, -1)\n\n\t\t\t_all_levels = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(state, levels)\n\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=n_level_samples, src_len=n_level_samples),\n\t\t\t\t)(new_level_idxs, level_idxs)\n\n\t\t\tif self.use_mutations:\n\t\t\t\trng, subrng = jax.random.split(rng)\n\t\t\t\tmutated_levels, mutated_level_idxs, _parent_idxs = self._mutate_levels(subrng, levels, level_idxs)\n\t\t\t\t\n\t\t\t\tfallback_levels = jax.tree_map(lambda x: x.at[:,-n_level_samples:].get(), state)\n\t\t\t\tfallback_level_idxs = jnp.full_like(mutated_level_idxs, -1)\n\n\t\t\t\tmutated_levels = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_levels, fallback_levels)\n\n\t\t\t\tmutated_level_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, mutated_level_idxs, fallback_level_idxs)\n\n\t\t\t\t_parent_idxs = jax.vmap(\n\t\t\t\t\tlambda cond,x,y: jax.tree_map(\n\t\t\t\t\t\tlambda _x,_y: jax.lax.select(cond,_x,_y), x, y\n\t\t\t\t\t))(is_replay, _parent_idxs, fallback_level_idxs)\n\t\t\n\t\t\t\tmutated_levels_start_idx = 2*n_level_samples\n\t\t\t\t_all_levels = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_levels, mutated_levels)\n\t\t\t\t_all_level_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(_all_level_idxs, mutated_level_idxs)\n\t\t\t\tparent_idxs = jax.vmap(\n\t\t\t\t\tlambda x,y: _tree_util.pytree_merge(x,y, start_idx=mutated_levels_start_idx, src_len=n_level_samples),\n\t\t\t\t\t)(parent_idxs, _parent_idxs)\n\n\t\t\tlevels = _all_levels\n\t\t\tlevel_idxs = _all_level_idxs\n\n\t\t# dedupe levels, move into PLR buffer logic\n\t\tif self.force_unique:\n\t\t\tlevel_idxs, dupe_mask = self.plr_mgr.dedupe_levels(next_plr_buffer, levels, level_idxs)\n\t\telse:\n\t\t\tdupe_mask = None \n\n\t\t# Evaluate levels + update PLR\n\t\tresult = self._eval_and_update_plr(\n\t\t\trng, levels, level_idxs, train_state, update_plr=jnp.array([True]*self.n_students), parent_idxs=parent_idxs, dupe_mask=dupe_mask)\n\t\trng, train_state, state, start_state, obs, carry, extra, ep_stats, \\\n\t\t\trollout_start_state, train_batch, ued_scores = result\n\n\t\tif self.use_parallel_eval:\n\t\t\treplay_start_idx = self.n_eval*n_level_samples\n\t\t\treplay_end_idx = 2*replay_start_idx\n\t\t\ttrain_batch = jax.vmap(\n\t\t\t\tlambda x: jax.tree_map(\n\t\t\t\t\tlambda _x: _x.at[:,replay_start_idx:replay_end_idx].get(), x)\n\t\t\t\t)(train_batch)\n\n\t\t# Gradient update\n\t\trng, subrng = jax.random.split(rng)\n\t\ttrain_state, update_stats = self._efficient_grad_update(subrng, train_state, train_batch, is_replay)\n\n\t\t# Mutation step\n\t\tuse_mutations = jnp.logical_and(self.use_mutations, is_replay)\n\t\tuse_mutations = jnp.logical_and(use_mutations, not self.use_parallel_eval) # Already mutated above in parallel\n\t\trng, arng, brng = jax.random.split(rng, 3)\n\n\t\tmutated_levels, mutated_level_idxs, parent_idxs = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._mutate_levels,\n\t\t\tlambda *_: (levels, level_idxs, jnp.full_like(level_idxs, 
-1)),\n\t\t\t*(arng, levels, level_idxs, ued_scores)\n\t\t)\n\n\t\tmutated_dupe_mask = jnp.zeros_like(mutated_level_idxs, dtype=jnp.bool_)\n\t\tif self.force_unique: # Should move into update plr logic\n\t\t\tmutated_level_idxs, mutated_dupe_mask = jax.lax.cond(\n\t\t\t\tuse_mutations.any(),\n\t\t\t\tself.plr_mgr.dedupe_levels,\n\t\t\t\tlambda *_: (mutated_level_idxs, mutated_dupe_mask),\n\t\t\t\t*(next_plr_buffer, mutated_levels, mutated_level_idxs)\n\t\t\t)\n\n\t\tmutation_eval_result = jax.lax.cond(\n\t\t\tuse_mutations.any(),\n\t\t\tself._eval_and_update_plr,\n\t\t\tpartial(self._eval_and_update_plr, fake=True),\n\t\t\t*(brng, mutated_levels, mutated_level_idxs, train_state, use_mutations, parent_idxs, mutated_dupe_mask)\n\t\t)\n\t\ttrain_state = mutation_eval_result[1]\n\n\t\t# Collect training env metrics\n\t\tif self.track_env_metrics:\n\t\t\tenv_metrics = self.benv.get_env_metrics(levels)\n\t\telse:\n\t\t\tenv_metrics = None\n\n\t\tplr_stats = self.plr_mgr.get_metrics(train_state.plr_buffer)\n\n\t\tstats = self._compile_stats(update_stats, ep_stats, env_metrics, plr_stats)\n\n\t\tif self.n_devices > 1:\n\t\t\tplr_buffer = train_state.plr_buffer\n\t\t\ttrain_state = train_state.replace(plr_buffer=None)\n\n\t\ttrain_state = train_state.increment()\n\t\tstats.update(dict(n_updates=train_state.n_updates[0]))\n\n\t\treturn (\n\t\t\tstats, \n\t\t\trng, \n\t\t\ttrain_state, \n\t\t\tstate, \n\t\t\tstart_state, \n\t\t\tobs, \n\t\t\tcarry, \n\t\t\textra, \n\t\t\tep_stats,\n\t\t\tplr_buffer\n\t\t)" } ]
import copy import time import numpy as np import jax import minimax.envs as envs import minimax.models as models import minimax.agents as agents from functools import partial from collections import defaultdict from jax.sharding import Mesh, PartitionSpec as P from jax.experimental import mesh_utils from jax.experimental.shard_map import shard_map from .eval_runner import EvalRunner from .dr_runner import DRRunner from .paired_runner import PAIREDRunner from .plr_runner import PLRRunner from minimax.util.rl import UEDScore, PopPLRManager
16,396
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo(
""" Copyright (c) Meta Platforms, Inc. and affiliates. All rights reserved. This source code is licensed under the license found in the LICENSE file in the root directory of this source tree. """ class RunnerInfo: def __init__( self, runner_cls, is_ued=False): self.runner_cls = runner_cls self.is_ued = is_ued RUNNER_INFO = { 'dr': RunnerInfo( runner_cls=DRRunner, ), 'plr': RunnerInfo( runner_cls=PLRRunner, ), 'paired': RunnerInfo(
runner_cls=PAIREDRunner,
2
2023-10-28 12:12:01+00:00
24k
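The fields above make up one retrieval-augmented next-line completion record: `cropped_code` stops right before `next_line` (`runner_cls=PAIREDRunner,`), and `gold_snippet_index` selects the entry of `context` that the target line depends on — here presumably the `PAIREDRunner` snippet, since that is the class the target line constructs. Below is a minimal sketch of assembling a prompt from such a record. It assumes the record is available as a plain Python dict with the field names shown in this dump; how the record is loaded, and the exact index-to-snippet mapping, are assumptions for illustration rather than part of the dump itself.

```python
def build_prompt(record: dict) -> str:
    """Assemble a next-line completion prompt from one record.

    Assumes `record` is a plain dict carrying the field names shown in
    this dump (`context`, `gold_snippet_index`, `cropped_code`,
    `next_line`); the loading mechanism is not part of the dump.
    """
    # context[gold_snippet_index] is the retrieved snippet the target line
    # depends on; for the record above the target line constructs
    # PAIREDRunner, so the gold entry is presumably the PAIREDRunner snippet.
    gold = record["context"][record["gold_snippet_index"]]
    header = f"# Retrieved from {gold['path']} ({gold['identifier']})\n"
    # The model is asked to continue record["cropped_code"];
    # record["next_line"] is the reference continuation.
    return header + gold["snippet"] + "\n\n" + record["cropped_code"]
```

For the record above, the assembled prompt ends at `'paired': RunnerInfo(`, and the reference continuation is the `next_line` value `runner_cls=PAIREDRunner,`.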
nv-tlabs/vid2player3d
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(\n self, template_dir=\"/hdd/zen/dev/copycat/Copycat/assets/bigfoot_template_v1.pkl\"\n ):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.dof_name = [\"x\", \"y\", \"z\"]\n self.root = None\n self.template_geoms = None\n if osp.isfile(template_dir):\n self.template_geoms = joblib.load(template_dir)\n\n def forward_bvh(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n else:\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bvh(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n scale,\n jrange,\n exclude_bones=None,\n channels=None,\n spec_channels=None,\n ):\n if channels is None:\n channels = [\"x\", \"y\", \"z\"]\n if exclude_bones is None:\n exclude_bones = {}\n if spec_channels is None:\n spec_channels = dict()\n\n joint_names = list(\n filter(lambda x: all([t not in x for t in exclude_bones]), offsets.keys())\n )\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.channels = channels\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n \n bone.channels = (\n spec_channels[joint] if joint in spec_channels.keys() else channels\n )\n bone.dof_index = [dof_ind[x] for x in bone.channels]\n bone.offset = np.array(offsets[joint]) * self.len_scale\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n\n\n self.bones.append(bone)\n self.name2bone[joint] = bone\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n # print(parent_name)\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bvh(self.root)\n # import pdb\n # pdb.set_trace()\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n\n def write_xml(\n self,\n fname,\n template_fname=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/template/humanoid_template_local.xml\",\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\"njmax\": \"700\", \"nconmax\": \"200\"})\n tree.write(fname, pretty_print=True)\n\n def write_str(\n self,\n template_fname=\"/hdd/zen/dev/copycat/Copycat/assets/mujoco_models/template/humanoid_template_local.xml\",\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n 
parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"500\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\"njmax\": \"700\", \"nconmax\": \"200\"})\n\n return etree.tostring(tree, pretty_print=False)\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n attr[\"user\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.end + offset))\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n j_attr[\"limited\"] = \"false\"\n j_attr[\"type\"] = \"free\"\n j_attr[\"armature\"] = \"0\"\n j_attr[\"damping\"] = \"0\"\n # j_attr[\"stiffness\"] = \"500\"\n SubElement(node, \"joint\", j_attr)\n else:\n for i in range(len(bone.dof_index)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n j_attr[\"name\"] = bone.name + \"_\" + self.dof_name[ind]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n j_attr[\"stiffness\"] = \"500\"\n j_attr[\"damping\"] = \"50\"\n j_attr[\"armature\"] = \"0.02\"\n\n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n\n SubElement(node, \"joint\", j_attr)\n\n # write geometry\n if self.template_geoms is None or len(self.template_geoms[bone.name]) == 0:\n if bone.parent is None:\n g_attr = dict()\n g_attr[\"size\"] = \"0.0300\"\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n else:\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n g_attr = dict()\n g_attr[\"size\"] = \"0.0100\"\n if bone.name.endswith(\"3\"):\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *(bone.pos + offset)\n )\n else:\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"\n ] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2])\n )\n\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n\n else:\n g_attr = dict()\n template_attributes = self.template_geoms[bone.name][0]\n g_attr[\"type\"] = template_attributes[\"type\"]\n # g_attr[\"contype\"] = template_attributes[\"contype\"]\n # g_attr[\"conaffinity\"] = template_attributes[\"conaffinity\"]\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n g_attr[\"density\"] = \"500\"\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n # template_attributes[\"start\"]\n if g_attr[\"type\"] == \"capsule\":\n g_attr[\n \"fromto\"\n ] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate(\n [e1, e2]\n )\n )\n g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n elif 
g_attr[\"type\"] == \"box\":\n # g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n # *template_attributes[\"start\"]\n # )\n multiplier = np.linalg.norm(e2 - e1) / 0.0945\n pos = (e1 + e2) / 2\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n pos[1] += 0.05\n \n\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *template_attributes[\"size\"] * multiplier\n )\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(\n *template_attributes[\"rot\"]\n )\n elif g_attr[\"type\"] == \"sphere\":\n g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(\n *np.zeros(3)\n )\n SubElement(node, \"geom\", g_attr)\n\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_mesh_local.py", "snippet": "class Skeleton:\n def __init__(self, geom_dir, rel_geom_dir):\n self.geom_dir = geom_dir\n self.rel_geom_dir = rel_geom_dir\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.root = None\n self.equalities = None\n self.exclude_contacts = None\n self.collision_groups = None\n self.simple_geom = False\n self.buffer_dict = {\"njmax\": \"2500\", \"nconmax\": \"500\"}\n\n def forward_bones(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bones(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n axes,\n channels,\n jrange,\n sites,\n scale,\n equalities,\n exclude_contacts=None,\n collision_groups=None,\n conaffinity=None,\n simple_geom=False,\n color_dict=None,\n ):\n if exclude_contacts is None:\n exclude_contacts = []\n if collision_groups is None:\n collision_groups = {}\n self.exclude_contacts = exclude_contacts\n self.collision_groups = {}\n self.conaffinity = {}\n self.color_dict = color_dict\n\n for group, bones in collision_groups.items():\n for bone in bones:\n self.collision_groups[bone] = group\n\n for group, bones in conaffinity.items():\n for bone in bones:\n self.conaffinity[bone] = group\n\n self.simple_geom = simple_geom\n\n joint_names = list(offsets.keys())\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.equalities = equalities\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.orient = axes[joint_names[0]]\n self.root.pos = offsets[joint_names[0]]\n self.root.sites = sites.get(joint_names[0], [])\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n bone.channels = channels[joint]\n bone.dof_index = [dof_ind[x[0]] for x in bone.channels]\n bone.offset = offsets[joint] * self.len_scale\n bone.orient = axes[joint]\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n bone.sites = sites.get(joint, [])\n self.bones.append(bone)\n self.name2bone[joint] = bone\n\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bones(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.ends.append(bone.pos.copy())\n else:\n for bone_c in bone.child:\n 
bone.ends.append(bone_c.pos.copy())\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n return etree.tostring(tree, pretty_print=True)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n # create sensors\n # sensor = tree.getroot().find(\"sensor\")\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'frameangvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'xbody', 'objname': bone.name})\n\n tree.write(fname, pretty_print=True)\n\n def construct_tree(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create meshes\n asset = tree.getroot().find(\"asset\")\n for bone in self.bones:\n if os.path.exists(f\"{self.geom_dir}/geom/{bone.name}.stl\"):\n attr = {\n \"file\": f\"{self.rel_geom_dir}/geom/{bone.name}.stl\",\n \"name\": f\"{bone.name}_mesh\"\n }\n SubElement(asset, \"mesh\", attr)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n\n # create exclude contacts\n c_node = tree.getroot().find(\"contact\")\n for bname1, bname2 in self.exclude_contacts:\n attr = {\"body1\": bname1, \"body2\": bname2}\n SubElement(c_node, \"exclude\", attr)\n # create equalities\n eq_node = tree.getroot().find(\"equality\")\n for eq_joints in self.equalities.values():\n for j1 in range(len(eq_joints) - 1):\n for j2 in range(j1 + 1, len(eq_joints)):\n jname1, jcoeff1 = eq_joints[j1]\n jname2, jcoeff2 = eq_joints[j2]\n coeff = jcoeff1 / jcoeff2\n attr = {\n \"joint1\": jname1,\n \"joint2\": jname2,\n \"polycoef\": f\"0 {coeff:.6f} 0 0 0\",\n }\n SubElement(eq_node, \"joint\", attr)\n return tree\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n quat = quaternion_from_matrix(bone.orient)\n attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*quat)\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"limited\"] = \"false\"\n # j_attr[\"type\"] = \"free\"\n # j_attr[\"armature\"] = \"0.02\"\n # j_attr[\"damping\"] = \"50\"\n # j_attr[\"stiffness\"] = \"500\"\n # j_attr[\"frictionloss\"] = \"0\"\n \n SubElement(node, \"freejoint\", j_attr)\n else:\n\n for i in range(len(bone.channels)):\n ind = bone.dof_index[i]\n 
axis = bone.orient[:, ind]\n j_attr = dict()\n \n \n j_attr[\"name\"] = bone.name + \"_\" + bone.channels[i]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n\n\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0])\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n j_attr[\"armature\"] = \"0.02\"\n \n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n SubElement(node, \"joint\", j_attr)\n\n # write sites\n for s_name, s_pos, s_quat in bone.sites:\n s_attr = {\"name\": s_name}\n s_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(s_pos + offset))\n s_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*s_quat)\n s_attr[\"type\"] = \"sphere\"\n s_attr[\"size\"] = \"0.03\"\n SubElement(node, \"site\", s_attr)\n\n # write geometry\n geom_path = f\"{self.geom_dir}/geom/{bone.name}.stl\"\n \n if not self.simple_geom:\n assert os.path.exists(geom_path)\n if os.path.exists(geom_path):\n g_attr = {\"type\": \"mesh\", \"mesh\": f\"{bone.name}_mesh\"}\n if bone.name in self.collision_groups.keys():\n g_attr[\"density\"] = \"900\"\n # g_attr[\"density\"] = \"400\"\n # g_attr[\"density\"] = \"1000\"\n \n g_attr[\"contype\"] = str(self.collision_groups[bone.name])\n g_attr[\"conaffinity\"] = str(self.conaffinity[bone.name])\n\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"size\"] = str(10)\n # g_attr[\"friction\"] = \"0.000000000005 0.000000000005 0.1\"\n if not self.color_dict is None:\n g_attr[\"rgba\"] = self.color_dict[bone.name]\n\n # if bone.name in [\"L_Ankle\", \"R_Ankle\", \"L_Toe\", \"R_Toe\"]:\n # g_attr[\"friction\"] = \"5 500 500\"\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"margin\"] = \"0.0000000000000000001\"\n\n # g_attr[\"solimp\"] = \"0.9 0.99 0.0001 0.5 2\"\n # g_attr[\"solref\"] = \"0.001 0.5\"\n # g_attr[\"condim\"] = \"6\"\n # g_attr[\"friction\"] = \"0 0 0\"\n\n SubElement(node, \"geom\", g_attr)\n else:\n for end in bone.ends:\n g_attr = dict()\n e1 = bone.pos + offset\n e2 = end + offset\n v = e2 - e1\n if np.linalg.norm(v) > 1e-6:\n v /= np.linalg.norm(v)\n e1 += v * 0.02\n e2 -= v * 0.02\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n else:\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*bone.pos)\n g_attr[\"size\"] = \"0.0300\" if self.simple_geom else \"0.0100\"\n if not self.simple_geom:\n g_attr[\"contype\"] = \"0\"\n g_attr[\"conaffinity\"] = \"0\"\n elif bone.name in self.collision_groups.keys():\n group = str(self.collision_groups[bone.name])\n g_attr[\"contype\"] = group\n g_attr[\"conaffinity\"] = group\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. 
If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n self.register_buffer('children_map', self._parents_to_children(self.parents))\n\n def _parents_to_children(self, parents):\n self.SPINE3_IDX = 9\n children = torch.ones_like(parents) * -1\n for i in range(24):\n if parents[i] != -1 and children[parents[i]] < 0:\n children[parents[i]] = i\n\n children[self.SPINE3_IDX] = -3\n children[0] = 3\n children[self.SPINE3_IDX] = SMPL_BONE_ORDER_NAMES.index('Neck')\n return children\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None, root_trans=None, root_scale=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 
72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n \n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n if root_trans is not None:\n if root_scale is None:\n root_scale = torch.ones_like(root_trans[:, 0])\n cur_root_trans = joints[:, [0], :]\n vertices[:] = (vertices - cur_root_trans) * root_scale[:, None, None] + root_trans[:, None, :]\n joints[:] = (joints - cur_root_trans) * root_scale[:, None, None] + root_trans[:, None, :]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels, self.joint_range\n\n def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), scale=None, flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n if scale is not None:\n verts *= scale\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n if scale is not None:\n joint_pos *= scale\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c]\n for c, p in enumerate(smpl_joint_parents)\n }\n 
joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot = False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n 
skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate([np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n \n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n \n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else 
None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "quadric_mesh_decimation", "path": "uhc/utils/geom.py", "snippet": "def quadric_mesh_decimation(fname, reduction_rate, verbose=False):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutput()\n\n decimate = vtkQuadricDecimation()\n decimate.SetInputData(inputPoly)\n decimate.SetTargetReduction(reduction_rate)\n decimate.Update()\n decimatedPoly = vtkPolyData()\n decimatedPoly.ShallowCopy(decimate.GetOutput())\n\n if verbose:\n print(\n f\"Mesh Decimation: (points, faces) goes from ({inputPoly.GetNumberOfPoints(), inputPoly.GetNumberOfPolys()}) \"\n f\"to ({decimatedPoly.GetNumberOfPoints(), decimatedPoly.GetNumberOfPolys()})\"\n )\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputData(decimatedPoly)\n stlWriter.Write()" }, { "identifier": "flags", "path": "uhc/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, *items):" } ]
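The SMPL parser snippets in this record's context all reduce rest-pose joint positions to per-joint offsets along the kinematic tree (child position minus parent position, with the root kept absolute) plus a name-to-parent mapping. A minimal numpy sketch of that step; the joint names, positions, and parents array below are made-up placeholders, not real SMPL output:

# Minimal sketch of the offset computation in SMPL_Parser.get_offsets /
# get_mesh_offsets above, on a hypothetical 4-joint chain.
import numpy as np

joint_names = ["Pelvis", "L_Hip", "L_Knee", "L_Ankle"]
joint_pos = np.array([
    [0.0, 0.0, 0.9],
    [0.1, 0.0, 0.9],
    [0.1, 0.0, 0.5],
    [0.1, 0.0, 0.1],
])
parents = np.array([-1, 0, 1, 2])  # -1 marks the root

offsets = {
    joint_names[c]: (joint_pos[c] - joint_pos[p]) if p >= 0 else joint_pos[c]
    for c, p in enumerate(parents)
}
parents_dict = {
    joint_names[c]: (joint_names[p] if p >= 0 else None)
    for c, p in enumerate(parents)
}

print(offsets["L_Knee"])   # [ 0.   0.  -0.4]
print(parents_dict)        # {'Pelvis': None, 'L_Hip': 'Pelvis', ...}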
import os import sys import time import argparse import torch import os.path as osp import mujoco_py import numpy as np import math import uuid import atexit import shutil from copy import deepcopy from lxml.etree import XMLParser, parse, Element, SubElement from lxml import etree from io import BytesIO from scipy.spatial import ConvexHull from stl import mesh from mujoco_py import load_model_from_path, MjSim, MjViewer from uhc.khrylib.mocap.skeleton_local import Skeleton from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from uhc.utils.geom import quadric_mesh_decimation from uhc.utils.flags import flags
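The write_xml_bodynode snippet earlier in this record's context builds MJCF joint elements by formatting numeric attributes into strings and attaching them with lxml's SubElement, which is also why Element and SubElement appear in the import statement above. A minimal sketch of that pattern; the body name, axis, anchor, gains, and range below are illustrative placeholders, not values taken from the file:

# Sketch of the MJCF-writing pattern: collect formatted attributes in a dict,
# then attach them to the parent <body> node with SubElement.
import numpy as np
from lxml.etree import Element, SubElement, tostring

body = Element("body", {"name": "L_Knee", "pos": "0.1 0.0 0.5"})

axis = np.array([0.0, 1.0, 0.0])   # hinge axis (placeholder)
pos = np.array([0.1, 0.0, 0.5])    # joint anchor (placeholder)

j_attr = {
    "name": "L_Knee_x",
    "type": "hinge",
    "pos": "{0:.4f} {1:.4f} {2:.4f}".format(*pos),
    "axis": "{0:.4f} {1:.4f} {2:.4f}".format(*axis),
    "range": "{0:.4f} {1:.4f}".format(-np.pi, np.pi),
    "stiffness": "500",
    "damping": "50",
    "armature": "0.02",
}
SubElement(body, "joint", j_attr)

print(tostring(body, pretty_print=True).decode())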
14,571
self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and "bone_len" in self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array( [bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang)] ) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl", model_xml_path=None, masterfoot=False, create_default_skeleton=False, clean_up=False): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg if model_xml_path is not None: self.set_model_xml_path(model_xml_path) else: self.model_xml_path = None self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.rel_joint_lm = cfg.get( "rel_joint_lm", True ) # Rolling this out worldwide!! self.masterfoot = masterfoot self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) if self.smpl_model == "smpl":
sys.path.append(os.getcwd()) def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[ None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix = None, verbose=False, min_num_vert = 50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue vert = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(vert) norm_verts = vert - smpl_jts[jind] norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": vert, "hull": hull, } # print(jname, hull.simplices.shape[0]) center = vert[hull.vertices].mean(axis=0) jgeom = mesh.Mesh(np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = vert[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert) quadric_mesh_decimation(fname, reduction_rate, verbose=verbose) return hull_dict class Joint: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib["name"] self.type = node.attrib["type"] if "type" in node.attrib else "free" if self.type == "hinge": self.range = np.deg2rad(parse_vec(node.attrib.get("range", "-360 360"))) actu_node = ( body.tree.getroot().find("actuator").find(f'motor[@joint="{self.name}"]') ) if actu_node is not None: self.actuator = Actuator(actu_node, self) else: self.actuator = None self.parse_param_specs() self.param_inited = False # tunable parameters self.pos = parse_vec("0 0 0") if self.type == "hinge": self.axis = vec_to_polar(parse_vec(node.attrib["axis"])) if self.local_coord: self.pos += body.pos self.damping = ( parse_vec(node.attrib["damping"]) if "damping" in node.attrib else np.array([0]) ) self.stiffness = ( parse_vec(node.attrib["stiffness"]) if "stiffness" in node.attrib else np.array([0]) ) self.armature = ( parse_vec(node.attrib["armature"]) if "armature" in node.attrib else np.array([0.01]) ) self.frictionloss = ( parse_vec(node.attrib["frictionloss"]) if "frictionloss" in 
node.attrib else np.array([0]) ) # import ipdb; ipdb.set_trace() # assert np.all(self.pos == body.pos) def __repr__(self): return "joint_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("joint_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self, rename=False, index=0): pos = self.pos - self.body.pos if self.local_coord else self.pos if rename: self.name = self.body.name + "_joint_" + str(index) self.node.attrib["name"] = self.name if self.type == "hinge": axis_vec = polar_to_vec(self.axis) self.node.attrib["axis"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in axis_vec] ) self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) self.node.attrib["damping"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.damping] ) self.node.attrib["stiffness"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.stiffness] ) self.node.attrib["armature"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.armature] ) elif self.type == "free": pass if self.actuator is not None: self.actuator.sync_node() # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = " ".join( # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.frictionloss] # ) # if np.sum([self.name.startswith(i) for i in ["L_Knee", "R_Knee", "L_Ankle", "R_Ankle", "L_Toe", "R_Toe"]]): # self.node.attrib["frictionloss"] = "500" # self.node.attrib["stiffness"] = "5" # self.node.attrib["damping"] = "5" # if self.name != "Pelvis": # self.node.attrib["frictionloss"] = "5000" def get_params(self, param_list, get_name=False, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": if get_name: param_list += ["axis_theta", "axis_phi"] else: axis = normalize_range( self.axis, np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]), ) param_list.append(axis) elif pad_zeros: param_list.append(np.zeros(2)) if self.actuator is not None: self.actuator.get_params(param_list, get_name) elif pad_zeros: param_list.append( np.zeros(3 if self.type == "free" else 1) ) # ZL currently a workaround for supporting 3D joints if "damping" in self.param_specs: if get_name: param_list.append("damping") else: if not self.param_inited and self.param_specs["damping"].get( "rel", False ): self.param_specs["damping"]["lb"] += self.damping self.param_specs["damping"]["ub"] += self.damping self.param_specs["damping"]["lb"] = max( self.param_specs["damping"]["lb"], self.param_specs["damping"].get("min", -np.inf), ) self.param_specs["damping"]["ub"] = min( self.param_specs["damping"]["ub"], self.param_specs["damping"].get("max", np.inf), ) damping = normalize_range( self.damping, self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) param_list.append(damping.flatten()) if "armature" in self.param_specs: if get_name: param_list.append("armature") else: if not self.param_inited and self.param_specs["armature"].get( "rel", False ): self.param_specs["armature"]["lb"] += self.armature self.param_specs["armature"]["ub"] += self.armature self.param_specs["armature"]["lb"] = max( self.param_specs["armature"]["lb"], self.param_specs["armature"].get("min", -np.inf), ) self.param_specs["armature"]["ub"] = min( self.param_specs["armature"]["ub"], self.param_specs["armature"].get("max", np.inf), ) armature = normalize_range( self.armature, 
self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) param_list.append(armature.flatten()) if "stiffness" in self.param_specs: if get_name: param_list.append("stiffness") else: if not self.param_inited and self.param_specs["stiffness"].get( "rel", False ): self.param_specs["stiffness"]["lb"] += self.stiffness self.param_specs["stiffness"]["ub"] += self.stiffness self.param_specs["stiffness"]["lb"] = max( self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"].get("min", -np.inf), ) self.param_specs["stiffness"]["ub"] = min( self.param_specs["stiffness"]["ub"], self.param_specs["stiffness"].get("max", np.inf), ) stiffness = normalize_range( self.stiffness, self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) param_list.append(stiffness.flatten()) if "frictionloss" in self.param_specs: if get_name: param_list.append("frictionloss") else: if not self.param_inited and self.param_specs["frictionloss"].get( "rel", False ): self.param_specs["frictionloss"]["lb"] += self.frictionloss self.param_specs["frictionloss"]["ub"] += self.frictionloss self.param_specs["frictionloss"]["lb"] = max( self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"].get("min", -np.inf), ) self.param_specs["frictionloss"]["ub"] = min( self.param_specs["frictionloss"]["ub"], self.param_specs["frictionloss"].get("max", np.inf), ) frictionloss = normalize_range( self.frictionloss, self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) param_list.append(frictionloss.flatten()) if not get_name: self.param_inited = True # import ipdb; ipdb.set_trace() def set_params(self, params, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": self.axis = denormalize_range( params[:2], np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]) ) params = params[2:] elif pad_zeros: params = params[2:] if self.actuator is not None: params = self.actuator.set_params(params) elif pad_zeros: params = params[1:] # Order of this matters!!! 
Should always be damping, aramature, stiffness (the order they are read) if "damping" in self.param_specs: self.damping = denormalize_range( params[[0]], self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) params = params[1:] if "armature" in self.param_specs: self.armature = denormalize_range( params[[0]], self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) params = params[1:] if "stiffness" in self.param_specs: self.stiffness = denormalize_range( params[[0]], self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) params = params[1:] if "frictionloss" in self.param_specs: self.frictionloss = denormalize_range( params[[0]], self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) params = params[1:] return params class Geom: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib.get("name", "") self.type = node.attrib["type"] self.density = ( parse_vec(node.attrib["density"]) / 1000 if "density" in node.attrib else np.array([1]) ) self.parse_param_specs() self.param_inited = False # tunable parameters # self.size = ( # parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([0]) # ) self.size = ( parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([1, 1, 1]) ) if self.type == "box": self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) self.pos_delta = np.array([0, 0, 0]) self.rot = parse_vec(node.attrib["quat"]) elif self.type == "sphere": self.pos_delta = np.array([0, 0, 0]) self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) elif self.type == "capsule": self.start, self.end = parse_fromto(node.attrib["fromto"]) elif self.type == "mesh": self.start, self.end = body.pos.copy(), body.pos.copy() if self.local_coord: self.start += body.pos self.end += body.pos if body.bone_start is None: self.bone_start = self.start.copy() body.bone_start = self.bone_start.copy() else: self.bone_start = body.bone_start.copy() self.ext_start = np.linalg.norm( self.bone_start - self.start ) ## Geom extension from bone start def __repr__(self): return "geom_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("geom_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) def update_start(self): if self.type == "capsule": vec = self.bone_start - self.end self.start = self.bone_start + vec * (self.ext_start / np.linalg.norm(vec)) def sync_node(self): # self.node.attrib['name'] = self.name self.node.attrib.pop("name", None) if not self.size is None: self.node.attrib["size"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size] ) self.node.attrib["density"] = " ".join( [f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density] ) # if self.type == "capsule": # start = self.start - self.body.pos if self.local_coord else self.start # end = self.end - self.body.pos if self.local_coord else self.end # self.node.attrib["fromto"] = " ".join( # [ # f"{x:.6f}".rstrip("0").rstrip(".") # for x in np.concatenate([start, end]) # ] # ) # elif self.type == "box" or self.type == "sphere": # 
# self.node.attrib["pos"] = " ".join( # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta] # # ) # import ipdb; ipdb.set_trace() # pass def get_params(self, param_list, get_name=False, pad_zeros=False): if "size" in self.param_specs: if get_name: param_list.append("size") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if not self.param_inited and self.param_specs["size"].get( "rel", False ): self.param_specs["size"]["lb"] += self.size self.param_specs["size"]["ub"] += self.size self.param_specs["size"]["lb"] = max( self.param_specs["size"]["lb"], self.param_specs["size"].get("min", -np.inf), ) self.param_specs["size"]["ub"] = min( self.param_specs["size"]["ub"], self.param_specs["size"].get("max", np.inf), ) size = normalize_range( self.size, self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) param_list.append(size.flatten()) if pad_zeros and self.type == "capsule": param_list.append( np.zeros(2) ) # capsule has needs to be 3 for GNN elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "ext_start" in self.param_specs: if get_name: param_list.append("ext_start") else: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" ): if not self.param_inited and self.param_specs["ext_start"].get( "rel", False ): self.param_specs["ext_start"]["lb"] += self.ext_start self.param_specs["ext_start"]["ub"] += self.ext_start self.param_specs["ext_start"]["lb"] = max( self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"].get("min", -np.inf), ) self.param_specs["ext_start"]["ub"] = min( self.param_specs["ext_start"]["ub"], self.param_specs["ext_start"].get("max", np.inf), ) ext_start = normalize_range( self.ext_start, self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) param_list.append(ext_start.flatten()) elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "density" in self.param_specs: if get_name: param_list.append("density") else: if not self.param_inited and self.param_specs["density"].get( "rel", False ): self.param_specs["density"]["lb"] += self.density self.param_specs["density"]["ub"] += self.density self.param_specs["density"]["lb"] = max( self.param_specs["density"]["lb"], self.param_specs["density"].get("min", -np.inf), ) self.param_specs["density"]["ub"] = min( self.param_specs["density"]["ub"], self.param_specs["density"].get("max", np.inf), ) density = normalize_range( self.density, self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) param_list.append(density.flatten()) # if pad_zeros: # param_list.append(np.zeros(self.density.shape)) if "pos_delta" in self.param_specs: if get_name: param_list.append("pos_delta") else: if self.type == "box" or self.type == "sphere": if not self.param_inited and self.param_specs["pos_delta"].get( "rel", False ): self.param_specs["pos_delta"]["lb"] += self.density self.param_specs["pos_delta"]["ub"] += self.density self.param_specs["pos_delta"]["lb"] = max( self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"].get("min", -np.inf), ) self.param_specs["pos_delta"]["ub"] = min( self.param_specs["pos_delta"]["ub"], self.param_specs["pos_delta"].get("max", np.inf), ) pos_delta = normalize_range( self.pos_delta, self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) param_list.append(pos_delta.flatten()) elif pad_zeros: param_list.append(np.zeros(3)) if not get_name: self.param_inited = True def set_params(self, params, pad_zeros=False): 
if "size" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): if len(self.size) == 1: self.size = denormalize_range( params[[0]], self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[1:] elif len(self.size) == 3: self.size = denormalize_range( np.array(params[:3]), self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[3:] elif pad_zeros: params = params[1:] if "ext_start" in self.param_specs: if self.type == "capsule" or self.type == "box" or self.type == "sphere": self.ext_start = denormalize_range( params[[0]], self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "density" in self.param_specs: if ( self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh" ): self.density = denormalize_range( params[[0]], self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "pos_delta" in self.param_specs: if self.type == "box" or self.type == "sphere": self.pos_delta = denormalize_range( np.array(params[:3]), self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) params = params[3:] elif pad_zeros: params = params[3:] return params class Actuator: def __init__(self, node, joint): self.node = node self.joint = joint self.cfg = joint.cfg self.joint_name = node.attrib["joint"] self.name = self.joint_name self.parse_param_specs() self.param_inited = False # tunable parameters self.gear = float(node.attrib["gear"]) def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("actuator_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self): self.node.attrib["gear"] = f"{self.gear:.6f}".rstrip("0").rstrip(".") self.name = self.joint.name self.node.attrib["name"] = self.name self.node.attrib["joint"] = self.joint.name def get_params(self, param_list, get_name=False): if "gear" in self.param_specs: if get_name: param_list.append("gear") else: if not self.param_inited and self.param_specs["gear"].get("rel", False): self.param_specs["gear"]["lb"] += self.gear self.param_specs["gear"]["ub"] += self.gear self.param_specs["gear"]["lb"] = max( self.param_specs["gear"]["lb"], self.param_specs["gear"].get("min", -np.inf), ) self.param_specs["gear"]["ub"] = min( self.param_specs["gear"]["ub"], self.param_specs["gear"].get("max", np.inf), ) gear = normalize_range( self.gear, self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) param_list.append(np.array([gear])) if not get_name: self.param_inited = True def set_params(self, params): if "gear" in self.param_specs: self.gear = denormalize_range( params[0].item(), self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) params = params[1:] return params class Body: def __init__(self, node, parent_body, robot, cfg, new_body=False): self.node = node self.parent = parent_body self.new_body = new_body if parent_body is not None: parent_body.child.append(self) parent_body.cind += 1 self.depth = parent_body.depth + 1 else: self.depth = 0 self.robot = robot self.cfg = cfg self.tree = robot.tree self.local_coord = robot.local_coord self.name = ( node.attrib["name"] if "name" in node.attrib else self.parent.name + 
f"_child{len(self.parent.child)}" ) self.child = [] self.cind = 0 self.pos = parse_vec(node.attrib["pos"]) if self.local_coord and parent_body is not None: self.pos += parent_body.pos if cfg.get("init_root_from_geom", False): self.bone_start = None if parent_body is None else self.pos.copy() else: self.bone_start = self.pos.copy() self.joints = [Joint(x, self) for x in node.findall('joint[@type="hinge"]')] + \ [Joint(x, self) for x in node.findall('joint[@type="free"]')] + \ [Joint(x, self) for x in node.findall('freejoint')] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] supported_geoms = self.cfg.get("supported_geoms", ["capsule", "box"]) self.geoms = [ Geom(x, self) for geom_type in supported_geoms for x in node.findall(f'geom[@type="{geom_type}"]') ] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] + [Geom(x, self) for x in node.findall('geom[@type="sphere"]')] + [Geom(x, self) for x in node.findall('geom[@type="box"]')] self.parse_param_specs() self.param_inited = False # parameters self.bone_end = None self.bone_offset = None def __repr__(self): return "body_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("body_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) if name == "bone_ang": specs["lb"] = np.deg2rad(specs["lb"]) specs["ub"] = np.deg2rad(specs["ub"]) def reindex(self): if self.parent is None: self.index = "0" else: ind = self.parent.child.index(self) + 1 pname = "" if self.parent.index == "0" else self.parent.index self.index = str(ind) + pname if self.new_body: self.name = self.index def init(self): if len(self.child) > 0: bone_ends = [x.bone_start for x in self.child] else: bone_ends = [x.end for x in self.geoms] if len(bone_ends) > 0: self.bone_end = np.mean(np.stack(bone_ends), axis=0) self.bone_offset = self.bone_end - self.bone_start def get_actuator_name(self): for joint in self.joints: if joint.actuator is not None: return joint.actuator.name def get_joint_range(self): assert len(self.joints) == 1 return self.joints[0].range def sync_node(self): pos = ( self.pos - self.parent.pos if self.local_coord and self.parent is not None else self.pos ) self.node.attrib["name"] = self.name self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos] ) for idx, joint in enumerate(self.joints): joint.sync_node(rename=self.new_body, index=idx) for geom in self.geoms: geom.sync_node() def sync_geom(self): for geom in self.geoms: geom.bone_start = self.bone_start.copy() # geom.end = self.bone_end.copy() # geom.update_start() def sync_joint(self): if self.parent is not None: for joint in self.joints: joint.pos = self.pos.copy() def rebuild(self): if self.parent is not None: # self.bone_start = self.parent.bone_end.copy() self.pos = self.bone_start.copy() if self.bone_offset is not None: self.bone_end = self.bone_start + self.bone_offset if self.parent is None and self.cfg.get("no_root_offset", False): self.bone_end = self.bone_start self.sync_geom() self.sync_joint() def get_params( self, param_list, get_name=False, pad_zeros=False, demap_params=False ): if self.bone_offset is not None and "offset" in self.param_specs: if get_name: if self.param_specs["offset"]["type"] == "xz": param_list += ["offset_x", "offset_z"] elif self.param_specs["offset"]["type"] == "xy": param_list += 
["offset_x", "offset_y"] else: param_list += ["offset_x", "offset_y", "offset_z"] else: if self.param_specs["offset"]["type"] == "xz": offset = self.bone_offset[[0, 2]] elif self.param_specs["offset"]["type"] == "xy": offset = self.bone_offset[[0, 1]] else: offset = self.bone_offset if not self.param_inited and self.param_specs["offset"].get( "rel", False ): self.param_specs["offset"]["lb"] += offset self.param_specs["offset"]["ub"] += offset self.param_specs["offset"]["lb"] = np.maximum( self.param_specs["offset"]["lb"], self.param_specs["offset"].get( "min", np.full_like(offset, -np.inf) ), ) self.param_specs["offset"]["ub"] = np.minimum( self.param_specs["offset"]["ub"], self.param_specs["offset"].get( "max", np.full_like(offset, np.inf) ), ) offset = normalize_range( offset, self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) param_list.append(offset.flatten()) if self.bone_offset is not None and "bone_len" in self.param_specs: if get_name: param_list += ["bone_len"] else: bone_len = np.linalg.norm(self.bone_offset) if not self.param_inited and self.param_specs["bone_len"].get( "rel", False ): self.param_specs["bone_len"]["lb"] += bone_len self.param_specs["bone_len"]["ub"] += bone_len self.param_specs["bone_len"]["lb"] = max( self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"].get("min", -np.inf), ) self.param_specs["bone_len"]["ub"] = min( self.param_specs["bone_len"]["ub"], self.param_specs["bone_len"].get("max", np.inf), ) bone_len = normalize_range( bone_len, self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) param_list.append(np.array([bone_len])) if self.bone_offset is not None and "bone_ang" in self.param_specs: if get_name: param_list += ["bone_ang"] else: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if not self.param_inited and self.param_specs["bone_ang"].get( "rel", False ): self.param_specs["bone_ang"]["lb"] += bone_ang self.param_specs["bone_ang"]["ub"] += bone_ang self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and 
"bone_len" in self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array( [bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang)] ) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl", model_xml_path=None, masterfoot=False, create_default_skeleton=False, clean_up=False): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg if model_xml_path is not None: self.set_model_xml_path(model_xml_path) else: self.model_xml_path = None self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.rel_joint_lm = cfg.get( "rel_joint_lm", True ) # Rolling this out worldwide!! self.masterfoot = masterfoot self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = ( torch.zeros((1, 10)).float() if self.smpl_model == "smpl" else torch.zeros((1, 16)).float() ) if self.smpl_model == "smpl":
self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral", create_transl=False)
2
2023-10-30 20:43:43+00:00
24k
masked-spacetime-hashing/msth
nerfstudio/data/datamanagers/base_datamanager.py
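The context list that follows includes RayBundle.get_ray_samples, which expects per-ray bin_starts and bin_ends and computes deltas = bin_ends - bin_starts. A sketch of producing such bins with uniform spacing between synthetic near/far distances; this is an illustration under that assumption, not nerfstudio's sampler API:

# Sketch: uniform per-ray bins shaped [num_rays, num_samples, 1], as consumed
# by RayBundle.get_ray_samples in the context below.
import torch

num_rays, num_samples = 8, 64
nears = torch.full((num_rays, 1), 0.05)
fars = torch.full((num_rays, 1), 4.0)

edges = torch.linspace(0.0, 1.0, num_samples + 1)   # [num_samples + 1]
edges = nears + (fars - nears) * edges[None, :]      # [num_rays, num_samples + 1]

bin_starts = edges[:, :-1, None]                     # [num_rays, num_samples, 1]
bin_ends = edges[:, 1:, None]                        # [num_rays, num_samples, 1]
deltas = bin_ends - bin_starts                       # matches get_ray_samples

print(bin_starts.shape, bin_ends.shape, deltas.mean().item())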
[ { "identifier": "CameraOptimizerConfig", "path": "nerfstudio/cameras/camera_optimizers.py", "snippet": "class CameraOptimizerConfig(InstantiateConfig):\n \"\"\"Configuration of optimization for camera poses.\"\"\"\n\n _target: Type = field(default_factory=lambda: CameraOptimizer)\n\n mode: Literal[\"off\", \"SO3xR3\", \"SE3\"] = \"off\"\n \"\"\"Pose optimization strategy to use. If enabled, we recommend SO3xR3.\"\"\"\n\n position_noise_std: float = 0.0\n \"\"\"Noise to add to initial positions. Useful for debugging.\"\"\"\n\n orientation_noise_std: float = 0.0\n \"\"\"Noise to add to initial orientations. Useful for debugging.\"\"\"\n\n optimizer: AdamOptimizerConfig = AdamOptimizerConfig(lr=6e-4, eps=1e-15)\n \"\"\"ADAM parameters for camera optimization.\"\"\"\n\n scheduler: SchedulerConfig = ExponentialDecaySchedulerConfig(max_steps=10000)\n \"\"\"Learning rate scheduler for camera optimizer..\"\"\"\n\n param_group: tyro.conf.Suppress[str] = \"camera_opt\"\n \"\"\"Name of the parameter group used for pose optimization. Can be any string that doesn't conflict with other\n groups.\"\"\"" }, { "identifier": "CameraType", "path": "nerfstudio/cameras/cameras.py", "snippet": "class CameraType(Enum):\n \"\"\"Supported camera types.\"\"\"\n\n PERSPECTIVE = auto()\n FISHEYE = auto()\n EQUIRECTANGULAR = auto()" }, { "identifier": "RayBundle", "path": "nerfstudio/cameras/rays.py", "snippet": "class RayBundle(TensorDataclass):\n \"\"\"A bundle of ray parameters.\"\"\"\n\n # TODO(ethan): make sure the sizes with ... are correct\n origins: TensorType[..., 3]\n \"\"\"Ray origins (XYZ)\"\"\"\n directions: TensorType[..., 3]\n \"\"\"Unit ray direction vector\"\"\"\n pixel_area: TensorType[..., 1]\n \"\"\"Projected area of pixel a distance 1 away from origin\"\"\"\n camera_indices: Optional[TensorType[..., 1]] = None\n \"\"\"Camera indices\"\"\"\n nears: Optional[TensorType[..., 1]] = None\n \"\"\"Distance along ray to start sampling\"\"\"\n fars: Optional[TensorType[..., 1]] = None\n \"\"\"Rays Distance along ray to stop sampling\"\"\"\n metadata: Optional[Dict[str, TensorType[\"num_rays\", \"latent_dims\"]]] = None\n \"\"\"Additional metadata or data needed for interpolation, will mimic shape of rays\"\"\"\n times: Optional[TensorType[..., 1]] = None\n \"\"\"Times at which rays are sampled\"\"\"\n\n def set_camera_indices(self, camera_index: int) -> None:\n \"\"\"Sets all of the the camera indices to a specific camera index.\n\n Args:\n camera_index: Camera index.\n \"\"\"\n self.camera_indices = torch.ones_like(self.origins[..., 0:1]).long() * camera_index\n\n def __len__(self) -> int:\n num_rays = torch.numel(self.origins) // self.origins.shape[-1]\n return num_rays\n\n def sample(self, num_rays: int) -> \"RayBundle\":\n \"\"\"Returns a RayBundle as a subset of rays.\n\n Args:\n num_rays: Number of rays in output RayBundle\n\n Returns:\n RayBundle with subset of rays.\n \"\"\"\n assert num_rays <= len(self)\n indices = random.sample(range(len(self)), k=num_rays)\n return self[indices]\n\n def get_row_major_sliced_ray_bundle(self, start_idx: int, end_idx: int) -> \"RayBundle\":\n \"\"\"Flattens RayBundle and extracts chunk given start and end indices.\n\n Args:\n start_idx: Start index of RayBundle chunk.\n end_idx: End index of RayBundle chunk.\n\n Returns:\n Flattened RayBundle with end_idx-start_idx rays.\n\n \"\"\"\n return self.flatten()[start_idx:end_idx]\n\n def get_ray_samples(\n self,\n bin_starts: TensorType[\"bs\":..., \"num_samples\", 1],\n bin_ends: TensorType[\"bs\":..., 
\"num_samples\", 1],\n spacing_starts: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_ends: Optional[TensorType[\"bs\":..., \"num_samples\", 1]] = None,\n spacing_to_euclidean_fn: Optional[Callable] = None,\n ) -> RaySamples:\n \"\"\"Produces samples for each ray by projection points along the ray direction. Currently samples uniformly.\n\n Args:\n bin_starts: Distance from origin to start of bin.\n bin_ends: Distance from origin to end of bin.\n\n Returns:\n Samples projected along ray.\n \"\"\"\n deltas = bin_ends - bin_starts\n if self.camera_indices is not None:\n camera_indices = self.camera_indices[..., None]\n else:\n camera_indices = None\n\n shaped_raybundle_fields = self[..., None]\n\n frustums = Frustums(\n origins=shaped_raybundle_fields.origins, # [..., 1, 3]\n directions=shaped_raybundle_fields.directions, # [..., 1, 3]\n starts=bin_starts, # [..., num_samples, 1]\n ends=bin_ends, # [..., num_samples, 1]\n pixel_area=shaped_raybundle_fields.pixel_area, # [..., 1, 1]\n )\n\n ray_samples = RaySamples(\n frustums=frustums,\n camera_indices=camera_indices, # [..., 1, 1]\n deltas=deltas, # [..., num_samples, 1]\n spacing_starts=spacing_starts, # [..., num_samples, 1]\n spacing_ends=spacing_ends, # [..., num_samples, 1]\n spacing_to_euclidean_fn=spacing_to_euclidean_fn,\n metadata=shaped_raybundle_fields.metadata,\n times=None if self.times is None else self.times[..., None], # [..., 1, 1]\n )\n\n return ray_samples" }, { "identifier": "InstantiateConfig", "path": "nerfstudio/configs/base_config.py", "snippet": "class InstantiateConfig(PrintableConfig): # pylint: disable=too-few-public-methods\n \"\"\"Config class for instantiating an the class specified in the _target attribute.\"\"\"\n\n _target: Type\n\n def setup(self, **kwargs) -> Any:\n \"\"\"Returns the instantiated object using the config.\"\"\"\n return self._target(self, **kwargs)" }, { "identifier": "ARKitScenesDataParserConfig", "path": "nerfstudio/data/dataparsers/arkitscenes_dataparser.py", "snippet": "class ARKitScenesDataParserConfig(DataParserConfig):\n \"\"\"ARKitScenes dataset config.\n ARKitScenes dataset (http://github.com/apple/ARKitScenes) is a large-scale 3D dataset of indoor scenes.\n This dataparser uses 3D deteciton subset of the ARKitScenes dataset.\n \"\"\"\n\n _target: Type = field(default_factory=lambda: ARKitScenes)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/ARKitScenes/3dod/Validation/41069021\")\n \"\"\"Path to ARKitScenes folder with densely extracted scenes.\"\"\"\n scene_scale: float = 1.0\n \"\"\"How much to scale the region of interest by.\"\"\"\n center_poses: bool = True\n \"\"\"Whether to center the poses.\"\"\"\n scale_poses: bool = True\n \"\"\"Whether to automatically scale the poses to fit in +/- 1 bounding box.\"\"\"\n train_split_fraction: float = 0.9\n \"\"\"The fraction of images to use for training. The remaining images are for eval.\"\"\"\n depth_unit_scale_factor: float = 1e-3\n \"\"\"Scales the depth values to meters. 
Default value is 0.001 for a millimeter to meter conversion.\"\"\"" }, { "identifier": "DataparserOutputs", "path": "nerfstudio/data/dataparsers/base_dataparser.py", "snippet": "class DataparserOutputs:\n \"\"\"Dataparser outputs for the which will be used by the DataManager\n for creating RayBundle and RayGT objects.\"\"\"\n\n image_filenames: List[Path]\n \"\"\"Filenames for the images.\"\"\"\n cameras: Cameras\n \"\"\"Camera object storing collection of camera information in dataset.\"\"\"\n alpha_color: Optional[TensorType[3]] = None\n \"\"\"Color of dataset background.\"\"\"\n scene_box: SceneBox = SceneBox()\n \"\"\"Scene box of dataset. Used to bound the scene or provide the scene scale depending on model.\"\"\"\n mask_filenames: Optional[List[Path]] = None\n \"\"\"Filenames for any masks that are required\"\"\"\n metadata: Dict[str, Any] = to_immutable_dict({})\n \"\"\"Dictionary of any metadata that be required for the given experiment.\n Will be processed by the InputDataset to create any additional tensors that may be required.\n \"\"\"\n dataparser_transform: TensorType[3, 4] = torch.eye(4)[:3, :]\n \"\"\"Transform applied by the dataparser.\"\"\"\n dataparser_scale: float = 1.0\n \"\"\"Scale applied by the dataparser.\"\"\"\n\n def as_dict(self) -> dict:\n \"\"\"Returns the dataclass as a dictionary.\"\"\"\n return vars(self)\n\n def save_dataparser_transform(self, path: Path):\n \"\"\"Save dataparser transform to json file. Some dataparsers will apply a transform to the poses,\n this method allows the transform to be saved so that it can be used in other applications.\n\n Args:\n path: path to save transform to\n \"\"\"\n data = {\n \"transform\": self.dataparser_transform.tolist(),\n \"scale\": float(self.dataparser_scale),\n }\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n with open(path, \"w\", encoding=\"UTF-8\") as file:\n json.dump(data, file, indent=4)" }, { "identifier": "BlenderDataParserConfig", "path": "nerfstudio/data/dataparsers/blender_dataparser.py", "snippet": "class BlenderDataParserConfig(DataParserConfig):\n \"\"\"Blender dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: Blender)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/blender/lego\")\n \"\"\"Directory specifying location of data.\"\"\"\n scale_factor: float = 1.0\n \"\"\"How much to scale the camera origins by.\"\"\"\n alpha_color: str = \"white\"\n \"\"\"alpha color of background\"\"\"" }, { "identifier": "DNeRFDataParserConfig", "path": "nerfstudio/data/dataparsers/dnerf_dataparser.py", "snippet": "class DNeRFDataParserConfig(DataParserConfig):\n \"\"\"D-NeRF dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: DNeRF)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/dnerf/lego\")\n \"\"\"Directory specifying location of data.\"\"\"\n scale_factor: float = 1.0\n \"\"\"How much to scale the camera origins by.\"\"\"\n alpha_color: str = \"white\"\n \"\"\"alpha color of background\"\"\"" }, { "identifier": "DycheckDataParserConfig", "path": "nerfstudio/data/dataparsers/dycheck_dataparser.py", "snippet": "class DycheckDataParserConfig(DataParserConfig):\n \"\"\"Dycheck (https://arxiv.org/abs/2210.13445) dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: Dycheck)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/iphone/mochi-high-five\")\n \"\"\"Directory specifying location of data.\"\"\"\n scale_factor: float = 5.0\n \"\"\"How much to scale 
the camera origins by.\"\"\"\n alpha_color: str = \"white\"\n \"\"\"alpha color of background\"\"\"\n downscale_factor: int = 1\n \"\"\"How much to downscale images.\"\"\"\n scene_box_bound: float = 1.5\n \"\"\"Boundary of scene box.\"\"\"" }, { "identifier": "InstantNGPDataParserConfig", "path": "nerfstudio/data/dataparsers/instant_ngp_dataparser.py", "snippet": "class InstantNGPDataParserConfig(DataParserConfig):\n \"\"\"Instant-NGP dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: InstantNGP)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/ours/posterv2\")\n \"\"\"Directory or explicit json file path specifying location of data.\"\"\"\n scene_scale: float = 0.3333\n \"\"\"How much to scale the scene.\"\"\"" }, { "identifier": "MinimalDataParserConfig", "path": "nerfstudio/data/dataparsers/minimal_dataparser.py", "snippet": "class MinimalDataParserConfig(DataParserConfig):\n \"\"\"Minimal dataset config\"\"\"\n\n _target: Type = field(default_factory=lambda: MinimalDataParser)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"/home/nikhil/nerfstudio-main/tests/data/lego_test/minimal_parser\")" }, { "identifier": "NerfstudioDataParserConfig", "path": "nerfstudio/data/dataparsers/nerfstudio_dataparser.py", "snippet": "class NerfstudioDataParserConfig(DataParserConfig):\n \"\"\"Nerfstudio dataset config\"\"\"\n\n _target: Type = field(default_factory=lambda: Nerfstudio)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/nerfstudio/poster\")\n \"\"\"Directory or explicit json file path specifying location of data.\"\"\"\n scale_factor: float = 1.0\n \"\"\"How much to scale the camera origins by.\"\"\"\n downscale_factor: Optional[int] = None\n \"\"\"How much to downscale images. If not set, images are chosen such that the max dimension is <1600px.\"\"\"\n scene_scale: float = 1.0\n \"\"\"How much to scale the region of interest by.\"\"\"\n orientation_method: Literal[\"pca\", \"up\", \"none\"] = \"up\"\n \"\"\"The method to use for orientation.\"\"\"\n center_poses: bool = True\n \"\"\"Whether to center the poses.\"\"\"\n auto_scale_poses: bool = True\n \"\"\"Whether to automatically scale the poses to fit in +/- 1 bounding box.\"\"\"\n train_split_fraction: float = 0.9\n \"\"\"The fraction of images to use for training. The remaining images are for eval.\"\"\"\n depth_unit_scale_factor: float = 1e-3\n \"\"\"Scales the depth values to meters. 
Default value is 0.001 for a millimeter to meter conversion.\"\"\"\n\n \"\"\" feng \"\"\"\n train_val_json_split: bool = False\n \"\"\" /feng \"\"\"" }, { "identifier": "NuScenesDataParserConfig", "path": "nerfstudio/data/dataparsers/nuscenes_dataparser.py", "snippet": "class NuScenesDataParserConfig(DataParserConfig):\n \"\"\"NuScenes dataset config.\n NuScenes (https://www.nuscenes.org/nuscenes) is an autonomous driving dataset containing 1000 20s clips.\n Each clip was recorded with a suite of sensors including 6 surround cameras.\n It also includes 3D cuboid annotations around objects.\n We optionally use these cuboids to mask dynamic objects by specifying the mask_dir flag.\n To create these masks use scripts/datasets/process_nuscenes_masks.py.\n \"\"\"\n\n _target: Type = field(default_factory=lambda: NuScenes)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"scene-0103\") # TODO: rename to scene but keep checkpoint saving name?\n \"\"\"Name of the scene.\"\"\"\n data_dir: Path = Path(\"/mnt/local/NuScenes\")\n \"\"\"Path to NuScenes dataset.\"\"\"\n version: Literal[\"v1.0-mini\", \"v1.0-trainval\"] = \"v1.0-mini\"\n \"\"\"Dataset version.\"\"\"\n cameras: Tuple[Literal[\"FRONT\", \"FRONT_LEFT\", \"FRONT_RIGHT\", \"BACK\", \"BACK_LEFT\", \"BACK_RIGHT\"], ...] = (\"FRONT\",)\n \"\"\"Which cameras to use.\"\"\"\n mask_dir: Optional[Path] = None\n \"\"\"Path to masks of dynamic objects.\"\"\"\n\n train_split_fraction: float = 0.9\n \"\"\"The percent of images to use for training. The remaining images are for eval.\"\"\"\n\n verbose: bool = False\n \"\"\"Load dataset with verbose messaging\"\"\"" }, { "identifier": "PhototourismDataParserConfig", "path": "nerfstudio/data/dataparsers/phototourism_dataparser.py", "snippet": "class PhototourismDataParserConfig(DataParserConfig):\n \"\"\"Phototourism dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: Phototourism)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/phototourism/brandenburg-gate\")\n \"\"\"Directory specifying location of data.\"\"\"\n scale_factor: float = 3.0\n \"\"\"How much to scale the camera origins by.\"\"\"\n alpha_color: str = \"white\"\n \"\"\"alpha color of background\"\"\"\n train_split_fraction: float = 0.9\n \"\"\"The fraction of images to use for training. 
The remaining images are for eval.\"\"\"\n scene_scale: float = 1.0\n \"\"\"How much to scale the region of interest by.\"\"\"\n orientation_method: Literal[\"pca\", \"up\", \"none\"] = \"up\"\n \"\"\"The method to use for orientation.\"\"\"\n auto_scale_poses: bool = True\n \"\"\"Whether to automatically scale the poses to fit in +/- 1 bounding box.\"\"\"\n center_poses: bool = True\n \"\"\"Whether to center the poses.\"\"\"" }, { "identifier": "ScanNetDataParserConfig", "path": "nerfstudio/data/dataparsers/scannet_dataparser.py", "snippet": "class ScanNetDataParserConfig(DataParserConfig):\n \"\"\"ScanNet dataset config.\n ScanNet dataset (https://www.scan-net.org/) is a large-scale 3D dataset of indoor scenes.\n This dataparser assumes that the dense stream was extracted from .sens files.\n Expected structure of scene directory:\n - color/\n - depth/\n - intrinsic/\n - pose/\n \"\"\"\n\n _target: Type = field(default_factory=lambda: ScanNet)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/scannet/scene0423_02\")\n \"\"\"Path to ScanNet folder with densely extracted scenes.\"\"\"\n scene_scale: float = 1.0\n \"\"\"How much to scale the region of interest by.\"\"\"\n center_poses: bool = True\n \"\"\"Whether to center the poses.\"\"\"\n scale_poses: bool = True\n \"\"\"Whether to automatically scale the poses to fit in +/- 1 bounding box.\"\"\"\n train_split_fraction: float = 0.9\n \"\"\"The fraction of images to use for training. The remaining images are for eval.\"\"\"\n depth_unit_scale_factor: float = 1e-3\n \"\"\"Scales the depth values to meters. Default value is 0.001 for a millimeter to meter conversion.\"\"\"" }, { "identifier": "SDFStudioDataParserConfig", "path": "nerfstudio/data/dataparsers/sdfstudio_dataparser.py", "snippet": "class SDFStudioDataParserConfig(DataParserConfig):\n \"\"\"Scene dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: SDFStudio)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/DTU/scan65\")\n \"\"\"Directory specifying location of data.\"\"\"\n include_mono_prior: bool = False\n \"\"\"whether or not to load monocular depth and normal \"\"\"\n include_foreground_mask: bool = False\n \"\"\"whether or not to load foreground mask\"\"\"\n downscale_factor: int = 1\n scene_scale: float = 2.0\n \"\"\"\n Sets the bounding cube to have edge length of this size.\n The longest dimension of the Friends axis-aligned bbox will be scaled to this value.\n \"\"\"\n skip_every_for_val_split: int = 1\n \"\"\"sub sampling validation images\"\"\"\n auto_orient: bool = False" }, { "identifier": "Sitcoms3DDataParserConfig", "path": "nerfstudio/data/dataparsers/sitcoms3d_dataparser.py", "snippet": "class Sitcoms3DDataParserConfig(DataParserConfig):\n \"\"\"sitcoms3D dataset parser config\"\"\"\n\n _target: Type = field(default_factory=lambda: Sitcoms3D)\n \"\"\"target class to instantiate\"\"\"\n data: Path = Path(\"data/sitcoms3d/TBBT-big_living_room\")\n \"\"\"Directory specifying location of data.\"\"\"\n include_semantics: bool = True\n \"\"\"whether or not to include loading of semantics data\"\"\"\n downscale_factor: int = 4\n scene_scale: float = 2.0\n \"\"\"\n Sets the bounding cube to have edge length of this size.\n The longest dimension of the Sitcoms3D axis-aligned bbox will be scaled to this value.\n \"\"\"" }, { "identifier": "InputDataset", "path": "nerfstudio/data/datasets/base_dataset.py", "snippet": "class InputDataset(Dataset):\n \"\"\"Dataset that returns images.\n\n Args:\n 
dataparser_outputs: description of where and how to read input images.\n scale_factor: The scaling factor for the dataparser outputs\n \"\"\"\n\n def __init__(self, dataparser_outputs: DataparserOutputs, scale_factor: float = 1.0):\n super().__init__()\n self._dataparser_outputs = dataparser_outputs\n self.has_masks = dataparser_outputs.mask_filenames is not None\n self.scale_factor = scale_factor\n self.scene_box = deepcopy(dataparser_outputs.scene_box)\n self.metadata = deepcopy(dataparser_outputs.metadata)\n self.cameras = deepcopy(dataparser_outputs.cameras)\n self.cameras.rescale_output_resolution(scaling_factor=scale_factor)\n\n def __len__(self):\n return len(self._dataparser_outputs.image_filenames)\n\n def get_numpy_image(self, image_idx: int) -> npt.NDArray[np.uint8]:\n \"\"\"Returns the image of shape (H, W, 3 or 4).\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image_filename = self._dataparser_outputs.image_filenames[image_idx]\n pil_image = Image.open(image_filename)\n if self.scale_factor != 1.0:\n width, height = pil_image.size\n newsize = (int(width * self.scale_factor), int(height * self.scale_factor))\n pil_image = pil_image.resize(newsize, resample=Image.BILINEAR)\n image = np.array(pil_image, dtype=\"uint8\") # shape is (h, w) or (h, w, 3 or 4)\n if len(image.shape) == 2:\n image = image[:, :, None].repeat(3, axis=2)\n assert len(image.shape) == 3\n assert image.dtype == np.uint8\n assert image.shape[2] in [3, 4], f\"Image shape of {image.shape} is in correct.\"\n return image\n\n def get_image(self, image_idx: int) -> TensorType[\"image_height\", \"image_width\", \"num_channels\"]:\n \"\"\"Returns a 3 channel image.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image = torch.from_numpy(self.get_numpy_image(image_idx).astype(\"float32\") / 255.0)\n if self._dataparser_outputs.alpha_color is not None and image.shape[-1] == 4:\n assert image.shape[-1] == 4\n image = image[:, :, :3] * image[:, :, -1:] + self._dataparser_outputs.alpha_color * (1.0 - image[:, :, -1:])\n else:\n image = image[:, :, :3]\n return image\n\n def get_data(self, image_idx: int) -> Dict:\n \"\"\"Returns the ImageDataset data as a dictionary.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n image = self.get_image(image_idx)\n data = {\"image_idx\": image_idx}\n data[\"image\"] = image\n if self.has_masks:\n mask_filepath = self._dataparser_outputs.mask_filenames[image_idx]\n data[\"mask\"] = get_image_mask_tensor_from_path(filepath=mask_filepath, scale_factor=self.scale_factor)\n assert (\n data[\"mask\"].shape[:2] == data[\"image\"].shape[:2]\n ), f\"Mask and image have different shapes. 
Got {data['mask'].shape[:2]} and {data['image'].shape[:2]}\"\n metadata = self.get_metadata(data)\n data.update(metadata)\n return data\n\n # pylint: disable=no-self-use\n def get_metadata(self, data: Dict) -> Dict:\n \"\"\"Method that can be used to process any additional metadata that may be part of the model inputs.\n\n Args:\n image_idx: The image index in the dataset.\n \"\"\"\n del data\n return {}\n\n def __getitem__(self, image_idx: int) -> Dict:\n data = self.get_data(image_idx)\n return data\n\n @property\n def image_filenames(self) -> List[Path]:\n \"\"\"\n Returns image filenames for this dataset.\n The order of filenames is the same as in the Cameras object for easy mapping.\n \"\"\"\n\n return self._dataparser_outputs.image_filenames" }, { "identifier": "EquirectangularPixelSampler", "path": "nerfstudio/data/pixel_samplers.py", "snippet": "class EquirectangularPixelSampler(PixelSampler): # pylint: disable=too-few-public-methods\n \"\"\"Samples 'pixel_batch's from 'image_batch's. Assumes images are\n equirectangular and the sampling is done uniformly on the sphere.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n \"\"\"\n\n # overrides base method\n def sample_method( # pylint: disable=no-self-use\n self,\n batch_size: int,\n num_images: int,\n image_height: int,\n image_width: int,\n mask: Optional[TensorType] = None,\n device: Union[torch.device, str] = \"cpu\",\n ) -> TensorType[\"batch_size\", 3]:\n\n if isinstance(mask, torch.Tensor):\n # Note: if there is a mask, sampling reduces back to uniform sampling, which gives more\n # sampling weight to the poles of the image than the equators.\n # TODO(kevinddchen): implement the correct mask-sampling method.\n\n indices = super().sample_method(batch_size, num_images, image_height, image_width, mask=mask, device=device)\n else:\n\n # We sample theta uniformly in [0, 2*pi]\n # We sample phi in [0, pi] according to the PDF f(phi) = sin(phi) / 2.\n # This is done by inverse transform sampling.\n # http://corysimon.github.io/articles/uniformdistn-on-sphere/\n num_images_rand = torch.rand(batch_size, device=device)\n phi_rand = torch.acos(1 - 2 * torch.rand(batch_size, device=device)) / torch.pi\n theta_rand = torch.rand(batch_size, device=device)\n indices = torch.floor(\n torch.stack((num_images_rand, phi_rand, theta_rand), dim=-1)\n * torch.tensor([num_images, image_height, image_width], device=device)\n ).long()\n\n return indices" }, { "identifier": "PatchPixelSampler", "path": "nerfstudio/data/pixel_samplers.py", "snippet": "class PatchPixelSampler(PixelSampler): # pylint: disable=too-few-public-methods\n \"\"\"Samples 'pixel_batch's from 'image_batch's. Samples square patches\n from the images randomly. Useful for patch-based losses.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n patch_size: side length of patch. 
This must be consistent in the method\n config in order for samples to be reshaped into patches correctly.\n \"\"\"\n\n def __init__(self, num_rays_per_batch: int, keep_full_image: bool = False, **kwargs) -> None:\n self.patch_size = kwargs[\"patch_size\"]\n num_rays = (num_rays_per_batch // (self.patch_size**2)) * (self.patch_size**2)\n super().__init__(num_rays, keep_full_image, **kwargs)\n\n # overrides base method\n def sample_method( # pylint: disable=no-self-use\n self,\n batch_size: int,\n num_images: int,\n image_height: int,\n image_width: int,\n mask: Optional[TensorType] = None,\n device: Union[torch.device, str] = \"cpu\",\n ) -> TensorType[\"batch_size\", 3]:\n\n if mask:\n # Note: if there is a mask, sampling reduces back to uniform sampling\n indices = super().sample_method(batch_size, num_images, image_height, image_width, mask=mask, device=device)\n else:\n sub_bs = batch_size // (self.patch_size**2)\n indices = torch.rand((sub_bs, 3), device=device) * torch.tensor(\n [num_images, image_height - self.patch_size, image_width - self.patch_size],\n device=device,\n )\n\n indices = indices.view(sub_bs, 1, 1, 3).broadcast_to(sub_bs, self.patch_size, self.patch_size, 3).clone()\n\n yys, xxs = torch.meshgrid(\n torch.arange(self.patch_size, device=device), torch.arange(self.patch_size, device=device)\n )\n indices[:, ..., 1] += yys\n indices[:, ..., 2] += xxs\n\n indices = torch.floor(indices).long()\n indices = indices.flatten(0, 2)\n\n return indices" }, { "identifier": "PixelSampler", "path": "nerfstudio/data/pixel_samplers.py", "snippet": "class PixelSampler: # pylint: disable=too-few-public-methods\n \"\"\"Samples 'pixel_batch's from 'image_batch's.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n \"\"\"\n\n def __init__(self, num_rays_per_batch: int, keep_full_image: bool = False, **kwargs) -> None:\n self.kwargs = kwargs\n self.num_rays_per_batch = num_rays_per_batch\n self.keep_full_image = keep_full_image\n\n def set_num_rays_per_batch(self, num_rays_per_batch: int):\n \"\"\"Set the number of rays to sample per batch.\n\n Args:\n num_rays_per_batch: number of rays to sample per batch\n \"\"\"\n self.num_rays_per_batch = num_rays_per_batch\n\n def sample_method( # pylint: disable=no-self-use\n self,\n batch_size: int,\n num_images: int,\n image_height: int,\n image_width: int,\n mask: Optional[TensorType] = None,\n device: Union[torch.device, str] = \"cpu\",\n ) -> TensorType[\"batch_size\", 3]:\n \"\"\"\n Naive pixel sampler, uniformly samples across all possible pixels of all possible images.\n\n Args:\n batch_size: number of samples in a batch\n num_images: number of images to sample over\n mask: mask of possible pixels in an image to sample from.\n \"\"\"\n if isinstance(mask, torch.Tensor):\n nonzero_indices = torch.nonzero(mask[..., 0], as_tuple=False)\n chosen_indices = random.sample(range(len(nonzero_indices)), k=batch_size)\n indices = nonzero_indices[chosen_indices]\n else:\n indices = torch.floor(\n torch.rand((batch_size, 3), device=device)\n * torch.tensor([num_images, image_height, image_width], device=device)\n ).long()\n\n return indices\n\n def collate_image_dataset_batch(self, batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False):\n \"\"\"\n Operates on a batch of images and samples pixels to use for generating rays.\n Returns a collated batch which is input to the Graph.\n It will sample only within the valid 'mask' if it's 
specified.\n\n Args:\n batch: batch of images to sample from\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n \"\"\"\n\n device = batch[\"image\"].device\n num_images, image_height, image_width, _ = batch[\"image\"].shape\n\n if \"mask\" in batch:\n indices = self.sample_method(\n num_rays_per_batch, num_images, image_height, image_width, mask=batch[\"mask\"], device=device\n )\n else:\n indices = self.sample_method(num_rays_per_batch, num_images, image_height, image_width, device=device)\n\n c, y, x = (i.flatten() for i in torch.split(indices, 1, dim=-1))\n collated_batch = {\n key: value[c, y, x] for key, value in batch.items() if key != \"image_idx\" and value is not None\n }\n\n assert collated_batch[\"image\"].shape == (num_rays_per_batch, 3), collated_batch[\"image\"].shape\n\n # Needed to correct the random indices to their actual camera idx locations.\n indices[:, 0] = batch[\"image_idx\"][c]\n collated_batch[\"indices\"] = indices # with the abs camera indices\n\n if keep_full_image:\n collated_batch[\"full_image\"] = batch[\"image\"]\n\n return collated_batch\n\n def collate_image_dataset_batch_list(self, batch: Dict, num_rays_per_batch: int, keep_full_image: bool = False):\n \"\"\"\n Does the same as collate_image_dataset_batch, except it will operate over a list of images / masks inside\n a list.\n\n We will use this with the intent of DEPRECIATING it as soon as we find a viable alternative.\n The intention will be to replace this with a more efficient implementation that doesn't require a for loop, but\n since pytorch's ragged tensors are still in beta (this would allow for some vectorization), this will do.\n\n Args:\n batch: batch of images to sample from\n num_rays_per_batch: number of rays to sample per batch\n keep_full_image: whether or not to include a reference to the full image in returned batch\n \"\"\"\n\n device = batch[\"image\"][0].device\n num_images = len(batch[\"image\"])\n\n # only sample within the mask, if the mask is in the batch\n all_indices = []\n all_images = []\n\n if \"mask\" in batch:\n num_rays_in_batch = num_rays_per_batch // num_images\n for i in range(num_images):\n\n image_height, image_width, _ = batch[\"image\"][i].shape\n\n if i == num_images - 1:\n num_rays_in_batch = num_rays_per_batch - (num_images - 1) * num_rays_in_batch\n\n indices = self.sample_method(\n num_rays_in_batch, 1, image_height, image_width, mask=batch[\"mask\"][i], device=device\n )\n indices[:, 0] = i\n all_indices.append(indices)\n all_images.append(batch[\"image\"][i][indices[:, 1], indices[:, 2]])\n\n else:\n num_rays_in_batch = num_rays_per_batch // num_images\n for i in range(num_images):\n image_height, image_width, _ = batch[\"image\"][i].shape\n if i == num_images - 1:\n num_rays_in_batch = num_rays_per_batch - (num_images - 1) * num_rays_in_batch\n indices = self.sample_method(num_rays_in_batch, 1, image_height, image_width, device=device)\n indices[:, 0] = i\n all_indices.append(indices)\n all_images.append(batch[\"image\"][i][indices[:, 1], indices[:, 2]])\n\n indices = torch.cat(all_indices, dim=0)\n\n c, y, x = (i.flatten() for i in torch.split(indices, 1, dim=-1))\n collated_batch = {\n key: value[c, y, x]\n for key, value in batch.items()\n if key != \"image_idx\" and key != \"image\" and key != \"mask\" and value is not None\n }\n\n collated_batch[\"image\"] = torch.cat(all_images, dim=0)\n\n assert collated_batch[\"image\"].shape == (num_rays_per_batch, 
3), collated_batch[\"image\"].shape\n\n # Needed to correct the random indices to their actual camera idx locations.\n indices[:, 0] = batch[\"image_idx\"][c]\n collated_batch[\"indices\"] = indices # with the abs camera indices\n\n if keep_full_image:\n collated_batch[\"full_image\"] = batch[\"image\"]\n\n return collated_batch\n\n def sample(self, image_batch: Dict):\n \"\"\"Sample an image batch and return a pixel batch.\n\n Args:\n image_batch: batch of images to sample from\n \"\"\"\n if isinstance(image_batch[\"image\"], list):\n image_batch = dict(image_batch.items()) # copy the dictionary so we don't modify the original\n pixel_batch = self.collate_image_dataset_batch_list(\n image_batch, self.num_rays_per_batch, keep_full_image=self.keep_full_image\n )\n elif isinstance(image_batch[\"image\"], torch.Tensor):\n pixel_batch = self.collate_image_dataset_batch(\n image_batch, self.num_rays_per_batch, keep_full_image=self.keep_full_image\n )\n else:\n raise ValueError(\"image_batch['image'] must be a list or torch.Tensor\")\n return pixel_batch" }, { "identifier": "CacheDataloader", "path": "nerfstudio/data/utils/dataloaders.py", "snippet": "class CacheDataloader(DataLoader):\n \"\"\"Collated image dataset that implements caching of default-pytorch-collatable data.\n Creates batches of the InputDataset return type.\n\n Args:\n dataset: Dataset to sample from.\n num_samples_to_collate: How many images to sample rays for each batch. -1 for all images.\n num_times_to_repeat_images: How often to collate new images. -1 to never pick new images.\n device: Device to perform computation.\n collate_fn: The function we will use to collate our training data\n \"\"\"\n\n def __init__(\n self,\n dataset: Dataset,\n num_images_to_sample_from: int = -1,\n num_times_to_repeat_images: int = -1,\n device: Union[torch.device, str] = \"cpu\",\n collate_fn=nerfstudio_collate,\n **kwargs,\n ):\n self.dataset = dataset\n super().__init__(dataset=dataset, **kwargs) # This will set self.dataset\n self.num_times_to_repeat_images = num_times_to_repeat_images\n self.cache_all_images = (num_images_to_sample_from == -1) or (num_images_to_sample_from >= len(self.dataset))\n self.num_images_to_sample_from = len(self.dataset) if self.cache_all_images else num_images_to_sample_from\n self.device = device\n self.collate_fn = collate_fn\n self.num_workers = kwargs.get(\"num_workers\", 0)\n\n self.num_repeated = self.num_times_to_repeat_images # starting value\n self.first_time = True\n\n self.cached_collated_batch = None\n if self.cache_all_images:\n CONSOLE.print(f\"Caching all {len(self.dataset)} images.\")\n if len(self.dataset) > 500:\n CONSOLE.print(\n \"[bold yellow]Warning: If you run out of memory, try reducing the number of images to sample from.\"\n )\n self.cached_collated_batch = self._get_collated_batch()\n elif self.num_times_to_repeat_images == -1:\n CONSOLE.print(\n f\"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, without resampling.\"\n )\n else:\n CONSOLE.print(\n f\"Caching {self.num_images_to_sample_from} out of {len(self.dataset)} images, \"\n f\"resampling every {self.num_times_to_repeat_images} iters.\"\n )\n\n def __getitem__(self, idx):\n return self.dataset.__getitem__(idx)\n\n def _get_batch_list(self):\n \"\"\"Returns a list of batches from the dataset attribute.\"\"\"\n\n indices = random.sample(range(len(self.dataset)), k=self.num_images_to_sample_from)\n batch_list = []\n results = []\n\n num_threads = int(self.num_workers) * 4\n num_threads = min(num_threads, 
multiprocessing.cpu_count() - 1)\n num_threads = max(num_threads, 1)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=num_threads) as executor:\n for idx in indices:\n res = executor.submit(self.dataset.__getitem__, idx)\n results.append(res)\n\n for res in track(results, description=\"Loading data batch\", transient=True):\n batch_list.append(res.result())\n\n return batch_list\n\n def _get_collated_batch(self):\n \"\"\"Returns a collated batch.\"\"\"\n batch_list = self._get_batch_list()\n collated_batch = self.collate_fn(batch_list)\n collated_batch = get_dict_to_torch(collated_batch, device=self.device, exclude=[\"image\"])\n return collated_batch\n\n def __iter__(self):\n while True:\n if self.cache_all_images:\n collated_batch = self.cached_collated_batch\n elif self.first_time or (\n self.num_times_to_repeat_images != -1 and self.num_repeated >= self.num_times_to_repeat_images\n ):\n # trigger a reset\n self.num_repeated = 0\n collated_batch = self._get_collated_batch()\n # possibly save a cached item\n self.cached_collated_batch = collated_batch if self.num_times_to_repeat_images != 0 else None\n self.first_time = False\n else:\n collated_batch = self.cached_collated_batch\n self.num_repeated += 1\n yield collated_batch" }, { "identifier": "FixedIndicesEvalDataloader", "path": "nerfstudio/data/utils/dataloaders.py", "snippet": "class FixedIndicesEvalDataloader(EvalDataloader):\n \"\"\"Dataloader that returns a fixed set of indices.\n\n Args:\n input_dataset: InputDataset to load data from\n image_indices: List of image indices to load data from. If None, then use all images.\n device: Device to load data to\n \"\"\"\n\n def __init__(\n self,\n input_dataset: InputDataset,\n image_indices: Optional[Tuple[int]] = None,\n device: Union[torch.device, str] = \"cpu\",\n **kwargs,\n ):\n super().__init__(input_dataset, device, **kwargs)\n if image_indices is None:\n self.image_indices = list(range(len(input_dataset)))\n else:\n self.image_indices = image_indices\n self.count = 0\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count < len(self.image_indices):\n image_idx = self.image_indices[self.count]\n ray_bundle, batch = self.get_data_from_image_idx(image_idx)\n self.count += 1\n return ray_bundle, batch\n raise StopIteration" }, { "identifier": "RandIndicesEvalDataloader", "path": "nerfstudio/data/utils/dataloaders.py", "snippet": "class RandIndicesEvalDataloader(EvalDataloader):\n \"\"\"Dataloader that returns random images.\n\n Args:\n input_dataset: InputDataset to load data from\n device: Device to load data to\n \"\"\"\n\n def __init__(\n self,\n input_dataset: InputDataset,\n device: Union[torch.device, str] = \"cpu\",\n **kwargs,\n ):\n super().__init__(input_dataset, device, **kwargs)\n self.count = 0\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count < 1:\n image_indices = range(self.cameras.size)\n image_idx = random.choice(image_indices)\n ray_bundle, batch = self.get_data_from_image_idx(image_idx)\n self.count += 1\n return ray_bundle, batch\n raise StopIteration" }, { "identifier": "nerfstudio_collate", "path": "nerfstudio/data/utils/nerfstudio_collate.py", "snippet": "def nerfstudio_collate(\n batch, extra_mappings: Union[Dict[type, Callable], None] = None\n): # pylint: disable=too-many-return-statements\n r\"\"\"\n This is the default pytorch collate function, but with support for nerfstudio types. 
All documentation\n below is copied straight over from pytorch's default_collate function, python version 3.8.13,\n pytorch version '1.12.1+cu113'. Custom nerfstudio types are accounted for at the end, and extra\n mappings can be passed in to handle custom types. These mappings are from types: callable (types\n being like int or float or the return value of type(3.), etc). The only code before we parse for custom types that\n was changed from default pytorch was the addition of the extra_mappings argument, a find and replace operation\n from default_collate to nerfstudio_collate, and the addition of the nerfstudio_collate_err_msg_format variable.\n\n\n Function that takes in a batch of data and puts the elements within the batch\n into a tensor with an additional outer dimension - batch size. The exact output type can be\n a :class:`torch.Tensor`, a `Sequence` of :class:`torch.Tensor`, a\n Collection of :class:`torch.Tensor`, or left unchanged, depending on the input type.\n This is used as the default function for collation when\n `batch_size` or `batch_sampler` is defined in :class:`~torch.utils.data.DataLoader`.\n\n Here is the general input type (based on the type of the element within the batch) to output type mapping:\n\n * :class:`torch.Tensor` -> :class:`torch.Tensor` (with an added outer dimension batch size)\n * NumPy Arrays -> :class:`torch.Tensor`\n * `float` -> :class:`torch.Tensor`\n * `int` -> :class:`torch.Tensor`\n * `str` -> `str` (unchanged)\n * `bytes` -> `bytes` (unchanged)\n * `Mapping[K, V_i]` -> `Mapping[K, nerfstudio_collate([V_1, V_2, ...])]`\n * `NamedTuple[V1_i, V2_i, ...]` -> `NamedTuple[nerfstudio_collate([V1_1, V1_2, ...]),\n nerfstudio_collate([V2_1, V2_2, ...]), ...]`\n * `Sequence[V1_i, V2_i, ...]` -> `Sequence[nerfstudio_collate([V1_1, V1_2, ...]),\n nerfstudio_collate([V2_1, V2_2, ...]), ...]`\n\n Args:\n batch: a single batch to be collated\n\n Examples:\n >>> # Example with a batch of `int`s:\n >>> nerfstudio_collate([0, 1, 2, 3])\n tensor([0, 1, 2, 3])\n >>> # Example with a batch of `str`s:\n >>> nerfstudio_collate(['a', 'b', 'c'])\n ['a', 'b', 'c']\n >>> # Example with `Map` inside the batch:\n >>> nerfstudio_collate([{'A': 0, 'B': 1}, {'A': 100, 'B': 100}])\n {'A': tensor([ 0, 100]), 'B': tensor([ 1, 100])}\n >>> # Example with `NamedTuple` inside the batch:\n >>> Point = namedtuple('Point', ['x', 'y'])\n >>> nerfstudio_collate([Point(0, 0), Point(1, 1)])\n Point(x=tensor([0, 1]), y=tensor([0, 1]))\n >>> # Example with `Tuple` inside the batch:\n >>> nerfstudio_collate([(0, 1), (2, 3)])\n [tensor([0, 2]), tensor([1, 3])]\n >>> # Example with `List` inside the batch:\n >>> nerfstudio_collate([[0, 1], [2, 3]])\n [tensor([0, 2]), tensor([1, 3])]\n \"\"\"\n if extra_mappings is None:\n extra_mappings = {}\n elem = batch[0]\n elem_type = type(elem)\n if isinstance(elem, torch.Tensor): # pylint: disable=no-else-return\n out = None\n if torch.utils.data.get_worker_info() is not None:\n # If we're in a background process, concatenate directly into a\n # shared memory tensor to avoid an extra copy\n numel = sum(x.numel() for x in batch)\n storage = elem.storage()._new_shared(numel, device=elem.device) # pylint: disable=protected-access\n out = elem.new(storage).resize_(len(batch), *list(elem.size()))\n return torch.stack(batch, 0, out=out)\n elif elem_type.__module__ == \"numpy\" and elem_type.__name__ != \"str_\" and elem_type.__name__ != \"string_\":\n # pylint: disable=no-else-return, consider-using-in\n if elem_type.__name__ == \"ndarray\" or 
elem_type.__name__ == \"memmap\":\n # array of string classes and object\n if np_str_obj_array_pattern.search(elem.dtype.str) is not None:\n raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem.dtype))\n\n return nerfstudio_collate([torch.as_tensor(b) for b in batch], extra_mappings=extra_mappings)\n elif elem.shape == (): # scalars\n return torch.as_tensor(batch)\n elif isinstance(elem, float):\n return torch.tensor(batch, dtype=torch.float64)\n elif isinstance(elem, int):\n return torch.tensor(batch)\n elif isinstance(elem, string_classes):\n return batch\n elif isinstance(elem, collections.abc.Mapping):\n try:\n return elem_type(\n {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}\n )\n except TypeError:\n # The mapping type may not support `__init__(iterable)`.\n return {key: nerfstudio_collate([d[key] for d in batch], extra_mappings=extra_mappings) for key in elem}\n elif isinstance(elem, tuple) and hasattr(elem, \"_fields\"): # namedtuple\n return elem_type(*(nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in zip(*batch)))\n elif isinstance(elem, collections.abc.Sequence):\n # check to make sure that the elements in batch have consistent size\n it = iter(batch)\n elem_size = len(next(it))\n if not all(len(elem) == elem_size for elem in it):\n raise RuntimeError(\"each element in list of batch should be of equal size\")\n transposed = list(zip(*batch)) # It may be accessed twice, so we use a list.\n\n if isinstance(elem, tuple):\n return [\n nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed\n ] # Backwards compatibility.\n else:\n try:\n return elem_type([nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed])\n except TypeError:\n # The sequence type may not support `__init__(iterable)` (e.g., `range`).\n return [nerfstudio_collate(samples, extra_mappings=extra_mappings) for samples in transposed]\n\n # NerfStudio types supported below\n\n elif isinstance(elem, Cameras):\n # If a camera, just concatenate along the batch dimension. 
In the future, this may change to stacking\n assert all((isinstance(cam, Cameras) for cam in batch))\n assert all((cam.distortion_params is None for cam in batch)) or all(\n (cam.distortion_params is not None for cam in batch)\n ), \"All cameras must have distortion parameters or none of them should have distortion parameters.\\\n Generalized batching will be supported in the future.\"\n\n # If no batch dimension exists, then we need to stack everything and create a batch dimension on 0th dim\n if elem.shape == ():\n op = torch.stack\n # If batch dimension exists, then we need to concatenate along the 0th dimension\n else:\n op = torch.cat\n\n return Cameras(\n op([cameras.camera_to_worlds for cameras in batch], dim=0),\n op([cameras.fx for cameras in batch], dim=0),\n op([cameras.fy for cameras in batch], dim=0),\n op([cameras.cx for cameras in batch], dim=0),\n op([cameras.cy for cameras in batch], dim=0),\n height=op([cameras.height for cameras in batch], dim=0),\n width=op([cameras.width for cameras in batch], dim=0),\n distortion_params=op(\n [\n cameras.distortion_params\n if cameras.distortion_params is not None\n else torch.zeros_like(cameras.distortion_params)\n for cameras in batch\n ],\n dim=0,\n ),\n camera_type=op([cameras.camera_type for cameras in batch], dim=0),\n times=torch.stack(\n [cameras.times if cameras.times is not None else -torch.ones_like(cameras.times) for cameras in batch],\n dim=0,\n ),\n )\n\n for type_key in extra_mappings:\n if isinstance(elem, type_key):\n return extra_mappings[type_key](batch)\n\n raise TypeError(NERFSTUDIO_COLLATE_ERR_MSG_FORMAT.format(elem_type))" }, { "identifier": "TrainingCallback", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallback:\n \"\"\"Callback class used during training.\n The function 'func' with 'args' and 'kwargs' will be called every 'update_every_num_iters' training iterations,\n including at iteration 0. 
The function is called after the training iteration.\n\n Args:\n where_to_run: List of locations for when to run callback (before/after iteration)\n func: The function that will be called.\n update_every_num_iters: How often to call the function `func`.\n iters: Tuple of iteration steps to perform callback\n args: args for the function 'func'.\n kwargs: kwargs for the function 'func'.\n \"\"\"\n\n def __init__(\n self,\n where_to_run: List[TrainingCallbackLocation],\n func: Callable,\n update_every_num_iters: Optional[int] = None,\n iters: Optional[Tuple[int, ...]] = None,\n args: Optional[List] = None,\n kwargs: Optional[Dict] = None,\n ):\n assert (\n \"step\" in signature(func).parameters.keys()\n ), f\"'step: int' must be an argument in the callback function 'func': {func.__name__}\"\n self.where_to_run = where_to_run\n self.update_every_num_iters = update_every_num_iters\n self.iters = iters\n self.func = func\n self.args = args if args is not None else []\n self.kwargs = kwargs if kwargs is not None else {}\n\n def run_callback(self, step: int) -> None:\n \"\"\"Callback to run after training step\n\n Args:\n step: current iteration step\n \"\"\"\n if self.update_every_num_iters is not None:\n if step % self.update_every_num_iters == 0:\n self.func(*self.args, **self.kwargs, step=step)\n elif self.iters is not None:\n if step in self.iters:\n self.func(*self.args, **self.kwargs, step=step)\n\n def run_callback_at_location(self, step: int, location: TrainingCallbackLocation) -> None:\n \"\"\"Runs the callback if it's supposed to be run at the given location.\n\n Args:\n step: current iteration step\n location: when to run callback (before/after iteration)\n \"\"\"\n if location in self.where_to_run:\n self.run_callback(step=step)" }, { "identifier": "TrainingCallbackAttributes", "path": "nerfstudio/engine/callbacks.py", "snippet": "class TrainingCallbackAttributes:\n \"\"\"Attributes that can be used to configure training callbacks.\n The callbacks can be specified in the Dataloader or Model implementations.\n Instead of providing access to the entire Trainer object, we only provide these attributes.\n This should be least prone to errors and fairly clean from a user perspective.\"\"\"\n\n # TODO(ethan): type this without circular imports\n optimizers: Optional[InitVar]\n \"\"\"optimizers for training\"\"\"\n grad_scaler: Optional[InitVar]\n \"\"\"gradient scalers\"\"\"\n pipeline: Optional[InitVar]\n \"\"\"reference to training pipeline\"\"\"" }, { "identifier": "RayGenerator", "path": "nerfstudio/model_components/ray_generators.py", "snippet": "class RayGenerator(nn.Module):\n \"\"\"torch.nn Module for generating rays.\n This class is the interface between the scene's cameras/camera optimizer and the ray sampler.\n\n Args:\n cameras: Camera objects containing camera info.\n pose_optimizer: pose optimization module, for optimizing noisy camera intrinsics/extrinsics.\n \"\"\"\n\n def __init__(self, cameras: Cameras, pose_optimizer: CameraOptimizer) -> None:\n super().__init__()\n self.cameras = cameras\n self.pose_optimizer = pose_optimizer\n self.register_buffer(\"image_coords\", cameras.get_image_coords(), persistent=False)\n\n def forward(self, ray_indices: TensorType[\"num_rays\", 3]) -> RayBundle:\n \"\"\"Index into the cameras to generate the rays.\n\n Args:\n ray_indices: Contains camera, row, and col indices for target rays.\n \"\"\"\n c = ray_indices[:, 0] # camera indices\n y = ray_indices[:, 1] # row indices\n x = ray_indices[:, 2] # col indices\n coords = 
self.image_coords[y, x]\n\n camera_opt_to_camera = self.pose_optimizer(c)\n\n ray_bundle = self.cameras.generate_rays(\n camera_indices=c.unsqueeze(-1),\n coords=coords,\n camera_opt_to_camera=camera_opt_to_camera,\n )\n return ray_bundle" }, { "identifier": "IterableWrapper", "path": "nerfstudio/utils/misc.py", "snippet": "class IterableWrapper: # pylint: disable=too-few-public-methods\n \"\"\"A helper that will allow an instance of a class to return multiple kinds of iterables bound\n to different functions of that class.\n\n To use this, take an instance of a class. From that class, pass in the <instance>.<new_iter_function>\n and <instance>.<new_next_function> to the IterableWrapper constructor. By passing in the instance's\n functions instead of just the class's functions, the self argument should automatically be accounted\n for.\n\n Args:\n new_iter: function that will be called instead as the __iter__() function\n new_next: function that will be called instead as the __next__() function\n length: length of the iterable. If -1, the iterable will be infinite.\n\n\n Attributes:\n new_iter: object's pointer to the function we are calling for __iter__()\n new_next: object's pointer to the function we are calling for __next__()\n length: length of the iterable. If -1, the iterable will be infinite.\n i: current index of the iterable.\n\n \"\"\"\n\n i: int\n\n def __init__(self, new_iter: Callable, new_next: Callable, length: int = -1):\n self.new_iter = new_iter\n self.new_next = new_next\n self.length = length\n\n def __next__(self):\n if self.length != -1 and self.i >= self.length:\n raise StopIteration\n self.i += 1\n return self.new_next()\n\n def __iter__(self):\n self.new_iter()\n self.i = 0\n return self" } ]
from abc import abstractmethod from dataclasses import dataclass, field from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Type, Union from rich.progress import Console from torch import nn from torch.nn import Parameter from torch.utils.data import Dataset from torch.utils.data.distributed import DistributedSampler from typing_extensions import Literal from nerfstudio.cameras.camera_optimizers import CameraOptimizerConfig from nerfstudio.cameras.cameras import CameraType from nerfstudio.cameras.rays import RayBundle from nerfstudio.configs.base_config import InstantiateConfig from nerfstudio.data.dataparsers.arkitscenes_dataparser import ( ARKitScenesDataParserConfig, ) from nerfstudio.data.dataparsers.base_dataparser import DataparserOutputs from nerfstudio.data.dataparsers.blender_dataparser import BlenderDataParserConfig from nerfstudio.data.dataparsers.dnerf_dataparser import DNeRFDataParserConfig from nerfstudio.data.dataparsers.dycheck_dataparser import DycheckDataParserConfig from nerfstudio.data.dataparsers.instant_ngp_dataparser import ( InstantNGPDataParserConfig, ) from nerfstudio.data.dataparsers.minimal_dataparser import MinimalDataParserConfig from nerfstudio.data.dataparsers.nerfstudio_dataparser import NerfstudioDataParserConfig from nerfstudio.data.dataparsers.nuscenes_dataparser import NuScenesDataParserConfig from nerfstudio.data.dataparsers.phototourism_dataparser import ( PhototourismDataParserConfig, ) from nerfstudio.data.dataparsers.scannet_dataparser import ScanNetDataParserConfig from nerfstudio.data.dataparsers.sdfstudio_dataparser import SDFStudioDataParserConfig from nerfstudio.data.dataparsers.sitcoms3d_dataparser import Sitcoms3DDataParserConfig from nerfstudio.data.datasets.base_dataset import InputDataset from nerfstudio.data.pixel_samplers import ( EquirectangularPixelSampler, PatchPixelSampler, PixelSampler, ) from nerfstudio.data.utils.dataloaders import ( CacheDataloader, FixedIndicesEvalDataloader, RandIndicesEvalDataloader, ) from nerfstudio.data.utils.nerfstudio_collate import nerfstudio_collate from nerfstudio.engine.callbacks import TrainingCallback, TrainingCallbackAttributes from nerfstudio.model_components.ray_generators import RayGenerator from nerfstudio.utils.misc import IterableWrapper import torch import tyro
17,392
def setup_eval(self): """Sets up the data manager for evaluation""" @abstractmethod def next_train(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the train data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the eval data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]: """Retreive the next eval image. Args: step: the step number of the eval image to retrieve Returns: A tuple of the step number, the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def get_train_rays_per_batch(self) -> int: """Returns the number of rays per batch for training.""" raise NotImplementedError @abstractmethod def get_eval_rays_per_batch(self) -> int: """Returns the number of rays per batch for evaluation.""" raise NotImplementedError def get_datapath(self) -> Optional[Path]: # pylint:disable=no-self-use """Returns the path to the data. This is used to determine where to save camera paths.""" return None def get_training_callbacks( # pylint:disable=no-self-use self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument ) -> List[TrainingCallback]: """Returns a list of callbacks to be used during training.""" return [] @abstractmethod def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use """Get the param groups for the data manager. Returns: A list of dictionaries containing the data manager's param groups. """ return {} @dataclass class VanillaDataManagerConfig(DataManagerConfig): """A basic data manager""" _target: Type = field(default_factory=lambda: VanillaDataManager) """Target class to instantiate.""" dataparser: AnnotatedDataParserUnion = BlenderDataParserConfig() """Specifies the dataparser used to unpack the data.""" train_num_rays_per_batch: int = 1024 """Number of rays per batch to use per training iteration.""" train_num_images_to_sample_from: int = -1 """Number of images to sample during training iteration.""" train_num_times_to_repeat_images: int = -1 """When not training on all images, number of iterations before picking new images. If -1, never pick new images.""" eval_num_rays_per_batch: int = 1024 """Number of rays per batch to use per eval iteration.""" eval_num_images_to_sample_from: int = -1 """Number of images to sample during eval iteration.""" eval_num_times_to_repeat_images: int = -1 """When not evaluating on all images, number of iterations before picking new images. If -1, never pick new images.""" eval_image_indices: Optional[Tuple[int, ...]] = (0,) """Specifies the image indices to use during eval; if None, uses all.""" camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig() """Specifies the camera pose optimizer used during training. 
Helpful if poses are noisy, such as for data from Record3D.""" collate_fn = staticmethod(nerfstudio_collate) """Specifies the collate function to use for the train and eval dataloaders.""" camera_res_scale_factor: float = 1.0 """The scale factor for scaling spatial data such as images, mask, semantics along with relevant information about camera intrinsics """ patch_size: int = 1 """Size of patch to sample from. If >1, patch-based sampling will be used.""" class VanillaDataManager(DataManager): # pylint: disable=abstract-method """Basic stored data manager implementation. This is pretty much a port over from our old dataloading utilities, and is a little jank under the hood. We may clean this up a little bit under the hood with more standard dataloading components that can be strung together, but it can be just used as a black box for now since only the constructor is likely to change in the future, or maybe passing in step number to the next_train and next_eval functions. Args: config: the DataManagerConfig used to instantiate class """ config: VanillaDataManagerConfig
# Copyright 2022 The Nerfstudio Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Datamanager. """ from __future__ import annotations CONSOLE = Console(width=120) AnnotatedDataParserUnion = tyro.conf.OmitSubcommandPrefixes[ # Omit prefixes of flags in subcommands. tyro.extras.subcommand_type_from_defaults( { "nerfstudio-data": NerfstudioDataParserConfig(), "minimal-parser": MinimalDataParserConfig(), "arkit-data": ARKitScenesDataParserConfig(), "blender-data": BlenderDataParserConfig(), "instant-ngp-data": InstantNGPDataParserConfig(), "nuscenes-data": NuScenesDataParserConfig(), "dnerf-data": DNeRFDataParserConfig(), "phototourism-data": PhototourismDataParserConfig(), "dycheck-data": DycheckDataParserConfig(), "scannet-data": ScanNetDataParserConfig(), "sdfstudio-data": SDFStudioDataParserConfig(), "sitcoms3d-data": Sitcoms3DDataParserConfig(), }, prefix_names=False, # Omit prefixes in subcommands themselves. ) ] """Union over possible dataparser types, annotated with metadata for tyro. This is the same as the vanilla union, but results in shorter subcommand names.""" @dataclass class DataManagerConfig(InstantiateConfig): """Configuration for data manager instantiation; DataManager is in charge of keeping the train/eval dataparsers; After instantiation, data manager holds both train/eval datasets and is in charge of returning unpacked train/eval data at each iteration """ _target: Type = field(default_factory=lambda: DataManager) """Target class to instantiate.""" data: Optional[Path] = None """Source of data, may not be used by all models.""" camera_optimizer: Optional[CameraOptimizerConfig] = None """Specifies the camera pose optimizer used during training. Helpful if poses are noisy.""" class DataManager(nn.Module): """Generic data manager's abstract class This version of the data manager is designed be a monolithic way to load data and latents, especially since this may contain learnable parameters which need to be shared across the train and test data managers. The idea is that we have setup methods for train and eval separately and this can be a combined train/eval if you want. Usage: To get data, use the next_train and next_eval functions. This data manager's next_train and next_eval methods will return 2 things: 1. A Raybundle: This will contain the rays we are sampling, with latents and conditionals attached (everything needed at inference) 2. A "batch" of auxiliary information: This will contain the mask, the ground truth pixels, etc needed to actually train, score, etc the model Rationale: Because of this abstraction we've added, we can support more NeRF paradigms beyond the vanilla nerf paradigm of single-scene, fixed-images, no-learnt-latents. We can now support variable scenes, variable number of images, and arbitrary latents. 
Train Methods: setup_train: sets up for being used as train iter_train: will be called on __iter__() for the train iterator next_train: will be called on __next__() for the training iterator get_train_iterable: utility that gets a clean pythonic iterator for your training data Eval Methods: setup_eval: sets up for being used as eval iter_eval: will be called on __iter__() for the eval iterator next_eval: will be called on __next__() for the eval iterator get_eval_iterable: utility that gets a clean pythonic iterator for your eval data Attributes: train_count (int): the step number of our train iteration, needs to be incremented manually eval_count (int): the step number of our eval iteration, needs to be incremented manually train_dataset (Dataset): the dataset for the train dataset eval_dataset (Dataset): the dataset for the eval dataset Additional attributes specific to each subclass are defined in the setup_train and setup_eval functions. """ train_dataset: Optional[Dataset] = None eval_dataset: Optional[Dataset] = None train_sampler: Optional[DistributedSampler] = None eval_sampler: Optional[DistributedSampler] = None def __init__(self): """Constructor for the DataManager class. Subclassed DataManagers will likely need to override this constructor. If you aren't manually calling the setup_train and setup_eval functions from an overriden constructor, that you call super().__init__() BEFORE you initialize any nn.Modules or nn.Parameters, but AFTER you've already set all the attributes you need for the setup functions.""" super().__init__() self.train_count = 0 self.eval_count = 0 if self.train_dataset and self.test_mode != "inference": self.setup_train() if self.eval_dataset and self.test_mode != "inference": self.setup_eval() def forward(self): """Blank forward method This is an nn.Module, and so requires a forward() method normally, although in our case we do not need a forward() method""" raise NotImplementedError def iter_train(self): """The __iter__ function for the train iterator. This only exists to assist the get_train_iterable function, since we need to pass in an __iter__ function for our trivial iterable that we are making.""" self.train_count = 0 def iter_eval(self): """The __iter__ function for the eval iterator. This only exists to assist the get_eval_iterable function, since we need to pass in an __iter__ function for our trivial iterable that we are making.""" self.eval_count = 0 def get_train_iterable(self, length=-1) -> IterableWrapper: """Gets a trivial pythonic iterator that will use the iter_train and next_train functions as __iter__ and __next__ methods respectively. This basically is just a little utility if you want to do something like: | for ray_bundle, batch in datamanager.get_train_iterable(): | <eval code here> since the returned IterableWrapper is just an iterator with the __iter__ and __next__ methods (methods bound to our DataManager instance in this case) specified in the constructor. """ return IterableWrapper(self.iter_train, self.next_train, length) def get_eval_iterable(self, length=-1) -> IterableWrapper: """Gets a trivial pythonic iterator that will use the iter_eval and next_eval functions as __iter__ and __next__ methods respectively. 
This basically is just a little utility if you want to do something like: | for ray_bundle, batch in datamanager.get_eval_iterable(): | <eval code here> since the returned IterableWrapper is just an iterator with the __iter__ and __next__ methods (methods bound to our DataManager instance in this case) specified in the constructor. """ return IterableWrapper(self.iter_eval, self.next_eval, length) @abstractmethod def setup_train(self): """Sets up the data manager for training. Here you will define any subclass specific object attributes from the attribute""" @abstractmethod def setup_eval(self): """Sets up the data manager for evaluation""" @abstractmethod def next_train(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the train data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval(self, step: int) -> Tuple[RayBundle, Dict]: """Returns the next batch of data from the eval data manager. Args: step: the step number of the eval image to retrieve Returns: A tuple of the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def next_eval_image(self, step: int) -> Tuple[int, RayBundle, Dict]: """Retreive the next eval image. Args: step: the step number of the eval image to retrieve Returns: A tuple of the step number, the ray bundle for the image, and a dictionary of additional batch information such as the groudtruth image. """ raise NotImplementedError @abstractmethod def get_train_rays_per_batch(self) -> int: """Returns the number of rays per batch for training.""" raise NotImplementedError @abstractmethod def get_eval_rays_per_batch(self) -> int: """Returns the number of rays per batch for evaluation.""" raise NotImplementedError def get_datapath(self) -> Optional[Path]: # pylint:disable=no-self-use """Returns the path to the data. This is used to determine where to save camera paths.""" return None def get_training_callbacks( # pylint:disable=no-self-use self, training_callback_attributes: TrainingCallbackAttributes # pylint: disable=unused-argument ) -> List[TrainingCallback]: """Returns a list of callbacks to be used during training.""" return [] @abstractmethod def get_param_groups(self) -> Dict[str, List[Parameter]]: # pylint: disable=no-self-use """Get the param groups for the data manager. Returns: A list of dictionaries containing the data manager's param groups. """ return {} @dataclass class VanillaDataManagerConfig(DataManagerConfig): """A basic data manager""" _target: Type = field(default_factory=lambda: VanillaDataManager) """Target class to instantiate.""" dataparser: AnnotatedDataParserUnion = BlenderDataParserConfig() """Specifies the dataparser used to unpack the data.""" train_num_rays_per_batch: int = 1024 """Number of rays per batch to use per training iteration.""" train_num_images_to_sample_from: int = -1 """Number of images to sample during training iteration.""" train_num_times_to_repeat_images: int = -1 """When not training on all images, number of iterations before picking new images. 
If -1, never pick new images.""" eval_num_rays_per_batch: int = 1024 """Number of rays per batch to use per eval iteration.""" eval_num_images_to_sample_from: int = -1 """Number of images to sample during eval iteration.""" eval_num_times_to_repeat_images: int = -1 """When not evaluating on all images, number of iterations before picking new images. If -1, never pick new images.""" eval_image_indices: Optional[Tuple[int, ...]] = (0,) """Specifies the image indices to use during eval; if None, uses all.""" camera_optimizer: CameraOptimizerConfig = CameraOptimizerConfig() """Specifies the camera pose optimizer used during training. Helpful if poses are noisy, such as for data from Record3D.""" collate_fn = staticmethod(nerfstudio_collate) """Specifies the collate function to use for the train and eval dataloaders.""" camera_res_scale_factor: float = 1.0 """The scale factor for scaling spatial data such as images, mask, semantics along with relevant information about camera intrinsics """ patch_size: int = 1 """Size of patch to sample from. If >1, patch-based sampling will be used.""" class VanillaDataManager(DataManager): # pylint: disable=abstract-method """Basic stored data manager implementation. This is pretty much a port over from our old dataloading utilities, and is a little jank under the hood. We may clean this up a little bit under the hood with more standard dataloading components that can be strung together, but it can be just used as a black box for now since only the constructor is likely to change in the future, or maybe passing in step number to the next_train and next_eval functions. Args: config: the DataManagerConfig used to instantiate class """ config: VanillaDataManagerConfig
train_dataset: InputDataset
17
2023-10-26 04:39:15+00:00
24k
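The all_code excerpt in the row above documents that DataManager.get_eval_iterable() simply wraps iter_eval/next_eval in an IterableWrapper, so it can be consumed as an ordinary Python iterator. A minimal, hypothetical sketch of that usage pattern (the datamanager instance, the batch count, and the batch contents are assumptions, not taken from the row):

def run_eval_loop(datamanager, num_batches: int = 10) -> None:
    # get_eval_iterable returns an IterableWrapper bound to next_eval,
    # so iterating it yields (ray_bundle, batch) pairs just like calling
    # next_eval(step) repeatedly.
    for ray_bundle, batch in datamanager.get_eval_iterable(num_batches):
        # <eval code here>: e.g. render ray_bundle and compare the result
        # against the ground-truth data carried in batch.
        pass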
chenruduan/OAReactDiff
oa_reactdiff/trainer/pl_trainer.py
[ { "identifier": "ProcessedQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=2,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 1\n self.device = torch.device(device)\n\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = n_samples\n\n self.data = {}\n self.process_molecules(\"raw_dataset\", n_samples, idx=0)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "ProcessedDoubleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedDoubleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=1,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 2\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 2)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1sthalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_N_idx_1sthalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag1_N_idx_2ndhalf = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n frag2_O_idx_2ndhalf = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n int(self.n_samples / 2),\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1sthalf],\n self.hasN_set[key][frag1_N_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1sthalf],\n self.hasO_set[key][frag2_O_idx_2ndhalf],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTripleQM9", "path": "oa_reactdiff/dataset/qm9.py", "snippet": "class ProcessedTripleQM9(BaseQM9):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.raw_dataset[\"charges\"])\n self.n_samples = len(self.raw_dataset[\"charges\"])\n\n self.get_subsets()\n self.get_pairs()\n\n self.data = {}\n self.process_molecules(\"frag1_data\", n_samples, idx=0)\n self.process_molecules(\"frag2_data\", n_samples, idx=1)\n 
self.process_molecules(\"frag3_data\", n_samples, idx=2)\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]\n\n def get_pairs(self):\n n1 = int(self.n_samples / 3)\n n2 = int(self.n_samples / 3)\n n3 = self.n_samples - n1 - n2\n self.frag1_data, self.frag2_data = {}, {}\n frag1_O_idx_1_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag2_N_idx_1_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag3_F_idx_1_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n1,\n replace=True,\n )\n frag1_F_idx_2_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag2_O_idx_2_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag3_N_idx_2_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n2,\n replace=True,\n )\n frag1_N_idx_3_3 = np.random.choice(\n len(self.hasN_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag2_F_idx_3_3 = np.random.choice(\n len(self.hasF_set[\"charges\"]),\n n3,\n replace=True,\n )\n frag3_O_idx_3_3 = np.random.choice(\n len(self.hasO_set[\"charges\"]),\n n3,\n replace=True,\n )\n self.frag1_data = {\n key: np.concatenate(\n [\n self.hasO_set[key][frag1_O_idx_1_3],\n self.hasF_set[key][frag1_F_idx_2_3],\n self.hasN_set[key][frag1_N_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag2_data = {\n key: np.concatenate(\n [\n self.hasN_set[key][frag2_N_idx_1_3],\n self.hasO_set[key][frag2_O_idx_2_3],\n self.hasF_set[key][frag2_F_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }\n self.frag3_data = {\n key: np.concatenate(\n [\n self.hasF_set[key][frag3_F_idx_1_3],\n self.hasN_set[key][frag3_N_idx_2_3],\n self.hasO_set[key][frag3_O_idx_3_3],\n ],\n axis=0,\n )\n for key in self.raw_dataset\n }" }, { "identifier": "ProcessedTS1x", "path": "oa_reactdiff/dataset/transition1x.py", "snippet": "class ProcessedTS1x(BaseDataset):\n def __init__(\n self,\n npz_path,\n center=True,\n pad_fragments=0,\n device=\"cpu\",\n zero_charge=False,\n remove_h=False,\n single_frag_only=True,\n swapping_react_prod=False,\n append_frag=False,\n reflection=False,\n use_by_ind=False,\n only_ts=False,\n confidence_model=False,\n position_key=\"positions\",\n ediff=None,\n **kwargs,\n ):\n super().__init__(\n npz_path=npz_path,\n center=center,\n device=device,\n zero_charge=zero_charge,\n remove_h=remove_h,\n )\n if confidence_model:\n use_by_ind = False\n if remove_h:\n print(\"remove_h is ignored because it is not reasonble for TS.\")\n if single_frag_only:\n single_frag_inds = np.where(\n np.array(self.raw_dataset[\"single_fragment\"]) == 1\n )[0]\n else:\n single_frag_inds = np.array(range(len(self.raw_dataset[\"single_fragment\"])))\n if use_by_ind:\n use_inds = self.raw_dataset[\"use_ind\"]\n else:\n use_inds = range(len(self.raw_dataset[\"single_fragment\"]))\n single_frag_inds = list(set(single_frag_inds).intersection(set(use_inds)))\n\n data_duplicated = copy.deepcopy(self.raw_dataset)\n for k, mapped_k in FRAG_MAPPING.items():\n for v, val in data_duplicated[k].items():\n self.raw_dataset[k][v] = [val[ii] for ii in single_frag_inds]\n if swapping_react_prod:\n mapped_val = data_duplicated[mapped_k][v]\n self.raw_dataset[k][v] += [\n mapped_val[ii] for ii in single_frag_inds\n ]\n if reflection:\n for k, mapped_k in FRAG_MAPPING.items():\n for 
v, val in self.raw_dataset[k].items():\n if v in [\"wB97x_6-31G(d).forces\", position_key]:\n self.raw_dataset[k][v] += [reflect_z(_val) for _val in val]\n else:\n self.raw_dataset[k][v] += val\n\n self.reactant = self.raw_dataset[\"reactant\"]\n self.transition_state = self.raw_dataset[\"transition_state\"]\n self.product = self.raw_dataset[\"product\"]\n\n self.n_fragments = pad_fragments + 3\n self.device = torch.device(device)\n n_samples = len(self.reactant[\"charges\"])\n self.n_samples = len(self.reactant[\"charges\"])\n\n self.data = {}\n repeat = 2 if swapping_react_prod else 1\n if confidence_model:\n self.data[\"target\"] = torch.tensor(\n self.raw_dataset[\"target\"] * repeat\n ).unsqueeze(1)\n self.data[\"rmsd\"] = torch.tensor(\n self.raw_dataset[\"rmsd\"] * repeat\n ).unsqueeze(1)\n if ediff is not None:\n self.data[\"ediff\"] = torch.tensor(\n self.raw_dataset[ediff][\"ediff\"] * repeat\n ).unsqueeze(1)\n if not only_ts:\n if not append_frag:\n self.process_molecules(\n \"reactant\", n_samples, idx=0, position_key=position_key\n )\n self.process_molecules(\"transition_state\", n_samples, idx=1)\n self.process_molecules(\n \"product\", n_samples, idx=2, position_key=position_key\n )\n else:\n self.process_molecules(\n \"reactant\",\n n_samples,\n idx=0,\n append_charge=0,\n position_key=position_key,\n )\n self.process_molecules(\n \"transition_state\", n_samples, idx=1, append_charge=1\n )\n self.process_molecules(\n \"product\",\n n_samples,\n idx=2,\n append_charge=0,\n position_key=position_key,\n )\n\n for idx in range(pad_fragments):\n self.patch_dummy_molecules(idx + 3)\n else:\n if not append_frag:\n self.process_molecules(\"transition_state\", n_samples, idx=0)\n else:\n self.process_molecules(\n \"transition_state\", n_samples, idx=0, append_charge=1\n )\n # for idx in range(2):\n # self.patch_dummy_molecules(idx + 1)\n\n self.data[\"condition\"] = [\n torch.zeros(\n size=(1, 1),\n dtype=torch.int64,\n device=self.device,\n )\n for _ in range(self.n_samples)\n ]" }, { "identifier": "EGNNDynamics", "path": "oa_reactdiff/dynamics/egnn_dynamics.py", "snippet": "class EGNNDynamics(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n update_pocket_coords: bool = True,\n condition_time: bool = True,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n ) -> None:\n r\"\"\"Base dynamics class set up for denoising process.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. 
Defaults to None.\n \"\"\"\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n def forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tuple[List[Tensor], Tensor]:\n r\"\"\"predict noise /mu.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tuple[List[Tensor], Tensor]: updated pos-h and edge attributes\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n h_final, pos_final, edge_attr_final = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n )\n vel = pos_final - pos\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in pos, resetting EGNN output to randn.\")\n vel = torch.randn_like(vel)\n if torch.any(torch.isnan(vel)):\n print(\"Warning: detected nan in h, resetting EGNN output to randn.\")\n h_final = torch.randn_like(h_final)\n\n h_final = h_final[:, :-condition_dim]\n\n frag_index = self.compute_frag_index(n_frag_switch)\n xh_final = [\n torch.cat(\n [\n self.remove_mean_batch(\n vel[frag_index[ii] : frag_index[ii + 1]],\n combined_mask[frag_index[ii] : frag_index[ii + 1]],\n ),\n self.decoders[ii](h_final[frag_index[ii] : frag_index[ii + 1]]),\n ],\n dim=-1,\n )\n for ii, name in enumerate(self.fragment_names)\n ]\n\n # xh_final = self.enpose_pbc(xh_final)\n\n if edge_attr_final is None or edge_attr_final.size(1) <= max(1, self.dist_dim):\n edge_attr_final = None\n else:\n edge_attr_final = self.edge_decoder(edge_attr_final)\n return xh_final, edge_attr_final\n\n @staticmethod\n def enpose_pbc(xh: List[Tensor], magnitude=10.0) -> List[Tensor]:\n xrange = magnitude * 2\n xh = [torch.remainder(_xh + magnitude, xrange) - magnitude for _xh in xh]\n return xh\n\n @staticmethod\n def compute_frag_index(n_frag_switch: 
Tensor) -> np.ndarray:\n counts = [\n torch.where(n_frag_switch == ii)[0].numel()\n for ii in torch.unique(n_frag_switch)\n ]\n return np.concatenate([np.array([0]), np.cumsum(counts)])\n\n @torch.no_grad()\n def adjust_edge_attr_on_new_eij(\n self,\n edge_index: Tensor,\n edge_attr: Tensor,\n edge_index_new: Tensor,\n ) -> Tensor:\n r\"\"\"Get ready new edge attributes (e_ij) given old {ij, e_ij} and new {ij}\n\n Args:\n edge_index (Tensor): ij\n edge_attr (Tensor): e_ij\n edge_index_new (Tensor): new ij\n\n Raises:\n ValueError: finding multiple entries for the same ij pair\n\n Returns:\n Tensor: new e_ij\n \"\"\"\n edge_index_T = torch.transpose(edge_index, 1, 0)\n edge_index_new_T = torch.transpose(edge_index_new, 1, 0)\n\n edge_attr_new = []\n for _ind, ij in enumerate(edge_index_new_T):\n ind = torch.where((ij == edge_index_T).all(dim=1))[0]\n if ind.size(0) > 1:\n raise ValueError(f\"ind should only be 0 or 1, getting {ind}\")\n\n if ind.size(0) == 0:\n self.create_new_edge_attr(\n ind_new=_ind,\n ij_new=ij,\n edge_index_new_T=edge_index_new_T,\n edge_attr_new=edge_attr_new,\n edge_attr=edge_attr,\n )\n else:\n edge_attr_new.append(edge_attr[ind.item()].detach())\n return torch.stack(edge_attr_new, dim=0)\n\n @staticmethod\n def init_edge_attr(sample_edge_attr):\n r\"\"\"initialize edge attributes.\"\"\"\n return torch.rand_like(sample_edge_attr)\n\n def create_new_edge_attr(\n self,\n ind_new: Tensor,\n ij_new: Tensor,\n edge_index_new_T: Tensor,\n edge_attr_new: List[Tensor],\n edge_attr: Tensor,\n ) -> List[Tensor]:\n r\"\"\"Create new edge attrbution for ij that is not present in old connections\n\n Args:\n ind_new (Tensor): natural index of new ij\n ij_new (Tensor): new ij\n edge_index_new_T (Tensor): new edge indexes, [n_edge, 2]\n edge_attr_new (List[Tensor]): list of new edge attributes\n edge_attr (Tensor): old edge attributes\n\n Raises:\n ValueError: not ji found for ij in new indexes\n\n Returns:\n List[Tensor]: list of new edge attributes\n \"\"\"\n ij_new_reverse = ij_new[torch.tensor([1, 0])]\n ind_new_reverse = torch.where((ij_new_reverse == edge_index_new_T).all(dim=1))[\n 0\n ]\n print(ind_new_reverse)\n if ind_new_reverse.size(0) == 0:\n raise ValueError(f\"should always find a reverse ind.\")\n # print(ij_new, ind_new, ind_new_reverse)\n if ind_new_reverse.item() >= ind_new:\n edge_attr_new.append(self.init_edge_attr(edge_attr[0]))\n else:\n edge_attr_new.append(edge_attr_new[ind_new_reverse.item()])\n return edge_attr_new\n\n @staticmethod\n def remove_mean_batch(x, indices):\n mean = scatter_mean(x, indices, dim=0)\n x = x - mean[indices]\n return x" }, { "identifier": "Confidence", "path": "oa_reactdiff/dynamics/confidence.py", "snippet": "class Confidence(BaseDynamics):\n def __init__(\n self,\n model_config: Dict,\n fragment_names: List[str],\n node_nfs: List[int],\n edge_nf: int,\n condition_nf: int = 0,\n pos_dim: int = 3,\n edge_cutoff: Optional[float] = None,\n model: nn.Module = EGNN,\n device: torch.device = torch.device(\"cuda\"),\n enforce_same_encoding: Optional[List] = None,\n source: Optional[Dict] = None,\n **kwargs,\n ) -> None:\n r\"\"\"Confindence score for generated samples.\n\n Args:\n model_config (Dict): config for the equivariant model.\n fragment_names (List[str]): list of names for fragments\n node_nfs (List[int]): list of number of input node attributues.\n edge_nf (int): number of input edge attributes.\n condition_nf (int): number of attributes for conditional generation.\n Defaults to 0.\n pos_dim (int): dimension for position 
vector. Defaults to 3.\n update_pocket_coords (bool): whether to update positions of everything.\n Defaults to True.\n condition_time (bool): whether to condition on time. Defaults to True.\n edge_cutoff (Optional[float]): cutoff for building intra-fragment edges.\n Defaults to None.\n model (Optional[nn.Module]): Module for equivariant model. Defaults to None.\n \"\"\"\n model_config.update({\"for_conf\": True})\n update_pocket_coords = True\n condition_time = (True,)\n super().__init__(\n model_config,\n fragment_names,\n node_nfs,\n edge_nf,\n condition_nf,\n pos_dim,\n update_pocket_coords,\n condition_time,\n edge_cutoff,\n model,\n device,\n enforce_same_encoding,\n source=source,\n )\n\n hidden_channels = model_config[\"hidden_channels\"]\n self.readout = GatedMLP(\n in_dim=hidden_channels,\n out_dims=[hidden_channels, hidden_channels, 1],\n activation=\"swish\",\n bias=True,\n last_layer_no_activation=True,\n )\n\n def _forward(\n self,\n xh: List[Tensor],\n edge_index: Tensor,\n t: Tensor,\n conditions: Tensor,\n n_frag_switch: Tensor,\n combined_mask: Tensor,\n edge_attr: Optional[Tensor] = None,\n ) -> Tensor:\n r\"\"\"predict confidence.\n\n Args:\n xh (List[Tensor]): list of concatenated tensors for pos and h\n edge_index (Tensor): [n_edge, 2]\n t (Tensor): time tensor. If dim is 1, same for all samples;\n otherwise different t for different samples\n conditions (Tensor): condition tensors\n n_frag_switch (Tensor): [n_nodes], fragment index for each nodes\n combined_mask (Tensor): [n_nodes], sample index for each node\n edge_attr (Optional[Tensor]): [n_edge, dim_edge_attribute]. Defaults to None.\n\n Raises:\n NotImplementedError: The fragement-position-fixed mode is not implement.\n\n Returns:\n Tensor: binary probability of confidence fo each graph.\n \"\"\"\n pos = torch.concat(\n [_xh[:, : self.pos_dim].clone() for _xh in xh],\n dim=0,\n )\n h = torch.concat(\n [\n self.encoders[ii](xh[ii][:, self.pos_dim :].clone())\n for ii, name in enumerate(self.fragment_names)\n ],\n dim=0,\n )\n if self.edge_encoder is not None:\n edge_attr = self.edge_encoder(edge_attr)\n\n condition_dim = 0\n if self.condition_time:\n if len(t.size()) == 1:\n # t is the same for all elements in batch.\n h_time = torch.empty_like(h[:, 0:1]).fill_(t.item())\n else:\n # t is different over the batch dimension.\n h_time = t[combined_mask]\n h = torch.cat([h, h_time], dim=1)\n condition_dim += 1\n\n if self.condition_nf > 0:\n h_condition = conditions[combined_mask]\n h = torch.cat([h, h_condition], dim=1)\n condition_dim += self.condition_nf\n\n subgraph_mask = get_subgraph_mask(edge_index, n_frag_switch)\n if self.update_pocket_coords:\n update_coords_mask = None\n else:\n raise NotImplementedError # no need to mask pos for inpainting mode.\n\n node_features = self.model(\n h,\n pos,\n edge_index,\n edge_attr,\n node_mask=None,\n edge_mask=None,\n update_coords_mask=update_coords_mask,\n subgraph_mask=subgraph_mask[:, None],\n ) # (n_node, n_hidden)\n\n graph_features = scatter_mean(\n node_features,\n index=combined_mask,\n dim=0,\n ) # (n_system, n_hidden)\n conf = self.readout(graph_features)\n return conf.squeeze()\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n ):\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n xh = [\n torch.cat(\n 
[repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n pred = self._forward(\n xh=xh,\n edge_index=edge_index,\n t=torch.tensor([0]),\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None,\n )\n return pred" }, { "identifier": "DiffSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class DiffSchedule(nn.Module):\n def __init__(self, gamma_module: nn.Module, norm_values: Tuple[float]) -> None:\n super().__init__()\n self.gamma_module = gamma_module\n self.norm_values = norm_values\n self.check_issues_norm_values()\n\n @staticmethod\n def inflate_batch_array(array, target):\n r\"\"\"\n Inflates the batch array (array) with only a single axis\n (i.e. shape = (batch_size,), or possibly more empty axes\n (i.e. shape (batch_size, 1, ..., 1)) to match the target shape.\n \"\"\"\n target_shape = (array.size(0),) + (1,) * (len(target.size()) - 1)\n return array.view(target_shape)\n\n def sigma(self, gamma, target_tensor):\n r\"\"\"Computes sigma given gamma.\"\"\"\n return self.inflate_batch_array(torch.sqrt(torch.sigmoid(gamma)), target_tensor)\n\n def alpha(self, gamma, target_tensor):\n r\"\"\"Computes alpha given gamma.\"\"\"\n return self.inflate_batch_array(\n torch.sqrt(torch.sigmoid(-gamma)), target_tensor\n )\n\n @staticmethod\n def SNR(gamma):\n r\"\"\"Computes signal to noise ratio (alpha^2/sigma^2) given gamma.\"\"\"\n return torch.exp(-gamma)\n\n def sigma_and_alpha_t_given_s(\n self, gamma_t: Tensor, gamma_s: Tensor, target_tensor: Tensor\n ) -> tuple[Tensor, Tensor, Tensor]:\n r\"\"\"\n Computes sigma t given s, using gamma_t and gamma_s. Used during sampling.\n These are defined as:\n alpha t given s = alpha t / alpha s,\n sigma t given s = sqrt(1 - (alpha t given s) ^2 ).\n \"\"\"\n sigma2_t_given_s = self.inflate_batch_array(\n -torch.expm1(F.softplus(gamma_s) - F.softplus(gamma_t)), target_tensor\n )\n\n # alpha_t_given_s = alpha_t / alpha_s\n log_alpha2_t = F.logsigmoid(-gamma_t)\n log_alpha2_s = F.logsigmoid(-gamma_s)\n log_alpha2_t_given_s = log_alpha2_t - log_alpha2_s\n\n alpha_t_given_s = torch.exp(0.5 * log_alpha2_t_given_s)\n alpha_t_given_s = self.inflate_batch_array(alpha_t_given_s, target_tensor)\n\n sigma_t_given_s = torch.sqrt(sigma2_t_given_s)\n\n return sigma2_t_given_s, sigma_t_given_s, alpha_t_given_s\n\n def check_issues_norm_values(self, num_stdevs=8):\n zeros = torch.zeros((1, 1))\n gamma_0 = self.gamma_module(zeros)\n sigma_0 = self.sigma(gamma_0, target_tensor=zeros).item()\n\n # Checked if 1 / norm_value is still larger than 10 * standard\n # deviation.\n norm_value = self.norm_values[1]\n\n if sigma_0 * num_stdevs > 1.0 / norm_value:\n raise ValueError(\n f\"Value for normalization value {norm_value} probably too \"\n f\"large with sigma_0 {sigma_0:.5f} and \"\n f\"1 / norm_value = {1. / norm_value}\"\n )" }, { "identifier": "PredefinedNoiseSchedule", "path": "oa_reactdiff/diffusion/_schedule.py", "snippet": "class PredefinedNoiseSchedule(nn.Module):\n r\"\"\"\n Predefined noise schedule. 
Essentially creates a lookup array for predefined\n (non-learned) noise schedules.\n \"\"\"\n\n def __init__(\n self,\n noise_schedule: str,\n timesteps: int,\n precision: float,\n ):\n super().__init__()\n self.timesteps = timesteps\n\n if \"cosine\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) <= 2\n power = 1 if len(splits) == 1 else float(splits[1])\n alphas2 = cosine_beta_schedule(timesteps, raise_to_power=power)\n elif \"polynomial\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 2\n power = float(splits[1])\n alphas2 = polynomial_schedule(timesteps, s=precision, power=power)\n elif \"csin\" in noise_schedule:\n splits = noise_schedule.split(\"_\")\n assert len(splits) == 4\n start, end, tau = float(splits[1]), float(splits[2]), float(splits[3])\n alphas2 = ccosine_schedule(timesteps, start=start, end=end, tau=tau)\n elif \"linear\" in noise_schedule:\n alphas2 = linear_schedule(timesteps)\n else:\n raise ValueError(noise_schedule)\n\n # print(\"alphas2\", alphas2)\n\n sigmas2 = 1 - alphas2\n\n log_alphas2 = np.log(alphas2)\n log_sigmas2 = np.log(sigmas2)\n\n log_alphas2_to_sigmas2 = log_alphas2 - log_sigmas2\n\n # print(\"gamma\", -log_alphas2_to_sigmas2)\n\n self.gamma = torch.nn.Parameter(\n torch.from_numpy(-log_alphas2_to_sigmas2).float(), requires_grad=False\n )\n\n def forward(self, t):\n t_int = torch.round(t * self.timesteps).long()\n return self.gamma[t_int]" }, { "identifier": "Normalizer", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "class Normalizer(nn.Module):\n def __init__(\n self,\n norm_values: Tuple = (1.0, 1.0, 1.0),\n norm_biases: Tuple = (0.0, 0.0, 0.0),\n pos_dim: int = 3,\n ) -> None:\n super().__init__()\n self.norm_values = norm_values\n self.norm_biases = norm_biases\n self.pos_dim = pos_dim\n\n def normalize(self, representations: List[Dict]) -> List[Dict]:\n for ii in range(len(representations)):\n for jj, feature_type in enumerate(FEATURE_MAPPING):\n representations[ii][feature_type] = (\n representations[ii][feature_type] - self.norm_biases[jj]\n ) / self.norm_values[jj]\n return representations\n\n def unnormalize(self, x: Tensor, ind: int) -> Tensor:\n return x * self.norm_values[ind] + self.norm_biases[ind]\n\n def unnormalize_z(self, z_combined: List[Tensor]) -> List[Tensor]:\n for ii in range(len(z_combined)):\n z_combined[ii][:, : self.pos_dim] = self.unnormalize(\n z_combined[ii][:, : self.pos_dim], 0\n )\n z_combined[ii][:, self.pos_dim : -1] = self.unnormalize(\n z_combined[ii][:, self.pos_dim : -1], 1\n )\n z_combined[ii][:, -1:] = self.unnormalize(z_combined[ii][:, -1:], 2)\n return z_combined" }, { "identifier": "FEATURE_MAPPING", "path": "oa_reactdiff/diffusion/_normalizer.py", "snippet": "FEATURE_MAPPING = [\"pos\", \"one_hot\", \"charge\"]" }, { "identifier": "EnVariationalDiffusion", "path": "oa_reactdiff/diffusion/en_diffusion.py", "snippet": "class EnVariationalDiffusion(nn.Module):\n \"\"\"\n The E(n) Diffusion Module.\n \"\"\"\n\n def __init__(\n self,\n dynamics: EGNNDynamics,\n schdule: DiffSchedule,\n normalizer: Normalizer,\n size_histogram: Optional[Dict] = None,\n loss_type: str = \"l2\",\n pos_only: bool = False,\n fixed_idx: Optional[List] = None,\n ):\n super().__init__()\n assert loss_type in {\"vlb\", \"l2\"}\n\n self.dynamics = dynamics\n self.schedule = schdule\n self.normalizer = normalizer\n self.size_histogram = size_histogram\n self.loss_type = loss_type\n self.pos_only = pos_only\n self.fixed_idx = fixed_idx or []\n\n self.pos_dim = 
dynamics.pos_dim\n self.node_nfs = dynamics.node_nfs\n self.fragment_names = dynamics.fragment_names\n self.T = schdule.gamma_module.timesteps\n self.norm_values = normalizer.norm_values\n self.norm_biases = normalizer.norm_biases\n\n # ------ FORWARD PASS ------\n\n def forward(\n self,\n representations: List[Dict],\n conditions: Tensor,\n return_pred: bool = False,\n ):\n r\"\"\"\n Computes the loss and NLL terms.\n\n #TODO: edge_attr not considered at all\n \"\"\"\n num_sample = representations[0][\"size\"].size(0)\n n_nodes = torch.stack(\n [repr[\"size\"] for repr in representations],\n dim=0,\n ).sum(dim=0)\n device = representations[0][\"pos\"].device\n masks = [repre[\"mask\"] for repre in representations]\n combined_mask = torch.cat(masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n fragments_nodes = [repr[\"size\"] for repr in representations]\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n # Normalize data, take into account volume change in x.\n representations = self.normalizer.normalize(representations)\n\n # Likelihood change due to normalization\n delta_log_px = self.delta_log_px(n_nodes.sum())\n\n # Sample a timestep t for each example in batch\n # At evaluation time, loss_0 will be computed separately to decrease\n # variance in the estimator (costs two forward passes)\n lowest_t = 0 if self.training else 1\n t_int = torch.randint(\n lowest_t, self.T + 1, size=(num_sample, 1), device=device\n ).float()\n s_int = t_int - 1 # previous timestep\n\n # Masks: important to compute log p(x | z0).\n t_is_zero = (t_int == 0).float()\n t_is_not_zero = 1 - t_is_zero\n\n # Normalize t to [0, 1]. Note that the negative\n # step of s will never be used, since then p(x | z0) is computed.\n s = s_int / self.T\n t = t_int / self.T\n\n # Compute gamma_s and gamma_t via the network.\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s), representations[0][\"pos\"]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t), representations[0][\"pos\"]\n )\n\n # Concatenate x, and h[categorical].\n xh = [\n torch.cat(\n [repre[feature_type] for feature_type in FEATURE_MAPPING],\n dim=1,\n )\n for repre in representations\n ]\n\n # Find noised representation\n z_t, eps_xh = self.noised_representation(xh, masks, gamma_t)\n\n # Neural net prediction.\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z_t,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n if return_pred:\n return eps_xh, net_eps_xh\n\n # TODO: LJ term not implemented\n # xh_lig_hat = self.xh_given_zt_and_epsilon(z_t_lig, net_out_lig, gamma_t,\n # ligand['mask'])\n if self.pos_only:\n for ii in range(len(masks)):\n net_eps_xh[ii][:, self.pos_dim :] = torch.zeros_like(\n net_eps_xh[ii][:, self.pos_dim :],\n device=device,\n )\n # Compute the L2 error.\n error_t: List[Tensor] = [\n utils.sum_except_batch(\n (eps_xh[ii] - net_eps_xh[ii]) ** 2,\n masks[ii],\n dim_size=num_sample,\n )\n for ii in range(len(masks))\n ] # TODO: no edge_attr contribution\n\n # Compute weighting with SNR: (1 - SNR(s-t)) for epsilon parametrization\n SNR_weight = (1 - self.schedule.SNR(gamma_s - gamma_t)).squeeze(1)\n assert error_t[0].size() == SNR_weight.size()\n\n # The _constants_ depending on sigma_0 from the\n # cross entropy term E_q(z0 | x) [log p(x | z0)].\n neg_log_constants = -self.log_constants_p_x_given_z0(\n n_nodes=n_nodes, 
device=device\n )\n\n # The KL between q(zT | x) and p(zT) = Normal(0, 1).\n # Should be close to zero.\n # kl_prior = self.kl_prior_with_pocket(\n # xh_lig, xh_pocket, ligand['mask'], pocket['mask'],\n # ligand['size'] + pocket['size'])\n # TODO: approximate KL prior with zero now, which should not influence training.\n kl_prior = torch.zeros_like(neg_log_constants)\n\n if self.training:\n # Computes the L_0 term (even if gamma_t is not actually gamma_0)\n # and this will later be selected via masking.\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_t,\n eps_xh=eps_xh,\n net_eps_xh=net_eps_xh,\n gamma_t=gamma_t,\n epsilon=1e-10,\n )\n loss_0_x = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[0]\n ]\n loss_0_cat = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[1]\n ]\n loss_0_charge = [\n -_log_p_fragment * t_is_zero.squeeze()\n for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n # apply t_is_zero mask\n error_t = [_error_t * t_is_not_zero.squeeze() for _error_t in error_t]\n\n else:\n # Compute noise values for t = 0.\n t_zeros = torch.zeros_like(s)\n gamma_0 = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_zeros), representations[0][\"pos\"]\n )\n\n # Sample z_0 given x, h for timestep t, from q(z_t | x, h)\n z_0, eps_0_xh = self.noised_representation(xh, masks, gamma_0)\n net_eps_0_xh, net_eps_0_edge_attr = self.dynamics(\n xh=z_0,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n log_p_h_given_z0 = self.log_pxh_given_z0_without_constants(\n representations=representations,\n z_t=z_0,\n eps_xh=eps_0_xh,\n net_eps_xh=net_eps_0_xh,\n gamma_t=gamma_0,\n epsilon=1e-10,\n )\n loss_0_x = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[0]]\n loss_0_cat = [-_log_p_fragment for _log_p_fragment in log_p_h_given_z0[1]]\n loss_0_charge = [\n -_log_p_fragment for _log_p_fragment in log_p_h_given_z0[2]\n ]\n\n loss_terms = {\n \"delta_log_px\": delta_log_px,\n \"error_t\": error_t,\n \"SNR_weight\": SNR_weight,\n \"loss_0_x\": loss_0_x,\n \"loss_0_cat\": loss_0_cat,\n \"loss_0_charge\": loss_0_charge,\n \"neg_log_constants\": neg_log_constants,\n \"kl_prior\": kl_prior,\n \"log_pN\": torch.zeros_like(kl_prior),\n \"t_int\": t_int.squeeze(),\n \"net_eps_xh\": net_eps_xh,\n \"eps_xh\": eps_xh,\n }\n return loss_terms\n\n def delta_log_px(self, num_nodes):\n return -self.subspace_dimensionality(num_nodes) * np.log(self.norm_values[0])\n\n def subspace_dimensionality(self, input_size):\n r\"\"\"\n Compute the dimensionality on translation-invariant linear subspace\n where distributions on x are defined.\n \"\"\"\n return (input_size - 1) * self.pos_dim\n\n def noised_representation(\n self,\n xh: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n ) -> Tuple[List[Tensor], List[Tensor]]:\n # Compute alpha_t and sigma_t from gamma.\n alpha_t = self.schedule.alpha(gamma_t, xh[0])\n sigma_t = self.schedule.sigma(gamma_t, xh[0])\n\n # Sample zt ~ Normal(alpha_t x, sigma_t)\n eps_xh = self.sample_combined_position_feature_noise(masks)\n\n # Sample z_t given x, h for timestep t, from q(z_t | x, h)\n z_t = [\n alpha_t[masks[ii]] * xh[ii] + sigma_t[masks[ii]] * eps_xh[ii]\n for ii in range(len(masks))\n ]\n\n return z_t, eps_xh\n\n def sample_combined_position_feature_noise(\n self,\n masks: List[Tensor],\n ) -> 
List[Tensor]:\n r\"\"\"\n Samples mean-centered normal noise for z_x, and standard normal noise for z_h.\n Note that we only need to put the center of gravity of *each fragment* to the origin.\n \"\"\"\n eps_xh = []\n for ii, mask in enumerate(masks):\n _eps_x = utils.sample_center_gravity_zero_gaussian_batch(\n size=(len(mask), self.pos_dim),\n indices=[mask],\n )\n _eps_h = utils.sample_gaussian(\n size=(len(mask), self.node_nfs[ii] - self.pos_dim),\n device=mask.device,\n )\n if self.pos_only:\n _eps_h = torch.zeros_like(_eps_h, device=mask.device)\n eps_xh.append(torch.cat([_eps_x, _eps_h], dim=1))\n for idx in self.fixed_idx:\n eps_xh[idx] = torch.zeros_like(eps_xh[idx], device=mask.device)\n return eps_xh\n\n def log_constants_p_x_given_z0(self, n_nodes, device):\n r\"\"\"Computes p(x|z0).\"\"\"\n\n batch_size = len(n_nodes)\n degrees_of_freedom_x = self.subspace_dimensionality(n_nodes).to(device)\n\n zeros = torch.zeros((batch_size, 1), device=device)\n gamma_0 = self.schedule.gamma_module(zeros)\n\n # Recall that sigma_x = sqrt(sigma_0^2 / alpha_0^2) = SNR(-0.5 gamma_0).\n log_sigma_x = 0.5 * gamma_0.view(batch_size)\n return degrees_of_freedom_x * (-log_sigma_x - 0.5 * np.log(2 * np.pi))\n\n def kl_prior(self):\n return NotImplementedError\n\n @staticmethod\n def gaussian_KL(q_mu_minus_p_mu_squared, q_sigma, p_sigma, d):\n \"\"\"Computes the KL distance between two normal distributions.\n Args:\n q_mu_minus_p_mu_squared: Squared difference between mean of\n distribution q and distribution p: ||mu_q - mu_p||^2\n q_sigma: Standard deviation of distribution q.\n p_sigma: Standard deviation of distribution p.\n d: dimension\n Returns:\n The KL distance\n \"\"\"\n return (\n d * torch.log(p_sigma / q_sigma)\n + 0.5 * (d * q_sigma**2 + q_mu_minus_p_mu_squared) / (p_sigma**2)\n - 0.5 * d\n )\n\n def log_pxh_given_z0_without_constants(\n self,\n representations: List[Dict],\n z_t: List[Tensor],\n eps_xh: List[Tensor],\n net_eps_xh: List[Tensor],\n gamma_t: Tensor,\n epsilon: float = 1e-10,\n ) -> List[List[Tensor]]:\n # Compute sigma_0 and rescale to the integer scale of the data.\n # for pos\n log_p_x_given_z0_without_constants = [\n -0.5\n * (\n utils.sum_except_batch(\n (eps_xh[ii][:, : self.pos_dim] - net_eps_xh[ii][:, : self.pos_dim])\n ** 2,\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n )\n for ii in range(len(representations))\n ]\n\n # only keep first several elements\n z_t = [_z_t[:, : 3 + 5 + 1] for _z_t in z_t]\n for ii, repr in enumerate(representations):\n representations[ii][\"charge\"] = representations[ii][\"charge\"][:, :1]\n # for ohe of atom types\n sigma_0 = self.schedule.sigma(gamma_t, target_tensor=z_t[0])\n sigma_0_cat = sigma_0 * self.normalizer.norm_values[1]\n atoms = [\n self.normalizer.unnormalize(repr[\"one_hot\"], ind=1)\n for repr in representations\n ]\n est_atoms = [\n self.normalizer.unnormalize(_z_t[:, self.pos_dim : -1], ind=1)\n for _z_t in z_t\n ]\n centered_atoms = [_est_atoms - 1 for _est_atoms in est_atoms]\n log_ph_cat_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_atoms[ii] + 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_atoms[ii] - 0.5)\n / sigma_0_cat[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_probabilities = [\n _log_ph_cat_proportionals\n - torch.logsumexp(\n _log_ph_cat_proportionals,\n dim=1,\n keepdim=True,\n )\n for _log_ph_cat_proportionals in 
log_ph_cat_proportionals\n ]\n log_p_hcat_given_z0 = [\n utils.sum_except_batch(\n log_probabilities[ii] * atoms[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n # for atom charge\n sigma_0_charge = sigma_0 * self.normalizer.norm_values[2]\n charges = [\n self.normalizer.unnormalize(repr[\"charge\"], ind=2)\n for repr in representations\n ]\n est_charges = [\n self.normalizer.unnormalize(_z_t[:, -1:], ind=2).long() for _z_t in z_t\n ]\n for ii in range(len(representations)):\n assert charges[ii].size() == est_charges[ii].size()\n centered_charges = [\n charges[ii] - est_charges[ii] for ii in range(len(representations))\n ]\n log_ph_charge_proportionals = [\n torch.log(\n utils.cdf_standard_gaussian(\n (centered_charges[ii] + 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n - utils.cdf_standard_gaussian(\n (centered_charges[ii] - 0.5)\n / sigma_0_charge[representations[ii][\"mask\"]]\n )\n + epsilon\n )\n for ii in range(len(representations))\n ]\n log_p_hcharge_given_z0 = [\n utils.sum_except_batch(\n log_ph_charge_proportionals[ii],\n representations[ii][\"mask\"],\n dim_size=representations[0][\"size\"].size(0),\n )\n for ii in range(len(representations))\n ]\n\n log_p_h_given_z0 = [\n log_p_x_given_z0_without_constants,\n log_p_hcat_given_z0,\n log_p_hcharge_given_z0,\n ]\n return log_p_h_given_z0\n\n # ------ INVERSE PASS ------\n\n @torch.no_grad()\n def sample(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n timesteps: Optional[int] = None,\n h0: Optional[List[Tensor]] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert h0 is not None if self.pos_only else True\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n # Iteratively sample p(z_s | z_t) for t = 1, ..., T, with s = t - 1.\n for s in reversed(range(0, timesteps)):\n s_array = torch.full((n_samples, 1), fill_value=s, device=zt_xh[0].device)\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n # print(s, zt_xh)\n\n zt_xh = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n # save frame\n if (s * return_frames) % timesteps == 0:\n idx = (s * return_frames) // timesteps\n out_samples[idx] = 
self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zs_given_zt(\n self,\n s: Tensor,\n t: Tensor,\n zt_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ):\n \"\"\"Samples from zs ~ p(zs | zt). Only used during sampling.\"\"\"\n gamma_s = self.schedule.gamma_module(s)\n gamma_t = self.schedule.gamma_module(t)\n\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zt_xh[0])\n\n sigma_s = self.schedule.sigma(gamma_s, target_tensor=zt_xh[0])\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=zt_xh[0])\n\n # Neural net prediction.\n combined_mask = torch.cat(masks)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=zt_xh,\n edge_index=edge_index,\n t=t,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=combined_mask,\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_net_eps_xh[:, : self.pos_dim] for _net_eps_xh in net_eps_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n # Note: mu_{t->s} = 1 / alpha_{t|s} z_t - sigma_{t|s}^2 / sigma_t / alpha_{t|s} epsilon\n # follows from the definition of mu_{t->s} and Equ. 
(7) in the EDM paper\n mu = [\n zt_xh[ii] / alpha_t_given_s[masks[ii]]\n - net_eps_xh[ii] * (sigma2_t_given_s / alpha_t_given_s / sigma_t)[masks[ii]]\n for ii in range(len(zt_xh))\n ]\n\n # Compute sigma for p(zs | zt).\n sigma = sigma_t_given_s * sigma_s / sigma_t\n\n # Sample zs given the paramters derived from zt.\n zs_xh = self.sample_normal(mu=mu, sigma=sigma, masks=masks, fix_noise=fix_noise)\n\n # Project down to avoid numerical runaway of the center of gravity.\n for ii in range(len(masks)):\n zs_xh[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zs_xh[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zs_xh\n\n def sample_normal(\n self,\n mu: List[Tensor],\n sigma: Tensor,\n masks: List[Tensor],\n fix_noise: bool = False,\n ) -> List[Tensor]:\n r\"\"\"Samples from a Normal distribution.\"\"\"\n if fix_noise:\n # bs = 1 if fix_noise else mu.size(0)\n raise NotImplementedError(\"fix_noise option isn't implemented yet\")\n eps_xh = self.sample_combined_position_feature_noise(masks=masks)\n zs_xh = [mu[ii] + sigma[masks[ii]] * eps_xh[ii] for ii in range(len(masks))]\n return zs_xh\n\n def sample_p_xh_given_z0(\n self,\n z0_xh: List[Tensor],\n edge_index: Tensor,\n n_frag_switch: Tensor,\n masks: List[Tensor],\n batch_size: int,\n conditions: Optional[Tensor] = None,\n fix_noise: bool = False,\n ) -> Tuple[List[Tensor]]:\n \"\"\"Samples x ~ p(x|z0).\"\"\"\n t_zeros = torch.zeros(size=(batch_size, 1), device=z0_xh[0].device)\n gamma_0 = self.schedule.gamma_module(t_zeros)\n # Computes sqrt(sigma_0^2 / alpha_0^2)\n sigma_x = self.schedule.SNR(-0.5 * gamma_0)\n net_eps_xh, net_eps_edge_attr = self.dynamics(\n xh=z0_xh,\n edge_index=edge_index,\n t=t_zeros,\n conditions=conditions,\n n_frag_switch=n_frag_switch,\n combined_mask=torch.cat(masks),\n edge_attr=None, # TODO: no edge_attr is considered now\n )\n\n # Compute mu for p(zs | zt).\n mu_x = self.compute_x_pred(\n net_eps_xh=net_eps_xh,\n zt_xh=z0_xh,\n gamma_t=gamma_0,\n masks=masks,\n )\n x0_xh = self.sample_normal(\n mu=mu_x, sigma=sigma_x, masks=masks, fix_noise=fix_noise\n )\n\n pos_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, : self.pos_dim], ii)\n for ii in range(len(masks))\n ]\n cat_0 = [\n self.normalizer.unnormalize(x0_xh[ii][:, self.pos_dim : -1], ii)\n for ii in range(len(masks))\n ]\n charge_0 = [\n torch.round(self.normalizer.unnormalize(x0_xh[ii][:, -1:], ii)).long()\n for ii in range(len(masks))\n ]\n\n cat_0 = [\n F.one_hot(torch.argmax(cat_0[ii], dim=1), self.node_nfs[ii] - 4).long()\n for ii in range(len(masks))\n ]\n return pos_0, cat_0, charge_0\n\n def compute_x_pred(\n self,\n net_eps_xh: List[Tensor],\n zt_xh: List[Tensor],\n gamma_t: Tensor,\n masks: List[Tensor],\n ) -> List[Tensor]:\n \"\"\"Commputes x_pred, i.e. the most likely prediction of x.\"\"\"\n sigma_t = self.schedule.sigma(gamma_t, target_tensor=net_eps_xh[0])\n alpha_t = self.schedule.alpha(gamma_t, target_tensor=net_eps_xh[0])\n x_pred = [\n 1.0 / alpha_t[masks[ii]] * (zt_xh[ii] - sigma_t[masks[ii]] * net_eps_xh[ii])\n for ii in range(len(masks))\n ]\n return x_pred\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. 
Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n 
torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n # ------ INPAINT ------\n @torch.no_grad()\n def inpaint_fixed(\n self,\n n_samples: int,\n fragments_nodes: List[torch.tensor],\n conditions: Optional[Tensor] = None,\n return_frames: int = 1,\n resamplings: int = 1,\n jump_length: int = 1,\n timesteps: Optional[int] = None,\n xh_fixed: Optional[List[Tensor]] = None,\n frag_fixed: Optional[List] = None,\n ):\n r\"\"\"\n Draw samples from the generative model. Optionally, return intermediate\n states for visualization purposes.\n \"\"\"\n timesteps = self.T if timesteps is None else timesteps\n assert 0 < return_frames <= timesteps\n assert timesteps % return_frames == 0\n assert len(xh_fixed)\n\n fragments_masks = [\n get_mask_for_frag(natm_nodes) for natm_nodes in fragments_nodes\n ]\n combined_mask = torch.cat(fragments_masks)\n edge_index = get_edges_index(combined_mask, remove_self_edge=True)\n n_frag_switch = get_n_frag_switch(fragments_nodes)\n\n h0 = [_xh_fixed[:, self.pos_dim :].long() for _xh_fixed in xh_fixed]\n\n for ii, _ in enumerate(xh_fixed):\n xh_fixed[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n xh_fixed[ii][:, : self.pos_dim],\n fragments_masks[ii],\n )\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_xh_fixed[:, : self.pos_dim] for _xh_fixed in xh_fixed],\n dim=0,\n ),\n combined_mask,\n )\n\n zt_xh = self.sample_combined_position_feature_noise(masks=fragments_masks)\n if self.pos_only:\n zt_xh = [\n torch.cat([zt_xh[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_zt_xh[:, : self.pos_dim] for _zt_xh in zt_xh],\n dim=0,\n ),\n combined_mask,\n )\n\n out_samples = [\n [\n torch.zeros((return_frames,) + _zt_xh.size(), device=_zt_xh.device)\n for _zt_xh in zt_xh\n ]\n for _ in range(return_frames)\n ]\n\n schedule = get_repaint_schedule(resamplings, jump_length, timesteps)\n s = timesteps - 1\n for i, n_denoise_steps in enumerate(schedule):\n for j in range(n_denoise_steps):\n s_array = torch.full(\n (n_samples, 1), fill_value=s, device=zt_xh[0].device\n )\n t_array = s_array + 1\n s_array = s_array / timesteps\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n\n zt_known, _ = self.noised_representation(\n xh_fixed, fragments_masks, gamma_s\n )\n zt_unknown = self.sample_p_zs_given_zt(\n s=s_array,\n t=t_array,\n zt_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n conditions=conditions,\n fix_noise=False,\n )\n\n if self.pos_only:\n zt_known = [\n torch.cat([zt_known[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n zt_unknown = [\n torch.cat([zt_unknown[ii][:, : self.pos_dim], h0[ii]], dim=1)\n for ii in range(len(h0))\n ]\n\n zt_xh = [\n zt_known[ii] if ii in frag_fixed else zt_unknown[ii]\n for ii in range(len(h0))\n ]\n\n # Noise combined representation, i.e., resample\n if j == n_denoise_steps - 1 and i < len(schedule) - 1:\n # Go back jump_length steps\n t = s + jump_length\n t_array = torch.full(\n (n_samples, 1), fill_value=t, device=zt_xh[0].device\n )\n t_array = t_array / timesteps\n\n gamma_s = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(s_array), xh_fixed[0]\n )\n 
gamma_t = self.schedule.inflate_batch_array(\n self.schedule.gamma_module(t_array), xh_fixed[0]\n )\n\n zt_xh = self.sample_p_zt_given_zs(\n zt_xh, fragments_masks, gamma_t, gamma_s\n )\n s = t\n\n s = s - 1\n\n # # save frame\n # if (s * return_frames) % timesteps == 0:\n # idx = (s * return_frames) // timesteps\n # out_samples[idx] = self.normalizer.unnormalize_z(zt_xh)\n\n pos, cat, charge = self.sample_p_xh_given_z0(\n z0_xh=zt_xh,\n edge_index=edge_index,\n n_frag_switch=n_frag_switch,\n masks=fragments_masks,\n batch_size=n_samples,\n conditions=conditions,\n )\n if self.pos_only:\n cat = [_h0[:, :-1] for _h0 in h0]\n charge = [_h0[:, -1:] for _h0 in h0]\n utils.assert_mean_zero_with_mask(\n torch.cat(\n [_pos[:, : self.pos_dim] for _pos in pos],\n dim=0,\n ),\n combined_mask,\n )\n\n # Overwrite last frame with the resulting x and h.\n out_samples[0] = [\n torch.cat([pos[ii], cat[ii], charge[ii]], dim=1) for ii in range(len(pos))\n ]\n return out_samples, fragments_masks\n\n def sample_p_zt_given_zs(\n self,\n zs: List[Tensor],\n masks: List[Tensor],\n gamma_t: Tensor,\n gamma_s: Tensor,\n fix_noise: bool = False,\n ) -> List[Tensor]:\n (\n sigma2_t_given_s,\n sigma_t_given_s,\n alpha_t_given_s,\n ) = self.schedule.sigma_and_alpha_t_given_s(gamma_t, gamma_s, zs[0])\n\n mu = [alpha_t_given_s[masks[ii]] * zs[ii] for ii in range(len(masks))]\n zt = self.sample_normal(\n mu=mu, sigma=sigma_t_given_s, masks=masks, fix_noise=fix_noise\n )\n\n for ii in range(len(masks)):\n zt[ii][:, : self.pos_dim] = utils.remove_mean_batch(\n zt[ii][:, : self.pos_dim],\n masks[ii],\n )\n return zt" }, { "identifier": "average_over_batch_metrics", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def average_over_batch_metrics(batch_metrics: List[Dict], allowed: List = []):\n epoch_metrics = {}\n effective_batch = {}\n for ii, out in enumerate(batch_metrics):\n for k, v in out.items():\n if not (k in allowed or len(allowed) == 0):\n continue\n if ii == 0:\n epoch_metrics[k] = v\n effective_batch[k] = 1\n else:\n if not np.isnan(v):\n epoch_metrics[k] += v\n effective_batch[k] += 1\n for k in epoch_metrics:\n epoch_metrics[k] /= effective_batch[k]\n return epoch_metrics" }, { "identifier": "pretty_print", "path": "oa_reactdiff/trainer/_metrics.py", "snippet": "def pretty_print(epoch, metric_dict, prefix=\"Train\"):\n out = f\"{prefix} epoch {epoch} \"\n for k, v in metric_dict.items():\n out += f\"{k} {v:.2f} \"\n print(out)" }, { "identifier": "batch_rmsd", "path": "oa_reactdiff/analyze/rmsd.py", "snippet": "def batch_rmsd(\n fragments_nodes: List[Tensor],\n out_samples: List[Tensor],\n xh: List[Tensor],\n idx: int = 1,\n threshold=0.5,\n):\n rmsds = []\n out_samples_use = out_samples[idx]\n xh_use = xh[idx]\n nodes = fragments_nodes[idx].long().cpu().numpy()\n start_ind, end_ind = 0, 0\n for jj, natoms in enumerate(nodes):\n end_ind += natoms\n mol1 = xh2pmg(out_samples_use[start_ind:end_ind])\n mol2 = xh2pmg(xh_use[start_ind:end_ind])\n try:\n rmsd = pymatgen_rmsd(mol1, mol2, ignore_chirality=True, threshold=threshold)\n except:\n rmsd = 1.0\n rmsds.append(min(rmsd, 1.0))\n start_ind = end_ind\n return rmsds" } ]
from typing import Dict, List, Optional, Tuple from pathlib import Path from torch import nn from torch.utils.data import DataLoader from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts, StepLR from pytorch_lightning import LightningModule from torchmetrics.classification import ( BinaryAccuracy, BinaryAUROC, BinaryF1Score, BinaryPrecision, BinaryCohenKappa, ) from torchmetrics import PearsonCorrCoef, SpearmanCorrCoef, MeanAbsoluteError from oa_reactdiff.dataset import ( ProcessedQM9, ProcessedDoubleQM9, ProcessedTripleQM9, ProcessedTS1x, ) from oa_reactdiff.dynamics import EGNNDynamics, Confidence from oa_reactdiff.diffusion._schedule import DiffSchedule, PredefinedNoiseSchedule from oa_reactdiff.diffusion._normalizer import Normalizer, FEATURE_MAPPING from oa_reactdiff.diffusion.en_diffusion import EnVariationalDiffusion from oa_reactdiff.trainer._metrics import average_over_batch_metrics, pretty_print from oa_reactdiff.analyze.rmsd import batch_rmsd import torch import copy import torch.nn.functional as F import numpy as np import pandas as pd import oa_reactdiff.utils.training_tools as utils
20,008
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, )
PROCESS_FUNC = { "QM9": ProcessedQM9, "DoubleQM9": ProcessedDoubleQM9, "TripleQM9": ProcessedTripleQM9, "TS1x": ProcessedTS1x, } FILE_TYPE = { "QM9": ".npz", "DoubleQM9": ".npz", "TripleQM9": ".npz", "TS1x": ".pkl", } LR_SCHEDULER = { "cos": CosineAnnealingWarmRestarts, "step": StepLR, } class DDPMModule(LightningModule): def __init__( self, model_config: Dict, optimizer_config: Dict, training_config: Dict, node_nfs: List[int] = [9] * 3, edge_nf: int = 4, condition_nf: int = 3, fragment_names: List[str] = ["inorg_node", "org_edge", "org_node"], pos_dim: int = 3, update_pocket_coords: bool = True, condition_time: bool = True, edge_cutoff: Optional[float] = None, norm_values: Tuple = (1.0, 1.0, 1.0), norm_biases: Tuple = (0.0, 0.0, 0.0), noise_schedule: str = "polynomial_2", timesteps: int = 1000, precision: float = 1e-5, loss_type: str = "l2", pos_only: bool = False, process_type: Optional[str] = None, model: nn.Module = None, enforce_same_encoding: Optional[List] = None, scales: List[float] = [1.0, 1.0, 1.0], eval_epochs: int = 20, source: Optional[Dict] = None, fixed_idx: Optional[List] = None, ) -> None: super().__init__() egnn_dynamics = EGNNDynamics( model_config=model_config, node_nfs=node_nfs, edge_nf=edge_nf, condition_nf=condition_nf, fragment_names=fragment_names, pos_dim=pos_dim, update_pocket_coords=update_pocket_coords, condition_time=condition_time, edge_cutoff=edge_cutoff, model=model, enforce_same_encoding=enforce_same_encoding, source=source, ) normalizer = Normalizer( norm_values=norm_values, norm_biases=norm_biases, pos_dim=pos_dim, )
gamma_module = PredefinedNoiseSchedule(
7
2023-10-30 02:53:38+00:00
24k
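The record above ends with the completion target `gamma_module = PredefinedNoiseSchedule(`. Going by the record's import statement and the `noise_schedule`, `timesteps`, and `precision` arguments visible in `cropped_code`, a plausible continuation is sketched below; the keyword names are an assumption inferred from those constructor arguments, not the dataset's stored gold snippet.

```python
from oa_reactdiff.diffusion._schedule import PredefinedNoiseSchedule

# Hypothetical completion, inferred from the DDPMModule constructor defaults;
# the record's actual gold continuation is not reproduced here.
noise_schedule, timesteps, precision = "polynomial_2", 1000, 1e-5

gamma_module = PredefinedNoiseSchedule(
    noise_schedule=noise_schedule,
    timesteps=timesteps,
    precision=precision,
)
```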
lewandofskee/DiAD
sgn/.ipynb_checkpoints/sgn-checkpoint.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "UNetModel", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. \"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\")\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n if isinstance(self.num_classes, int):\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n elif self.num_classes == \"continuous\":\n print(\"setting up linear c_adm embedding layer\")\n self.label_emb = nn.Linear(1, time_embed_dim)\n else:\n raise ValueError()\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n 
disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n if level 
and i == self.num_res_blocks[level]:\n out_ch = ch\n # if level == 3:\n # layers.append(Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, scale_guide=True))\n # else:\n # layers.append(Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, scale_guide=False))\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch, scale_guide=True)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] 
Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape[0] == x.shape[0]\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)" }, { "identifier": "TimestepEmbedSequential", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class TimestepEmbedSequential(nn.Sequential, TimestepBlock):\n \"\"\"\n A sequential module that passes timestep embeddings to the children that\n support it as an extra input.\n \"\"\"\n\n def forward(self, x, emb, context=None):\n for layer in self:\n if isinstance(layer, TimestepBlock):\n x = layer(x, emb)\n elif isinstance(layer, SpatialTransformer):\n x = layer(x, context)\n else:\n x = layer(x)\n return x" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class ResBlock(TimestepBlock):\n \"\"\"\n A residual block that can optionally change the number of channels.\n :param channels: the number of input channels.\n :param emb_channels: the number of timestep embedding channels.\n :param dropout: the rate of dropout.\n :param out_channels: if specified, the number of out channels.\n :param use_conv: if True and out_channels is specified, use a spatial\n convolution instead of a smaller 1x1 convolution to change the\n channels in the skip connection.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param use_checkpoint: if True, use gradient checkpointing on this module.\n :param up: if True, use this block for upsampling.\n :param down: if True, use this block for downsampling.\n \"\"\"\n\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n super().__init__()\n self.channels = channels\n self.emb_channels = emb_channels\n self.dropout = dropout\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_checkpoint = use_checkpoint\n self.use_scale_shift_norm = use_scale_shift_norm\n\n self.in_layers = nn.Sequential(\n normalization(channels),\n nn.SiLU(),\n conv_nd(dims, channels, self.out_channels, 3, padding=1),\n )\n\n self.updown = up or down\n\n if up:\n self.h_upd = Upsample(channels, False, dims)\n self.x_upd = Upsample(channels, False, dims)\n elif down:\n self.h_upd = Downsample(channels, False, dims)\n self.x_upd = Downsample(channels, False, dims)\n else:\n self.h_upd = self.x_upd = nn.Identity()\n\n self.emb_layers = nn.Sequential(\n nn.SiLU(),\n linear(\n emb_channels,\n 2 * self.out_channels if use_scale_shift_norm else self.out_channels,\n ),\n )\n self.out_layers = nn.Sequential(\n normalization(self.out_channels),\n nn.SiLU(),\n nn.Dropout(p=dropout),\n zero_module(\n conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)\n ),\n )\n\n if self.out_channels == channels:\n self.skip_connection = nn.Identity()\n elif use_conv:\n self.skip_connection = conv_nd(\n dims, channels, 
self.out_channels, 3, padding=1\n )\n else:\n self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)\n\n def forward(self, x, emb):\n \"\"\"\n Apply the block to a Tensor, conditioned on a timestep embedding.\n :param x: an [N x C x ...] Tensor of features.\n :param emb: an [N x emb_channels] Tensor of timestep embeddings.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n return checkpoint(\n self._forward, (x, emb), self.parameters(), self.use_checkpoint\n )\n\n\n def _forward(self, x, emb):\n if self.updown:\n in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]\n h = in_rest(x)\n h = self.h_upd(h)\n x = self.x_upd(x)\n h = in_conv(h)\n else:\n h = self.in_layers(x)\n emb_out = self.emb_layers(emb).type(h.dtype)\n while len(emb_out.shape) < len(h.shape):\n emb_out = emb_out[..., None]\n if self.use_scale_shift_norm:\n out_norm, out_rest = self.out_layers[0], self.out_layers[1:]\n scale, shift = th.chunk(emb_out, 2, dim=1)\n h = out_norm(h) * (1 + scale) + shift\n h = out_rest(h)\n else:\n h = h + emb_out\n h = self.out_layers(h)\n return self.skip_connection(x) + h" }, { "identifier": "Downsample", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class Downsample(nn.Module):\n \"\"\"\n A downsampling layer with an optional convolution.\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then\n downsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n stride = 2 if dims != 3 else (1, 2, 2)\n if use_conv:\n self.op = conv_nd(\n dims, self.channels, self.out_channels, 3, stride=stride, padding=padding\n )\n else:\n assert self.channels == self.out_channels\n self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n return self.op(x)" }, { "identifier": "AttentionBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class AttentionBlock(nn.Module):\n \"\"\"\n An attention block that allows spatial positions to attend to each other.\n Originally ported from here, but adapted to the N-d case.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.\n \"\"\"\n\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n super().__init__()\n self.channels = channels\n if num_head_channels == -1:\n self.num_heads = num_heads\n else:\n assert (\n channels % num_head_channels == 0\n ), f\"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}\"\n self.num_heads = channels // num_head_channels\n self.use_checkpoint = use_checkpoint\n self.norm = normalization(channels)\n self.qkv = conv_nd(1, channels, channels * 3, 1)\n if use_new_attention_order:\n # split qkv before split heads\n self.attention = QKVAttention(self.num_heads)\n else:\n # split heads before split qkv\n self.attention = QKVAttentionLegacy(self.num_heads)\n\n self.proj_out = zero_module(conv_nd(1, channels, channels, 1))\n\n def forward(self, x):\n return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half 
call!!!\n #return pt_checkpoint(self._forward, x) # pytorch\n\n def _forward(self, x):\n b, c, *spatial = x.shape\n x = x.reshape(b, c, -1)\n qkv = self.qkv(self.norm(x))\n h = self.attention(qkv)\n h = self.proj_out(h)\n return (x + h).reshape(b, c, *spatial)" }, { "identifier": "Upsample", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class Upsample(nn.Module):\n \"\"\"\n An upsampling layer with an optional convolution.\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then\n upsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1, scale_guide=False):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n self.scale_guide = scale_guide\n if use_conv:\n self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n if self.dims == 3:\n x = F.interpolate(\n x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode=\"nearest\"\n )\n else:\n x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n # if self.scale_guide:\n # x = F.interpolate(x, scale_factor=1.75, mode=\"nearest\")\n # else:\n # x = F.interpolate(x, scale_factor=2, mode=\"nearest\")\n if self.use_conv:\n x = self.conv(x)\n return x" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = 
encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting 
= weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n # z = x\n if self.model.conditioning_key is not None and not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', \"txt\"]:\n xc = batch[cond_key]\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. 
/ self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n 
return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n 
total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = 
{\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. 
Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True,timesteps=1000):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n x_T=None,\n timesteps=1000,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose,timesteps=timesteps)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n timesteps=timesteps,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0])\n # subset_end = int(timesteps+1 * self.ddim_timesteps.shape[0] / self.ddpm_num_timesteps)\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling 
with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % 500 == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), 
sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n 
timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import einops import torch import torch as th import torch.nn as nn import torchvision from ldm.modules.diffusionmodules.util import ( conv_nd, linear, zero_module, timestep_embedding, ) from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.attention import SpatialTransformer from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock, Upsample from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, exists, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from omegaconf.listconfig import ListConfig
21,262
def custom_sigmoid(x): return 1 / (1 + torch.exp(-(x - 600) / 10)) class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): hs = [] with torch.no_grad():
def custom_sigmoid(x): return 1 / (1 + torch.exp(-(x - 600) / 10)) class ControlledUnetModel(UNetModel): def forward(self, x, timesteps=None, context=None, control=None, only_mid_control=False, **kwargs): hs = [] with torch.no_grad():
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-10-30 14:21:09+00:00
24k
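As a rough, non-authoritative sketch of how rows with the layout shown above could be consumed, the snippet below assumes the records are serialized as JSON Lines and that the keys (repo_name, file_path, context with per-entry "snippet" fields, import_statement, cropped_code, next_line) match the column names; the file name data.jsonl and the helper build_prompt are hypothetical, not part of the dataset itself.

import json

def build_prompt(record):
    # Join the retrieved context snippets, the file's import block, and the
    # cropped in-file code; a model is then expected to predict next_line.
    context_parts = [entry["snippet"] for entry in record.get("context", [])]
    prompt = "\n".join(context_parts + [record["import_statement"], record["cropped_code"]])
    return prompt, record["next_line"]

with open("data.jsonl") as f:  # assumed JSONL serialization of these rows
    for line in f:
        record = json.loads(line)
        prompt, target = build_prompt(record)
        print(record["repo_name"], record["file_path"], len(prompt), repr(target))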
nv-tlabs/pacer
uhc/smpllib/smpl_local_robot.py
[ { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_local.py", "snippet": "class Skeleton:\n def __init__(self):\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.dof_name = [\"x\", \"y\", \"z\"]\n self.root = None\n\n def forward_bvh(self, bone):\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bvh(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n scale,\n jrange,\n hull_dict,\n exclude_bones=None,\n channels=None,\n spec_channels=None,\n upright_start=False,\n remove_toe=False,\n real_weight_porpotion=False,\n real_weight=False,\n big_ankle = False,\n ):\n if channels is None:\n channels = [\"x\", \"y\", \"z\"]\n if exclude_bones is None:\n exclude_bones = {}\n if spec_channels is None:\n spec_channels = dict()\n self.hull_dict = hull_dict\n self.upright_start = upright_start\n self.remove_toe = remove_toe\n self.real_weight_porpotion = real_weight_porpotion\n self.real_weight = real_weight\n self.big_ankle = big_ankle\n joint_names = list(\n filter(lambda x: all([t not in x for t in exclude_bones]),\n offsets.keys()))\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.channels = channels\n self.name2bone[self.root.name] = self.root\n self.root.offset = offsets[self.root.name]\n self.bones.append(self.root)\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n\n bone.channels = (spec_channels[joint]\n if joint in spec_channels.keys() else channels)\n bone.dof_index = [dof_ind[x] for x in bone.channels]\n bone.offset = np.array(offsets[joint]) * self.len_scale\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n\n self.bones.append(bone)\n self.name2bone[joint] = bone\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n # print(parent_name)\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n self.forward_bvh(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.size_buffer = {}\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n\n for joint in joints[1:]:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\n \"njmax\": \"700\",\n \"nconmax\": \"700\"\n })\n tree.write(fname, pretty_print=True)\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = 
parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n self.size_buffer = {}\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"500\"\n SubElement(actuators, \"motor\", attr)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", {\n \"njmax\": \"700\",\n \"nconmax\": \"700\"\n })\n\n return etree.tostring(tree, pretty_print=False)\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n SubElement(node, \"freejoint\", j_attr)\n else:\n for i in range(len(bone.dof_index)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n j_attr[\"name\"] = bone.name + \"_\" + self.dof_name[ind]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0] )\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n j_attr[\"armature\"] = \"0.02\"\n\n if i < len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n\n SubElement(node, \"joint\", j_attr)\n\n # write geometry\n g_attr = dict()\n g_attr[\"type\"] = GEOM_TYPES[bone.name]\n g_attr[\"contype\"] = \"1\"\n g_attr[\"conaffinity\"] = \"1\"\n if self.real_weight:\n base_density = 1000\n else:\n base_density = 500\n g_attr[\"density\"] = str(base_density)\n e1 = np.zeros(3)\n e2 = bone.end.copy() + offset\n if bone.name in [\"Torso\", \"Chest\", \"Spine\"]:\n seperation = 0.45\n else:\n seperation = 0.2\n\n # if bone.name in [\"L_Hip\"]:\n # seperation = 0.3\n\n\n e1 += e2 * seperation\n e2 -= e2 * seperation\n hull_params = self.hull_dict[bone.name]\n\n if g_attr[\"type\"] == \"capsule\":\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n\n side_len = np.linalg.norm(e2 - e1)\n # radius = 0.067\n # V = np.pi * radius ** 2 * ((4/3) * radius + side_len)\n\n roots = np.polynomial.polynomial.Polynomial(\n [-hull_params['volume'], 0, side_len * np.pi,\n 4 / 3 * np.pi]).roots()\n real_valued = roots.real[abs(roots.imag) < 1e-5]\n real_valued = real_valued[real_valued > 0]\n if bone.name in [\"Torso\", \"Spine\", \"L_Hip\", \"R_Hip\"]:\n real_valued *= 0.7 # ZL Hack: shrinkage\n if self.real_weight_porpotion: # If shift is enabled, shift the weight based on teh shrinkage factor\n g_attr[\"density\"] = str((1 / 0.7**2) * base_density)\n\n if bone.name in [\"Chest\"]:\n real_valued *= 0.7 # ZL Hack: shrinkage\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((1 / 0.7**2) * base_density)\n\n if bone.name in [\"L_Knee\", 'R_Knee']:\n real_valued *= 0.9 # ZL Hack: shrinkage\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((1 / 0.9**2) * base_density)\n\n # if bone.name in [\"Spine\"]:\n # real_valued *= 0.01 # ZL Hack: shrinkage\n\n # 
g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"size\"] = \"{0:.4f}\".format(*real_valued)\n\n elif g_attr[\"type\"] == \"box\":\n pos = (e1 + e2) / 2\n min_verts = hull_params['norm_verts'].min(axis=0).values\n size = (hull_params['norm_verts'].max(axis=0).values - min_verts).numpy()\n if self.upright_start:\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n size[0] = hull_params['volume'] / (size[2] * size[0])\n else:\n size[2] = hull_params['volume'] / (size[1] * size[0])\n else:\n size[1] = hull_params['volume'] / (size[2] * size[0])\n size /= 2\n\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n if self.upright_start:\n pos[2] = -bone.pos[2] / 2 - self.size_buffer[bone.parent.name][2] + size[2] # To get toe to be at the same height as the parent\n pos[1] = -bone.pos[1] / 2 # To get toe to be at the same x as the parent\n else:\n pos[1] = -bone.pos[1] / 2 - self.size_buffer[bone.parent.name][1] + size[1] # To get toe to be at the same height as the parent\n pos[0] = -bone.pos[0] / 2 # To get toe to be at the same x as the parent\n\n if self.remove_toe:\n size /= 20 # Smaller toes...\n pos[1] = 0\n pos[0] = 0\n bone_dir = bone.end / np.linalg.norm(bone.end)\n if not self.remove_toe:\n rot = np.array([1, 0, 0, 0])\n else:\n rot = sRot.from_euler(\"xyz\",[0, 0, np.arctan(bone_dir[1] / bone_dir[0])]).as_quat()[[3, 0, 1, 2]]\n\n if self.big_ankle:\n # Big ankle override\n g_attr = {}\n hull_params = self.hull_dict[bone.name]\n min_verts, max_verts = hull_params['norm_verts'].min(axis=0).values, hull_params['norm_verts'].max(axis=0).values\n size = max_verts - min_verts\n\n bone_end = bone.end\n pos = (max_verts + min_verts)/2\n size /= 2\n\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n parent_min, parent_max = self.hull_dict[bone.parent.name]['norm_verts'].min(axis=0).values, self.hull_dict[bone.parent.name]['norm_verts'].max(axis=0).values\n parent_pos = (parent_max + parent_min)/2\n\n pos[2] = parent_min[2] - bone.pos[2] + size[2] # To get toe to be at the same height as the parent\n pos[1] = parent_pos[1] - bone.pos[1] # To get toe to be at the y as the parent\n\n rot = np.array([1, 0, 0, 0])\n g_attr[\"type\"] = \"box\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*size)\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*rot)\n\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*size)\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*rot)\n self.size_buffer[bone.name] = size\n\n elif g_attr[\"type\"] == \"sphere\":\n pos = np.zeros(3)\n radius = np.cbrt(hull_params['volume'] * 3 / (4 * np.pi))\n if bone.name in [\"Pelvis\"]:\n radius *= 0.6 # ZL Hack: shrinkage\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((1 / 0.6**3) * base_density)\n\n g_attr[\"size\"] = \"{0:.4f}\".format(radius)\n # g_attr[\"size\"] = \"{0:.4f}\".format(*template_attributes[\"size\"])\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "Skeleton", "path": "uhc/khrylib/mocap/skeleton_mesh_local.py", "snippet": "class Skeleton:\n def __init__(self, model_dir):\n self.model_dir = model_dir\n self.bones = []\n self.name2bone = {}\n self.mass_scale = 1.0\n self.len_scale = 1.0\n self.root = None\n 
self.equalities = None\n self.exclude_contacts = None\n self.collision_groups = None\n self.simple_geom = False\n self.buffer_dict = {\"njmax\": \"2500\", \"nconmax\": \"500\"}\n\n def forward_bones(self, bone):\n if bone.parent:\n # bone.pos = bone.parent.pos + bone.offset\n bone.pos = bone.offset\n for bone_c in bone.child:\n self.forward_bones(bone_c)\n\n def load_from_offsets(\n self,\n offsets,\n parents,\n axes,\n channels,\n jrange,\n sites,\n scale,\n equalities,\n hull_dict, \n exclude_contacts=None,\n collision_groups=None,\n conaffinity=None,\n simple_geom=False,\n color_dict=None,\n real_weight = False, \n replace_feet = True, \n ):\n if exclude_contacts is None:\n exclude_contacts = []\n if collision_groups is None:\n collision_groups = {}\n self.exclude_contacts = exclude_contacts\n self.collision_groups = {}\n self.conaffinity = {}\n self.color_dict = color_dict # giving color to the meshes\n self.real_weight = real_weight\n self.real_weight_porpotion = True\n self.replace_feet = replace_feet\n self.hull_dict = hull_dict\n\n for group, bones in collision_groups.items():\n for bone in bones:\n self.collision_groups[bone] = group\n\n for group, bones in conaffinity.items():\n for bone in bones:\n self.conaffinity[bone] = group\n\n self.simple_geom = simple_geom\n\n joint_names = list(offsets.keys())\n dof_ind = {\"x\": 0, \"y\": 1, \"z\": 2}\n self.equalities = equalities\n self.len_scale = scale\n self.root = Bone()\n self.root.id = 0\n self.root.name = joint_names[0]\n self.root.orient = axes[joint_names[0]]\n self.root.pos = offsets[joint_names[0]]\n self.root.sites = sites.get(joint_names[0], [])\n self.name2bone[self.root.name] = self.root\n self.bones.append(self.root)\n\n for i, joint in enumerate(joint_names[1:]):\n bone = Bone()\n bone.id = i + 1\n bone.name = joint\n bone.channels = channels[joint]\n bone.dof_index = [dof_ind[x[0]] for x in bone.channels]\n bone.offset = offsets[joint] * self.len_scale\n bone.orient = axes[joint]\n bone.lb = np.rad2deg(jrange[joint][:, 0])\n bone.ub = np.rad2deg(jrange[joint][:, 1])\n bone.sites = sites.get(joint, [])\n self.bones.append(bone)\n self.name2bone[joint] = bone\n\n for bone in self.bones[1:]:\n parent_name = parents[bone.name]\n if parent_name in self.name2bone.keys():\n bone_p = self.name2bone[parent_name]\n bone_p.child.append(bone)\n bone.parent = bone_p\n\n self.forward_bones(self.root)\n for bone in self.bones:\n if len(bone.child) == 0:\n bone.ends.append(bone.pos.copy())\n bone.end = bone.pos.copy() + 0.002\n for c_bone, p_bone in parents.items():\n if p_bone == bone.name:\n bone.end += np.array(offsets[c_bone]) * self.len_scale\n break\n else:\n bone.end = sum([bone_c.pos for bone_c in bone.child]) / len(bone.child)\n for bone_c in bone.child:\n bone.ends.append(bone_c.pos.copy())\n\n def write_str(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n return etree.tostring(tree, pretty_print=True)\n\n def write_xml(\n self,\n fname,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n bump_buffer=False,\n ):\n tree = self.construct_tree(ref_angles=ref_angles,\n offset=offset,\n template_fname=template_fname)\n if bump_buffer:\n SubElement(tree.getroot(), \"size\", self.buffer_dict)\n # create sensors\n # sensor = tree.getroot().find(\"sensor\")\n 
# for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'frameangvel', {'objtype': 'body', 'objname': bone.name})\n # for bone in self.bones:\n # SubElement(sensor, 'framelinvel', {'objtype': 'xbody', 'objname': bone.name})\n\n tree.write(fname, pretty_print=True)\n\n def construct_tree(\n self,\n template_fname=TEMPLATE_FILE,\n offset=np.array([0, 0, 0]),\n ref_angles=None,\n ):\n if ref_angles is None:\n ref_angles = {}\n parser = XMLParser(remove_blank_text=True)\n tree = parse(template_fname, parser=parser)\n worldbody = tree.getroot().find(\"worldbody\")\n\n self.write_xml_bodynode(self.root, worldbody, offset, ref_angles)\n\n # create meshes\n asset = tree.getroot().find(\"asset\")\n for bone in self.bones:\n if os.path.exists(f\"{self.model_dir}/geom/{bone.name}.stl\"):\n attr = {\n \"file\":\n f\"{self.model_dir.split('/')[-1]}/geom/{bone.name}.stl\",\n \"name\": f\"{bone.name}_mesh\"\n }\n # geom_relative_path = f'../mesh/smpl/{self.model_dir.split(\"/\")[-1]}'\n # attr = {\"file\": f\"{geom_relative_path}/geom/{bone.name}.stl\", \"name\": f\"{bone.name}_mesh\"}\n SubElement(asset, \"mesh\", attr)\n\n # create actuators\n actuators = tree.getroot().find(\"actuator\")\n\n joints = worldbody.findall(\".//joint\")\n for joint in joints:\n name = joint.attrib[\"name\"]\n attr = dict()\n attr[\"name\"] = name\n attr[\"joint\"] = name\n attr[\"gear\"] = \"1\"\n SubElement(actuators, \"motor\", attr)\n\n # create exclude contacts\n c_node = tree.getroot().find(\"contact\")\n for bname1, bname2 in self.exclude_contacts:\n attr = {\"body1\": bname1, \"body2\": bname2}\n SubElement(c_node, \"exclude\", attr)\n # create equalities\n eq_node = tree.getroot().find(\"equality\")\n for eq_joints in self.equalities.values():\n for j1 in range(len(eq_joints) - 1):\n for j2 in range(j1 + 1, len(eq_joints)):\n jname1, jcoeff1 = eq_joints[j1]\n jname2, jcoeff2 = eq_joints[j2]\n coeff = jcoeff1 / jcoeff2\n attr = {\n \"joint1\": jname1,\n \"joint2\": jname2,\n \"polycoef\": f\"0 {coeff:.6f} 0 0 0\",\n }\n SubElement(eq_node, \"joint\", attr)\n return tree\n\n def write_xml_bodynode(self, bone, parent_node, offset, ref_angles):\n if self.real_weight:\n base_density = 1000\n else:\n base_density = 500\n \n attr = dict()\n attr[\"name\"] = bone.name\n attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos + offset))\n quat = quaternion_from_matrix(bone.orient)\n attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*quat)\n node = SubElement(parent_node, \"body\", attr)\n\n # write joints\n if bone.parent is None:\n j_attr = dict()\n j_attr[\"name\"] = bone.name\n # j_attr[\"limited\"] = \"false\"\n # j_attr[\"type\"] = \"free\"\n # j_attr[\"armature\"] = \"0.02\"\n # j_attr[\"damping\"] = \"50\"\n # j_attr[\"stiffness\"] = \"500\"\n # j_attr[\"frictionloss\"] = \"0\"\n\n SubElement(node, \"freejoint\", j_attr)\n else:\n\n for i in range(len(bone.channels)):\n ind = bone.dof_index[i]\n axis = bone.orient[:, ind]\n j_attr = dict()\n\n\n j_attr[\"name\"] = bone.name + \"_\" + bone.channels[i]\n j_attr[\"type\"] = \"hinge\"\n j_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(bone.pos +\n offset))\n j_attr[\"axis\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*axis)\n\n\n j_attr[\"stiffness\"] = str(GAINS[bone.name][0])\n j_attr[\"damping\"] = str(GAINS[bone.name][1])\n if bone.name in [\"L_Ankle\", \"R_Ankle\"]:\n j_attr[\"armature\"] = \"0.01\"\n else:\n j_attr[\"armature\"] = \"0.02\"\n\n if i 
< len(bone.lb):\n j_attr[\"range\"] = \"{0:.4f} {1:.4f}\".format(\n bone.lb[i], bone.ub[i])\n else:\n j_attr[\"range\"] = \"-180.0 180.0\"\n if j_attr[\"name\"] in ref_angles.keys():\n j_attr[\"ref\"] = f\"{ref_angles[j_attr['name']]:.1f}\"\n SubElement(node, \"joint\", j_attr)\n\n # write sites\n for s_name, s_pos, s_quat in bone.sites:\n s_attr = {\"name\": s_name}\n s_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*(s_pos + offset))\n s_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*s_quat)\n s_attr[\"type\"] = \"sphere\"\n s_attr[\"size\"] = \"0.03\"\n SubElement(node, \"site\", s_attr)\n\n\n geom_path = f\"{self.model_dir}/geom/{bone.name}.stl\"\n if os.path.exists(geom_path):\n g_attr = {\"type\": \"mesh\", \"mesh\": f\"{bone.name}_mesh\"}\n if bone.name in self.collision_groups.keys():\n g_attr[\"density\"] = str(base_density)\n \n\n g_attr[\"contype\"] = str(self.collision_groups[bone.name])\n g_attr[\"conaffinity\"] = str(self.conaffinity[bone.name])\n\n # g_attr[\"solimp\"] = \"0.9 0.95 0.001 0.5 2\"\n # g_attr[\"solref\"] = \"0.02 1\"\n # g_attr[\"size\"] = str(10)\n # g_attr[\"friction\"] = \"0.000000000005 0.000000000005 0.1\"\n if not self.color_dict is None:\n g_attr[\"rgba\"] = self.color_dict[bone.name]\n\n \n if bone.name in [\"L_Ankle\", \"R_Ankle\", \"L_Toe\", \"R_Toe\"] and self.replace_feet:\n g_attr = {}\n hull_params = self.hull_dict[bone.name]\n min_verts, max_verts = hull_params['norm_verts'].min(axis=0), hull_params['norm_verts'].max(axis=0)\n size = max_verts - min_verts\n\n bone_end = bone.end\n pos = (max_verts + min_verts)/2\n size /= 2\n\n if bone.name == \"L_Toe\" or bone.name == \"R_Toe\":\n parent_min, parent_max = self.hull_dict[bone.parent.name]['norm_verts'].min(axis=0), self.hull_dict[bone.parent.name]['norm_verts'].max(axis=0)\n parent_pos = (parent_max + parent_min)/2\n \n pos[2] = parent_min[2] - bone.pos[2] + size[2] # To get toe to be at the same height as the parent\n pos[1] = parent_pos[1] - bone.pos[1] # To get toe to be at the y as the parent\n\n rot = np.array([1, 0, 0, 0])\n if self.real_weight_porpotion:\n g_attr[\"density\"] = str((hull_params['volume'] / (size[0] * size[1] * size[2] * 8)) * base_density)\n g_attr[\"type\"] = \"box\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*pos)\n g_attr[\"size\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*size)\n g_attr[\"quat\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f}\".format(*rot)\n\n SubElement(node, \"geom\", g_attr)\n else:\n for end in bone.ends:\n g_attr = dict()\n e1 = bone.pos + offset\n e2 = end + offset\n v = e2 - e1\n if np.linalg.norm(v) > 1e-6:\n v /= np.linalg.norm(v)\n e1 += v * 0.02\n e2 -= v * 0.02\n g_attr[\"type\"] = \"capsule\"\n g_attr[\n \"fromto\"] = \"{0:.4f} {1:.4f} {2:.4f} {3:.4f} {4:.4f} {5:.4f}\".format(\n *np.concatenate([e1, e2]))\n else:\n g_attr[\"type\"] = \"sphere\"\n g_attr[\"pos\"] = \"{0:.4f} {1:.4f} {2:.4f}\".format(*bone.pos)\n g_attr[\"size\"] = \"0.0300\" if self.simple_geom else \"0.0100\"\n if not self.simple_geom:\n g_attr[\"contype\"] = \"0\"\n g_attr[\"conaffinity\"] = \"0\"\n elif bone.name in self.collision_groups.keys():\n group = str(self.collision_groups[bone.name])\n g_attr[\"contype\"] = group\n g_attr[\"conaffinity\"] = group\n SubElement(node, \"geom\", g_attr)\n\n # write child bones\n for bone_c in bone.child:\n self.write_xml_bodynode(bone_c, node, offset, ref_angles)" }, { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n def __init__(self, create_transl=False, 
*args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = 
th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n parents_dict = {\n joint_names[i]: joint_names[parents[i]]\n for i in range(len(joint_names))\n }\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names,\n joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self,\n zero_pose=None,\n betas=torch.zeros(1, 10),\n flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose,\n th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(\n betas.shape[0], 1),\n th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {\n joint_names[c]:\n (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:,\n c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n 
joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n }\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n 
self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {\n x: np.hstack([np.ones([3, 1]) * -np.pi,\n np.ones([3, 1]) * np.pi])\n for x in self.joint_names\n }\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [\n SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES\n ]\n self.parents_to_use = np.concatenate(\n [np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n\n def get_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {\n names_smpl[i]: offsets_smpl[i]\n for i in range(len(names_smpl))\n }\n parents_dict = {\n names_smpl[i]: names_smpl[parents[i]]\n for i in range(len(names_smpl))\n }\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {\n joint_names[c]:\n (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c]\n for c, p in enumerate(smpl_joint_parents)\n if joint_names[c] in self.joint_names\n }\n joint_parents = {\n x: joint_names[i] if i >= 0 else None\n for x, i in zip(joint_names, smpl_joint_parents)\n if joint_names[i] in self.joint_names\n }\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights 
= self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "quadric_mesh_decimation", "path": "uhc/utils/geom.py", "snippet": "def quadric_mesh_decimation(fname, reduction_rate, verbose=False):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutput()\n\n decimate = vtkQuadricDecimation()\n decimate.SetInputData(inputPoly)\n decimate.SetTargetReduction(reduction_rate)\n decimate.Update()\n decimatedPoly = vtkPolyData()\n decimatedPoly.ShallowCopy(decimate.GetOutput())\n\n if verbose:\n print(\n f\"Mesh Decimation: (points, faces) goes from ({inputPoly.GetNumberOfPoints(), inputPoly.GetNumberOfPolys()}) \"\n f\"to ({decimatedPoly.GetNumberOfPoints(), decimatedPoly.GetNumberOfPolys()})\"\n )\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputData(decimatedPoly)\n stlWriter.Write()" }, { "identifier": "center_scale_mesh", "path": "uhc/utils/geom.py", "snippet": "def center_scale_mesh(fname, scale):\n reader = vtkSTLReader()\n reader.SetFileName(fname)\n reader.Update()\n inputPoly = reader.GetOutputPort()\n\n centerOfMassFilter = vtkCenterOfMass()\n centerOfMassFilter.SetInputConnection(inputPoly)\n centerOfMassFilter.SetUseScalarsAsWeights(False)\n centerOfMassFilter.Update()\n center = centerOfMassFilter.GetCenter()\n\n transform = vtkTransform()\n transform.PostMultiply()\n transform.Translate(-center[0], -center[1], -center[2])\n transform.Scale(scale, scale, scale)\n transform.Translate(center[0], center[1], center[2])\n transform.Update()\n\n transformFilter = vtkTransformPolyDataFilter()\n transformFilter.SetInputConnection(inputPoly)\n transformFilter.SetTransform(transform)\n transformFilter.Update()\n\n stlWriter = vtkSTLWriter()\n stlWriter.SetFileName(fname)\n stlWriter.SetFileTypeToBinary()\n stlWriter.SetInputConnection(transformFilter.GetOutputPort())\n stlWriter.Write()" }, { "identifier": "flags", "path": "uhc/utils/flags.py", "snippet": "class Flags(object):\n def __init__(self, *items):" } ]
import os import sys import time import argparse import torch import pdb import os.path as osp import numpy as np import math import uuid import atexit import shutil import joblib import cv2 import mujoco import mujoco.viewer from copy import deepcopy from collections import defaultdict from lxml.etree import XMLParser, parse, ElementTree, Element, SubElement from lxml import etree from io import BytesIO from uhc.khrylib.mocap.skeleton_local import Skeleton from uhc.khrylib.mocap.skeleton_mesh_local import Skeleton as SkeletonMesh from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from collections import defaultdict from scipy.spatial import ConvexHull from stl import mesh from uhc.utils.geom import quadric_mesh_decimation, center_scale_mesh from uhc.utils.flags import flags
16,076
print("!!!! Using modified SMPL starting pose !!!!") self.remove_toe = cfg.get("remove_toe", False) self.big_ankle = cfg.get("big_ankle", False) self.real_weight = cfg.get("real_weight", False) self.real_weight_porpotion = cfg.get("real_weight_porpotion", False) self.rel_joint_lm = cfg.get("rel_joint_lm", True) # Rolling this out worldwide!! os.makedirs("/tmp/smpl/", exist_ok=True) self.masterfoot = cfg.get("masterfoot", False) self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.load_from_skeleton() atexit.register(self.remove_geoms) def remove_geoms(self): while len(self.model_dirs) > 0: geom_dir = self.model_dirs.pop(0) if osp.isdir(geom_dir): shutil.rmtree(geom_dir, ignore_errors=True) def get_joint_vertices(self, pose_aa, th_betas=None, th_trans=None, gender=[0]): if gender[0] == 0: smpl_parser = self.smpl_parser_n elif gender[0] == 1: smpl_parser = self.smpl_parser_m elif gender[0] == 2: smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") vertices, joints = smpl_parser.get_joints_verts(pose=pose_aa, th_betas=th_betas, th_trans=th_trans) return vertices, joints def load_from_skeleton( self, betas=None, v_template=None, gender=[0], objs_info=None, obj_pose=None, params=None, ): self.tree = None # xml tree if gender[0] == 0: self.smpl_parser = smpl_parser = self.smpl_parser_n elif gender[0] == 1: self.smpl_parser = smpl_parser = self.smpl_parser_m elif gender[0] == 2: self.smpl_parser = smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") if betas is None and self.beta is None: betas = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) else: if params is None: self.beta = betas if not betas is None else self.beta else: # If params is not none, we need to set the beta first betas = self.map_params(betas) self.beta = torch.from_numpy( denormalize_range( betas.numpy().squeeze(), self.param_specs["beta"]["lb"], self.param_specs["beta"]["ub"], )[None, ])
sys.path.append(os.getcwd()) # from scipy.spatial.qhull import _Qhull def parse_vec(string): return np.fromstring(string, sep=" ") def parse_fromto(string): fromto = np.fromstring(string, sep=" ") return fromto[:3], fromto[3:] def normalize_range(value, lb, ub): return (value - lb) / (ub - lb) * 2 - 1 def denormalize_range(value, lb, ub): return (value + 1) * 0.5 * (ub - lb) + lb def vec_to_polar(v): phi = math.atan2(v[1], v[0]) theta = math.acos(v[2]) return np.array([theta, phi]) def polar_to_vec(p): v = np.zeros(3) v[0] = math.sin(p[0]) * math.cos(p[1]) v[1] = math.sin(p[0]) * math.sin(p[1]) v[2] = math.cos(p[0]) return v def in_hull(hull, queries): tolerance = 1e-3 if len(queries.shape) == 1: queries = queries[None, ] return np.all( np.add(np.dot(queries, hull.equations[:, :-1].T), hull.equations[:, -1]) <= tolerance, axis=1, ) def get_joint_geometries( smpl_verts, smpl_jts, skin_weights, joint_names, geom_dir, scale_dict={}, suffix=None, verbose=False, min_num_vert=50, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries os.makedirs(geom_dir, exist_ok=True) for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue norm_verts = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(smpl_verts[vind]) norm_hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": norm_hull, "norm_verts": norm_verts, "verts": smpl_verts[vind], "hull": hull, "volume": hull.volume } center = norm_verts[hull.vertices].mean(axis=0) jgeom = mesh.Mesh( np.zeros(hull.simplices.shape[0], dtype=mesh.Mesh.dtype)) for i, f in enumerate(hull.simplices): for j in range(3): jgeom.vectors[i][j] = norm_verts[f[j], :] # check if the face's normal is facing outward normal = np.cross( jgeom.vectors[i][1] - jgeom.vectors[i][0], jgeom.vectors[i][2] - jgeom.vectors[i][0], ) out_vec = jgeom.vectors[i].mean(axis=0) - center if np.dot(normal, out_vec) < 0: jgeom.vectors[i] = jgeom.vectors[i][[0, 2, 1]] # flip the face if suffix is None: fname = f"{geom_dir}/{jname}.stl" else: fname = f"{geom_dir}/{jname}_{suffix}.stl" jgeom.save(fname) # mesh simplification with vtk # min_num_vert = 50 min_num_vert = 50 cur_num_vert = len(hull.vertices) reduction_rate = min(0.9, 1.0 - min_num_vert / cur_num_vert) quadric_mesh_decimation(fname, reduction_rate, verbose=verbose) return hull_dict def get_geom_dict( smpl_verts, smpl_jts, skin_weights, joint_names, scale_dict={}, ): vert_to_joint = skin_weights.argmax(axis=1) hull_dict = {} # create joint geometries for jind, jname in enumerate(joint_names): vind = np.where(vert_to_joint == jind)[0] if len(vind) == 0: print(f"{jname} has no vertices!") continue norm_verts = (smpl_verts[vind] - smpl_jts[jind]) * scale_dict.get(jname, 1) hull = ConvexHull(norm_verts) hull_dict[jname] = { "norm_hull": hull, "norm_verts": norm_verts, "verts": smpl_verts[vind], "volume": hull.volume } return hull_dict def update_joint_limits(joint_range): joint_range["Head"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["Head"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["Head"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["Chest"][0] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Chest"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Chest"][2] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Spine"][0] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Spine"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Spine"][2] = np.array([-np.pi / 3, 
np.pi / 3]) joint_range["Torso"][0] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Torso"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["Torso"][2] = np.array([-np.pi / 3, np.pi / 3]) ############################## joint_range["L_Thorax"][0] = np.array([-np.pi, np.pi]) joint_range["L_Thorax"][1] = np.array([-np.pi, np.pi]) joint_range["L_Thorax"][2] = np.array([-np.pi, np.pi]) joint_range["R_Thorax"][0] = np.array([-np.pi, np.pi]) joint_range["R_Thorax"][1] = np.array([-np.pi, np.pi]) joint_range["R_Thorax"][2] = np.array([-np.pi, np.pi]) joint_range["L_Shoulder"][0] = np.array([-np.pi, np.pi]) joint_range["L_Shoulder"][1] = np.array([-np.pi, np.pi]) joint_range["L_Shoulder"][2] = np.array([-np.pi, np.pi]) joint_range["R_Shoulder"][0] = np.array([-np.pi, np.pi]) joint_range["R_Shoulder"][1] = np.array([-np.pi, np.pi]) joint_range["R_Shoulder"][2] = np.array([-np.pi, np.pi]) ############################## joint_range["L_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Hip"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Hip"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Hip"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Hip"][1] = np.array([-np.pi / 3, np.pi / 3]) joint_range["R_Hip"][2] = np.array([-np.pi / 3, np.pi / 3]) joint_range["L_Knee"][0] = np.array([-np.pi, np.pi]) joint_range["L_Knee"][1] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][0] = np.array([-np.pi, np.pi]) joint_range["R_Knee"][1] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][2] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) joint_range["L_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Toe"][0] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Toe"][1] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Toe"][2] = np.array([-np.pi / 4, np.pi / 4]) return joint_range def update_joint_limits_upright(joint_range): joint_range["L_Knee"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Knee"][1] = np.array([-np.pi, np.pi]) joint_range["L_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Knee"][1] = np.array([-np.pi, np.pi]) joint_range["R_Knee"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Ankle"][0] = np.array([-np.pi / 4, np.pi / 4]) joint_range["L_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Ankle"][2] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Ankle"][0] = np.array([-np.pi / 4, np.pi / 4]) joint_range["R_Ankle"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Ankle"][2] = np.array([-np.pi / 4, np.pi / 4]) joint_range["L_Toe"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["L_Toe"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["L_Toe"][2] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Toe"][0] = np.array([-np.pi / 32, np.pi / 32]) joint_range["R_Toe"][1] = np.array([-np.pi / 2, np.pi / 2]) joint_range["R_Toe"][2] = np.array([-np.pi / 32, np.pi / 32]) return 
joint_range class Joint: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib["name"] self.type = node.attrib["type"] if "type" in node.attrib else "free" if self.type == "hinge": self.range = np.deg2rad( parse_vec(node.attrib.get("range", "-360 360"))) actu_node = (body.tree.getroot().find("actuator").find( f'motor[@joint="{self.name}"]')) if actu_node is not None: self.actuator = Actuator(actu_node, self) else: self.actuator = None self.parse_param_specs() self.param_inited = False # tunable parameters self.pos = parse_vec("0 0 0") if self.type == "hinge": self.axis = vec_to_polar(parse_vec(node.attrib["axis"])) if self.local_coord: self.pos += body.pos self.damping = (parse_vec(node.attrib["damping"]) if "damping" in node.attrib else np.array([0])) self.stiffness = (parse_vec(node.attrib["stiffness"]) if "stiffness" in node.attrib else np.array([0])) self.armature = (parse_vec(node.attrib["armature"]) if "armature" in node.attrib else np.array([0.01])) self.frictionloss = (parse_vec(node.attrib["frictionloss"]) if "frictionloss" in node.attrib else np.array([0])) # import ipdb; ipdb.set_trace() # assert np.all(self.pos == body.pos) def __repr__(self): return "joint_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("joint_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self, rename=False, index=0): pos = self.pos - self.body.pos if self.local_coord else self.pos if rename: self.name = self.body.name + "_joint_" + str(index) self.node.attrib["name"] = self.name if self.type == "hinge": axis_vec = polar_to_vec(self.axis) self.node.attrib["axis"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in axis_vec]) self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos]) self.node.attrib["damping"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.damping]) self.node.attrib["stiffness"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.stiffness]) self.node.attrib["armature"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.armature]) elif self.type == "free": pass if self.actuator is not None: self.actuator.sync_node() def get_params(self, param_list, get_name=False, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": if get_name: param_list += ["axis_theta", "axis_phi"] else: axis = normalize_range( self.axis, np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi]), ) param_list.append(axis) elif pad_zeros: param_list.append(np.zeros(2)) if self.actuator is not None: self.actuator.get_params(param_list, get_name) elif pad_zeros: param_list.append( np.zeros(3 if self.type == "free" else 1) ) # ZL currently a workaround for supporting 3D joints if "damping" in self.param_specs: if get_name: param_list.append("damping") else: if not self.param_inited and self.param_specs["damping"].get( "rel", False): self.param_specs["damping"]["lb"] += self.damping self.param_specs["damping"]["ub"] += self.damping self.param_specs["damping"]["lb"] = max( self.param_specs["damping"]["lb"], self.param_specs["damping"].get("min", -np.inf), ) self.param_specs["damping"]["ub"] = min( self.param_specs["damping"]["ub"], self.param_specs["damping"].get("max", np.inf), ) damping = normalize_range( 
self.damping, self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) param_list.append(damping.flatten()) if "armature" in self.param_specs: if get_name: param_list.append("armature") else: if not self.param_inited and self.param_specs["armature"].get( "rel", False): self.param_specs["armature"]["lb"] += self.armature self.param_specs["armature"]["ub"] += self.armature self.param_specs["armature"]["lb"] = max( self.param_specs["armature"]["lb"], self.param_specs["armature"].get("min", -np.inf), ) self.param_specs["armature"]["ub"] = min( self.param_specs["armature"]["ub"], self.param_specs["armature"].get("max", np.inf), ) armature = normalize_range( self.armature, self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) param_list.append(armature.flatten()) if "stiffness" in self.param_specs: if get_name: param_list.append("stiffness") else: if not self.param_inited and self.param_specs["stiffness"].get( "rel", False): self.param_specs["stiffness"]["lb"] += self.stiffness self.param_specs["stiffness"]["ub"] += self.stiffness self.param_specs["stiffness"]["lb"] = max( self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"].get("min", -np.inf), ) self.param_specs["stiffness"]["ub"] = min( self.param_specs["stiffness"]["ub"], self.param_specs["stiffness"].get("max", np.inf), ) stiffness = normalize_range( self.stiffness, self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) param_list.append(stiffness.flatten()) if "frictionloss" in self.param_specs: if get_name: param_list.append("frictionloss") else: if not self.param_inited and self.param_specs[ "frictionloss"].get("rel", False): self.param_specs["frictionloss"]["lb"] += self.frictionloss self.param_specs["frictionloss"]["ub"] += self.frictionloss self.param_specs["frictionloss"]["lb"] = max( self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"].get("min", -np.inf), ) self.param_specs["frictionloss"]["ub"] = min( self.param_specs["frictionloss"]["ub"], self.param_specs["frictionloss"].get("max", np.inf), ) frictionloss = normalize_range( self.frictionloss, self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) param_list.append(frictionloss.flatten()) if not get_name: self.param_inited = True # import ipdb; ipdb.set_trace() def set_params(self, params, pad_zeros=False): if "axis" in self.param_specs: if self.type == "hinge": self.axis = denormalize_range(params[:2], np.array([0, -2 * np.pi]), np.array([np.pi, 2 * np.pi])) params = params[2:] elif pad_zeros: params = params[2:] if self.actuator is not None: params = self.actuator.set_params(params) elif pad_zeros: params = params[1:] # Order of this matters!!! 
Should always be damping, aramature, stiffness (the order they are read) if "damping" in self.param_specs: self.damping = denormalize_range( params[[0]], self.param_specs["damping"]["lb"], self.param_specs["damping"]["ub"], ) params = params[1:] if "armature" in self.param_specs: self.armature = denormalize_range( params[[0]], self.param_specs["armature"]["lb"], self.param_specs["armature"]["ub"], ) params = params[1:] if "stiffness" in self.param_specs: self.stiffness = denormalize_range( params[[0]], self.param_specs["stiffness"]["lb"], self.param_specs["stiffness"]["ub"], ) params = params[1:] if "frictionloss" in self.param_specs: self.frictionloss = denormalize_range( params[[0]], self.param_specs["frictionloss"]["lb"], self.param_specs["frictionloss"]["ub"], ) params = params[1:] return params class Geom: def __init__(self, node, body): self.node = node self.body = body self.cfg = body.cfg self.local_coord = body.local_coord self.name = node.attrib.get("name", "") self.type = node.attrib["type"] self.density = (parse_vec(node.attrib["density"]) / 1000 if "density" in node.attrib else np.array([1])) self.parse_param_specs() self.param_inited = False # tunable parameters # self.size = ( # parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([0]) # ) self.size = (parse_vec(node.attrib["size"]) if "size" in node.attrib else np.array([1, 1, 1])) if self.type == "box": self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) self.pos_delta = np.array([0, 0, 0]) self.rot = parse_vec(node.attrib["quat"]) elif self.type == "sphere": self.pos_delta = np.array([0, 0, 0]) self.start = self.end = self.pos = parse_vec(node.attrib["pos"]) elif self.type == "capsule": self.start, self.end = parse_fromto(node.attrib["fromto"]) elif self.type == "mesh": self.start, self.end = body.pos.copy(), body.pos.copy() if self.local_coord: self.start += body.pos self.end += body.pos if body.bone_start is None: self.bone_start = self.start.copy() body.bone_start = self.bone_start.copy() else: self.bone_start = body.bone_start.copy() self.ext_start = np.linalg.norm( self.bone_start - self.start) ## Geom extension from bone start def __repr__(self): return "geom_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("geom_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): if self.type == "box": specs["lb"] = np.array([specs["lb"]] * 3) elif self.type == "capsule": specs["lb"] = np.array(specs["lb"]) def update_start(self): if self.type == "capsule": vec = self.bone_start - self.end self.start = self.bone_start + vec * (self.ext_start / np.linalg.norm(vec)) def sync_node(self): # self.node.attrib['name'] = self.name self.node.attrib.pop("name", None) if not self.size is None: self.node.attrib["size"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.size]) self.node.attrib["density"] = " ".join( [f"{x * 1000:.6f}".rstrip("0").rstrip(".") for x in self.density]) # if self.type == "capsule": # start = self.start - self.body.pos if self.local_coord else self.start # end = self.end - self.body.pos if self.local_coord else self.end # self.node.attrib["fromto"] = " ".join( # [ # f"{x:.6f}".rstrip("0").rstrip(".") # for x in np.concatenate([start, end]) # ] # ) # elif self.type == "box" or self.type == "sphere": # # 
self.node.attrib["pos"] = " ".join( # # [f"{x:.6f}".rstrip("0").rstrip(".") for x in self.pos + self.pos_delta] # # ) # import ipdb; ipdb.set_trace() # pass def get_params(self, param_list, get_name=False, pad_zeros=False): if "size" in self.param_specs: if get_name: param_list.append("size") else: if (self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh"): if not self.param_inited and self.param_specs["size"].get( "rel", False): self.param_specs["size"]["lb"] += self.size self.param_specs["size"]["ub"] += self.size self.param_specs["size"]["lb"] = max( self.param_specs["size"]["lb"], self.param_specs["size"].get("min", -np.inf), ) self.param_specs["size"]["ub"] = min( self.param_specs["size"]["ub"], self.param_specs["size"].get("max", np.inf), ) size = normalize_range( self.size, self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) param_list.append(size.flatten()) if pad_zeros and self.type == "capsule": param_list.append( np.zeros(2)) # capsule has needs to be 3 for GNN elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "ext_start" in self.param_specs: if get_name: param_list.append("ext_start") else: if (self.type == "capsule" or self.type == "box" or self.type == "sphere"): if not self.param_inited and self.param_specs[ "ext_start"].get("rel", False): self.param_specs["ext_start"]["lb"] += self.ext_start self.param_specs["ext_start"]["ub"] += self.ext_start self.param_specs["ext_start"]["lb"] = max( self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"].get("min", -np.inf), ) self.param_specs["ext_start"]["ub"] = min( self.param_specs["ext_start"]["ub"], self.param_specs["ext_start"].get("max", np.inf), ) ext_start = normalize_range( self.ext_start, self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) param_list.append(ext_start.flatten()) elif pad_zeros: param_list.append(np.zeros(self.size.shape)) if "density" in self.param_specs: if get_name: param_list.append("density") else: if not self.param_inited and self.param_specs["density"].get( "rel", False): self.param_specs["density"]["lb"] += self.density self.param_specs["density"]["ub"] += self.density self.param_specs["density"]["lb"] = max( self.param_specs["density"]["lb"], self.param_specs["density"].get("min", -np.inf), ) self.param_specs["density"]["ub"] = min( self.param_specs["density"]["ub"], self.param_specs["density"].get("max", np.inf), ) density = normalize_range( self.density, self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) param_list.append(density.flatten()) # if pad_zeros: # param_list.append(np.zeros(self.density.shape)) if "pos_delta" in self.param_specs: if get_name: param_list.append("pos_delta") else: if self.type == "box" or self.type == "sphere": if not self.param_inited and self.param_specs[ "pos_delta"].get("rel", False): self.param_specs["pos_delta"]["lb"] += self.density self.param_specs["pos_delta"]["ub"] += self.density self.param_specs["pos_delta"]["lb"] = max( self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"].get("min", -np.inf), ) self.param_specs["pos_delta"]["ub"] = min( self.param_specs["pos_delta"]["ub"], self.param_specs["pos_delta"].get("max", np.inf), ) pos_delta = normalize_range( self.pos_delta, self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) param_list.append(pos_delta.flatten()) elif pad_zeros: param_list.append(np.zeros(3)) if not get_name: self.param_inited = True def set_params(self, params, pad_zeros=False): if "size" in 
self.param_specs: if (self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh"): if len(self.size) == 1: self.size = denormalize_range( params[[0]], self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[1:] elif len(self.size) == 3: self.size = denormalize_range( np.array(params[:3]), self.param_specs["size"]["lb"], self.param_specs["size"]["ub"], ) params = params[3:] elif pad_zeros: params = params[1:] if "ext_start" in self.param_specs: if self.type == "capsule" or self.type == "box" or self.type == "sphere": self.ext_start = denormalize_range( params[[0]], self.param_specs["ext_start"]["lb"], self.param_specs["ext_start"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "density" in self.param_specs: if (self.type == "capsule" or self.type == "box" or self.type == "sphere" or self.type == "mesh"): self.density = denormalize_range( params[[0]], self.param_specs["density"]["lb"], self.param_specs["density"]["ub"], ) params = params[1:] elif pad_zeros: params = params[1:] if "pos_delta" in self.param_specs: if self.type == "box" or self.type == "sphere": self.pos_delta = denormalize_range( np.array(params[:3]), self.param_specs["pos_delta"]["lb"], self.param_specs["pos_delta"]["ub"], ) params = params[3:] elif pad_zeros: params = params[3:] return params class Actuator: def __init__(self, node, joint): self.node = node self.joint = joint self.cfg = joint.cfg self.joint_name = node.attrib["joint"] self.name = self.joint_name self.parse_param_specs() self.param_inited = False # tunable parameters self.gear = float(node.attrib["gear"]) def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("actuator_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) def sync_node(self): self.node.attrib["gear"] = f"{self.gear:.6f}".rstrip("0").rstrip(".") self.name = self.joint.name self.node.attrib["name"] = self.name self.node.attrib["joint"] = self.joint.name def get_params(self, param_list, get_name=False): if "gear" in self.param_specs: if get_name: param_list.append("gear") else: if not self.param_inited and self.param_specs["gear"].get( "rel", False): self.param_specs["gear"]["lb"] += self.gear self.param_specs["gear"]["ub"] += self.gear self.param_specs["gear"]["lb"] = max( self.param_specs["gear"]["lb"], self.param_specs["gear"].get("min", -np.inf), ) self.param_specs["gear"]["ub"] = min( self.param_specs["gear"]["ub"], self.param_specs["gear"].get("max", np.inf), ) gear = normalize_range( self.gear, self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) param_list.append(np.array([gear])) if not get_name: self.param_inited = True def set_params(self, params): if "gear" in self.param_specs: self.gear = denormalize_range( params[0].item(), self.param_specs["gear"]["lb"], self.param_specs["gear"]["ub"], ) params = params[1:] return params class Body: def __init__(self, node, parent_body, robot, cfg, new_body=False): self.node = node self.parent = parent_body self.new_body = new_body if parent_body is not None: parent_body.child.append(self) parent_body.cind += 1 self.depth = parent_body.depth + 1 else: self.depth = 0 self.robot = robot self.cfg = cfg self.tree = robot.tree self.local_coord = robot.local_coord self.name = (node.attrib["name"] if "name" in node.attrib else self.parent.name + 
f"_child{len(self.parent.child)}") self.child = [] self.cind = 0 self.pos = parse_vec(node.attrib["pos"]) if self.local_coord and parent_body is not None: self.pos += parent_body.pos if cfg.get("init_root_from_geom", False): self.bone_start = None if parent_body is None else self.pos.copy() else: self.bone_start = self.pos.copy() self.joints = [Joint(x, self) for x in node.findall('joint[@type="hinge"]')] + \ [Joint(x, self) for x in node.findall('joint[@type="free"]')] + \ [Joint(x, self) for x in node.findall('freejoint')] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] supported_geoms = self.cfg.get("supported_geoms", ["capsule", "box"]) self.geoms = [ Geom(x, self) for geom_type in supported_geoms for x in node.findall(f'geom[@type="{geom_type}"]') ] # self.geoms = [Geom(x, self) for x in node.findall('geom[@type="capsule"]')] + [Geom(x, self) for x in node.findall('geom[@type="sphere"]')] + [Geom(x, self) for x in node.findall('geom[@type="box"]')] self.parse_param_specs() self.param_inited = False # parameters self.bone_end = None self.bone_offset = None def __repr__(self): return "body_" + self.name def parse_param_specs(self): self.param_specs = deepcopy(self.cfg.get("body_params", {})) for name, specs in self.param_specs.items(): if "lb" in specs and isinstance(specs["lb"], list): specs["lb"] = np.array(specs["lb"]) if "ub" in specs and isinstance(specs["ub"], list): specs["ub"] = np.array(specs["ub"]) if name == "bone_ang": specs["lb"] = np.deg2rad(specs["lb"]) specs["ub"] = np.deg2rad(specs["ub"]) def reindex(self): if self.parent is None: self.index = "0" else: ind = self.parent.child.index(self) + 1 pname = "" if self.parent.index == "0" else self.parent.index self.index = str(ind) + pname if self.new_body: self.name = self.index def init(self): if len(self.child) > 0: bone_ends = [x.bone_start for x in self.child] else: bone_ends = [x.end for x in self.geoms] if len(bone_ends) > 0: self.bone_end = np.mean(np.stack(bone_ends), axis=0) self.bone_offset = self.bone_end - self.bone_start def get_actuator_name(self): for joint in self.joints: if joint.actuator is not None: return joint.actuator.name def get_joint_range(self): assert len(self.joints) == 1 return self.joints[0].range def sync_node(self): pos = (self.pos - self.parent.pos if self.local_coord and self.parent is not None else self.pos) self.node.attrib["name"] = self.name self.node.attrib["pos"] = " ".join( [f"{x:.6f}".rstrip("0").rstrip(".") for x in pos]) for idx, joint in enumerate(self.joints): joint.sync_node(rename=self.new_body, index=idx) for geom in self.geoms: geom.sync_node() def sync_geom(self): for geom in self.geoms: geom.bone_start = self.bone_start.copy() # geom.end = self.bone_end.copy() # geom.update_start() def sync_joint(self): if self.parent is not None: for joint in self.joints: joint.pos = self.pos.copy() def rebuild(self): if self.parent is not None: # self.bone_start = self.parent.bone_end.copy() self.pos = self.bone_start.copy() if self.bone_offset is not None: self.bone_end = self.bone_start + self.bone_offset if self.parent is None and self.cfg.get("no_root_offset", False): self.bone_end = self.bone_start self.sync_geom() self.sync_joint() def get_params(self, param_list, get_name=False, pad_zeros=False, demap_params=False): if self.bone_offset is not None and "offset" in self.param_specs: if get_name: if self.param_specs["offset"]["type"] == "xz": param_list += ["offset_x", "offset_z"] elif self.param_specs["offset"]["type"] == "xy": param_list += ["offset_x", 
"offset_y"] else: param_list += ["offset_x", "offset_y", "offset_z"] else: if self.param_specs["offset"]["type"] == "xz": offset = self.bone_offset[[0, 2]] elif self.param_specs["offset"]["type"] == "xy": offset = self.bone_offset[[0, 1]] else: offset = self.bone_offset if not self.param_inited and self.param_specs["offset"].get( "rel", False): self.param_specs["offset"]["lb"] += offset self.param_specs["offset"]["ub"] += offset self.param_specs["offset"]["lb"] = np.maximum( self.param_specs["offset"]["lb"], self.param_specs["offset"].get( "min", np.full_like(offset, -np.inf)), ) self.param_specs["offset"]["ub"] = np.minimum( self.param_specs["offset"]["ub"], self.param_specs["offset"].get( "max", np.full_like(offset, np.inf)), ) offset = normalize_range( offset, self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) param_list.append(offset.flatten()) if self.bone_offset is not None and "bone_len" in self.param_specs: if get_name: param_list += ["bone_len"] else: bone_len = np.linalg.norm(self.bone_offset) if not self.param_inited and self.param_specs["bone_len"].get( "rel", False): self.param_specs["bone_len"]["lb"] += bone_len self.param_specs["bone_len"]["ub"] += bone_len self.param_specs["bone_len"]["lb"] = max( self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"].get("min", -np.inf), ) self.param_specs["bone_len"]["ub"] = min( self.param_specs["bone_len"]["ub"], self.param_specs["bone_len"].get("max", np.inf), ) bone_len = normalize_range( bone_len, self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) param_list.append(np.array([bone_len])) if self.bone_offset is not None and "bone_ang" in self.param_specs: if get_name: param_list += ["bone_ang"] else: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if not self.param_inited and self.param_specs["bone_ang"].get( "rel", False): self.param_specs["bone_ang"]["lb"] += bone_ang self.param_specs["bone_ang"]["ub"] += bone_ang self.param_specs["bone_ang"]["lb"] = max( self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"].get("min", -np.inf), ) self.param_specs["bone_ang"]["ub"] = min( self.param_specs["bone_ang"]["ub"], self.param_specs["bone_ang"].get("max", np.inf), ) bone_ang = normalize_range( bone_ang, self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) param_list.append(np.array([bone_ang])) for joint in self.joints: joint.get_params(param_list, get_name, pad_zeros) for geom in self.geoms: geom.get_params(param_list, get_name, pad_zeros) if not get_name: self.param_inited = True if demap_params and not get_name and len(param_list) > 0: params = self.robot.demap_params(np.concatenate(param_list)) return params def set_params(self, params, pad_zeros=False, map_params=False): if map_params: params = self.robot.map_params(params) if self.bone_offset is not None and "offset" in self.param_specs: if self.param_specs["offset"]["type"] in {"xz", "xy"}: offset = denormalize_range( params[:2], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 if self.param_specs["offset"]["type"] == "xz": self.bone_offset[[0, 2]] = offset elif self.param_specs["offset"]["type"] == "xy": self.bone_offset[[0, 1]] = offset params = params[2:] else: offset = denormalize_range( params[:3], self.param_specs["offset"]["lb"], self.param_specs["offset"]["ub"], ) if np.all(offset == 0.0): offset[0] += 1e-8 self.bone_offset[:] = offset params = params[3:] if self.bone_offset is not None and "bone_len" in 
self.param_specs: bone_len = denormalize_range( params[0].item(), self.param_specs["bone_len"]["lb"], self.param_specs["bone_len"]["ub"], ) bone_len = max(bone_len, 1e-4) params = params[1:] elif self.bone_offset is not None: bone_len = np.linalg.norm(self.bone_offset) if self.bone_offset is not None and "bone_ang" in self.param_specs: bone_ang = denormalize_range( params[0].item(), self.param_specs["bone_ang"]["lb"], self.param_specs["bone_ang"]["ub"], ) params = params[1:] elif self.bone_offset is not None: bone_ang = math.atan2(self.bone_offset[2], self.bone_offset[0]) if "bone_len" in self.param_specs or "bone_ang" in self.param_specs: self.bone_offset = np.array([ bone_len * math.cos(bone_ang), 0, bone_len * math.sin(bone_ang) ]) for joint in self.joints: params = joint.set_params(params, pad_zeros) for geom in self.geoms: params = geom.set_params(params, pad_zeros) # rebuild bone, geom, joint self.rebuild() return params class Robot: def __init__(self, cfg, data_dir="data/smpl"): self.bodies = [] self.weight = 0 self.height = 0 self.cfg = cfg self.model_dirs = [] self.param_mapping = cfg.get("param_mapping", "clip") self.smpl_model = cfg.get("model", "smpl") self.mesh = cfg.get("mesh", False) self.replace_feet = cfg.get("replace_feet", True) self.gender = cfg.get("gender", "neutral") self.flatfoot = cfg.get("flatfoot", True) self.upright_start = cfg.get("upright_start", True) if self.upright_start: print("!!!! Using modified SMPL starting pose !!!!") self.remove_toe = cfg.get("remove_toe", False) self.big_ankle = cfg.get("big_ankle", False) self.real_weight = cfg.get("real_weight", False) self.real_weight_porpotion = cfg.get("real_weight_porpotion", False) self.rel_joint_lm = cfg.get("rel_joint_lm", True) # Rolling this out worldwide!! os.makedirs("/tmp/smpl/", exist_ok=True) self.masterfoot = cfg.get("masterfoot", False) self.param_specs = self.cfg.get("body_params", {}) self.hull_dict = {} self.beta = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) if self.smpl_model == "smpl": self.smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") self.smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") self.smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") elif self.smpl_model == "smplh": self.smpl_parser_n = SMPLH_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLH_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLH_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) elif self.smpl_model == "smplx": self.smpl_parser_n = SMPLX_Parser( model_path=data_dir, gender="neutral", use_pca=False, create_transl=False, ) self.smpl_parser_m = SMPLX_Parser(model_path=data_dir, gender="male", use_pca=False, create_transl=False) self.smpl_parser_f = SMPLX_Parser(model_path=data_dir, gender="female", use_pca=False, create_transl=False) self.load_from_skeleton() atexit.register(self.remove_geoms) def remove_geoms(self): while len(self.model_dirs) > 0: geom_dir = self.model_dirs.pop(0) if osp.isdir(geom_dir): shutil.rmtree(geom_dir, ignore_errors=True) def get_joint_vertices(self, pose_aa, th_betas=None, th_trans=None, gender=[0]): if gender[0] == 0: smpl_parser = self.smpl_parser_n elif gender[0] == 1: smpl_parser = self.smpl_parser_m elif gender[0] == 2: smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") vertices, joints = 
smpl_parser.get_joints_verts(pose=pose_aa, th_betas=th_betas, th_trans=th_trans) return vertices, joints def load_from_skeleton( self, betas=None, v_template=None, gender=[0], objs_info=None, obj_pose=None, params=None, ): self.tree = None # xml tree if gender[0] == 0: self.smpl_parser = smpl_parser = self.smpl_parser_n elif gender[0] == 1: self.smpl_parser = smpl_parser = self.smpl_parser_m elif gender[0] == 2: self.smpl_parser = smpl_parser = self.smpl_parser_f else: print(gender) raise Exception("Gender Not Supported!!") if betas is None and self.beta is None: betas = (torch.zeros( (1, 10)).float() if self.smpl_model == "smpl" else torch.zeros( (1, 16)).float()) else: if params is None: self.beta = betas if not betas is None else self.beta else: # If params is not none, we need to set the beta first betas = self.map_params(betas) self.beta = torch.from_numpy( denormalize_range( betas.numpy().squeeze(), self.param_specs["beta"]["lb"], self.param_specs["beta"]["ub"], )[None, ])
if flags.debug:
7
2023-10-31 20:47:12+00:00
24k
Improbable-AI/dexenv
dexenv/envs/dclaw_multiobjs.py
[ { "identifier": "DClawBase", "path": "dexenv/envs/dclaw_base.py", "snippet": "class DClawBase(VecTask):\n\n def __init__(self, cfg, sim_device, rl_device, graphics_device_id):\n\n self.cfg = cfg\n headless = self.cfg.headless\n self.randomize = self.cfg[\"task\"][\"randomize\"]\n if self.randomize:\n logger.warning(f'Domain randomization is enabled!')\n self.randomization_params = self.cfg[\"task\"][\"randomization_params\"]\n self.aggregate_mode = self.cfg[\"env\"][\"aggregateMode\"]\n\n self.dist_reward_scale = self.cfg[\"env\"][\"rew\"][\"distRewardScale\"]\n self.rot_reward_scale = self.cfg[\"env\"][\"rew\"][\"rotRewardScale\"]\n self.success_tolerance = self.cfg[\"env\"][\"rew\"][\"successTolerance\"]\n self.reach_goal_bonus = self.cfg[\"env\"][\"rew\"][\"reachGoalBonus\"]\n self.fall_dist = self.cfg[\"env\"][\"rew\"][\"fallDistance\"]\n self.fall_penalty = self.cfg[\"env\"][\"rew\"][\"fallPenalty\"]\n self.rot_eps = self.cfg[\"env\"][\"rew\"][\"rotEps\"]\n\n self.vel_obs_scale = 0.2 # scale factor of velocity based observations\n self.force_torque_obs_scale = 10.0 # scale factor of velocity based observations\n\n self.reset_position_noise = self.cfg[\"env\"][\"resetPositionNoise\"]\n self.reset_rotation_noise = self.cfg[\"env\"][\"resetRotationNoise\"]\n self.reset_dof_pos_noise = self.cfg[\"env\"][\"resetDofPosRandomInterval\"]\n self.reset_dof_vel_noise = self.cfg[\"env\"][\"resetDofVelRandomInterval\"]\n\n self.force_scale = self.cfg[\"env\"].get(\"forceScale\", 0.0)\n self.force_prob_range = self.cfg[\"env\"].get(\"forceProbRange\", [0.001, 0.1])\n self.force_decay = self.cfg[\"env\"].get(\"forceDecay\", 0.99)\n self.force_decay_interval = self.cfg[\"env\"].get(\"forceDecayInterval\", 0.08)\n\n self.dclaw_dof_speed_scale = self.cfg[\"env\"][\"dofSpeedScale\"]\n # self.act_moving_average = self.cfg[\"env\"][\"actionsMovingAverage\"]\n\n self.debug_viz = self.cfg[\"env\"][\"enableDebugVis\"]\n\n self.max_episode_length = self.cfg[\"env\"][\"episodeLength\"]\n self.reset_time = self.cfg[\"env\"].get(\"resetTime\", -1.0)\n self.print_success_stat = self.cfg[\"env\"][\"printNumSuccesses\"]\n self.max_consecutive_successes = self.cfg[\"env\"][\"maxConsecutiveSuccesses\"]\n self.av_factor = self.cfg[\"env\"].get(\"averFactor\", 0.1)\n\n self.object_type = self.cfg[\"env\"][\"objectType\"]\n\n self.asset_files_dict = {\n \"block\": \"urdf/objects/cube_multicolor.urdf\",\n \"egg\": \"mjcf/open_ai_assets/hand/egg.xml\",\n \"airplane\": \"single_objects/airplane/model.urdf\",\n 'power_drill': 'single_objects/power_drill/model.urdf',\n 'mug': 'single_objects/mug/model.urdf',\n 'elephant': 'asymm/train/elephant/var_000/model.urdf',\n 'train': 'asymm/train/train/var_000/model.urdf',\n 'stanford_bunny': 'asymm/train/stanford_bunny/var_004/model.urdf'\n\n }\n self.objs_in_isaacgym = ['block', 'egg']\n\n if \"asset\" in self.cfg[\"env\"]:\n self.asset_files_dict[\"block\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameBlock\",\n self.asset_files_dict[\"block\"])\n self.asset_files_dict[\"egg\"] = self.cfg[\"env\"][\"asset\"].get(\"assetFileNameEgg\",\n self.asset_files_dict[\"egg\"])\n\n self.obs_type = self.cfg[\"env\"][\"observationType\"]\n\n if not (self.obs_type in [\"full_no_vel\", \"full\", \"full_state\"]):\n raise Exception(\n \"Unknown type of observations!\\nobservationType should be one of: [openai, full_no_vel, full, full_state]\")\n\n print(\"Obs type:\", self.obs_type)\n\n ## TODO: change value here\n self.num_obs_dict = {\n \"full_no_vel\": 42,\n \"full\": 87,\n 
\"full_state\": 114\n }\n\n self.up_axis = 'z'\n\n num_states = 0\n\n self.cfg[\"env\"][\"numObservations\"] = self.num_obs_dict[self.obs_type]\n self.cfg[\"env\"][\"numStates\"] = num_states\n self.cfg[\"env\"][\"numActions\"] = 12\n self.hist_buf_reset_env_ids = None\n\n super().__init__(config=self.cfg,\n sim_device=sim_device,\n rl_device=rl_device,\n graphics_device_id=graphics_device_id,\n headless=headless)\n\n self.dt = self.sim_params.dt\n control_freq_inv = self.cfg[\"env\"].get(\"controlFrequencyInv\", 1)\n if self.reset_time > 0.0:\n self.max_episode_length = int(round(self.reset_time / (control_freq_inv * self.dt)))\n print(\"Reset time: \", self.reset_time)\n print(\"New episode length: \", self.max_episode_length)\n\n if self.viewer != None:\n cam_pos = gymapi.Vec3(0.16, -0.5, 0.5)\n cam_target = gymapi.Vec3(0.0, 0.0, 0.15)\n self.gym.viewer_camera_look_at(self.viewer, None, cam_pos, cam_target)\n\n actor_root_state_tensor = self.gym.acquire_actor_root_state_tensor(self.sim)\n dof_state_tensor = self.gym.acquire_dof_state_tensor(self.sim)\n rigid_body_tensor = self.gym.acquire_rigid_body_state_tensor(self.sim)\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n sensor_tensor = self.gym.acquire_force_sensor_tensor(self.sim)\n self.vec_sensor_tensor = gymtorch.wrap_tensor(sensor_tensor).view(self.num_envs, self.num_fingertips * 6)\n\n dof_force_tensor = self.gym.acquire_dof_force_tensor(self.sim)\n self.dof_force_tensor = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs,\n self.num_dclaw_dofs)\n\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n self.dof_state = gymtorch.wrap_tensor(dof_state_tensor)\n self.dclaw_dof_state = self.dof_state.view(self.num_envs, -1, 2)[:, :self.num_dclaw_dofs]\n self.dclaw_dof_pos = self.dclaw_dof_state[..., 0]\n self.dclaw_dof_vel = self.dclaw_dof_state[..., 1]\n if self.cfg.env.dof_torque_on:\n self.dclaw_dof_torque = gymtorch.wrap_tensor(dof_force_tensor).view(self.num_envs, -1)\n else:\n self.dclaw_dof_torque = None\n\n self.rigid_body_states = gymtorch.wrap_tensor(rigid_body_tensor).view(self.num_envs, -1, 13)\n self.num_bodies = self.rigid_body_states.shape[1]\n\n self.root_state_tensor = gymtorch.wrap_tensor(actor_root_state_tensor).view(-1, 13)\n\n if self.cfg.env.rew.pen_tb_contact:\n _net_cf = self.gym.acquire_net_contact_force_tensor(self.sim)\n self.net_contact_force = gymtorch.wrap_tensor(_net_cf).view(self.num_envs, -1, 3)\n table_handle = self.gym.find_actor_handle(self.envs[0], 'table')\n self.table_body_index = self.gym.find_actor_rigid_body_index(self.envs[0],\n table_handle,\n 'table',\n gymapi.DOMAIN_ENV)\n logger.warning(f'Table body index:{self.table_body_index}')\n self.table_contact_force = self.net_contact_force[:, self.table_body_index]\n\n self.num_dofs = self.gym.get_sim_dof_count(self.sim) // self.num_envs\n self.prev_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n self.cur_targets = torch.zeros((self.num_envs, self.num_dofs), dtype=torch.float, device=self.device)\n\n self.global_indices = torch.arange(self.num_envs * 3, dtype=torch.int32, device=self.device).view(self.num_envs, -1)\n\n self.reset_goal_buf = self.reset_buf.clone()\n self.successes = torch.zeros(self.num_envs, dtype=torch.float, device=self.device)\n 
self.consecutive_successes = torch.zeros(1, dtype=torch.float, device=self.device)\n\n self.av_factor = to_torch(self.av_factor, dtype=torch.float, device=self.device)\n\n self.total_successes = 0\n self.total_resets = 0\n\n self.force_decay = to_torch(self.force_decay, dtype=torch.float, device=self.device)\n self.force_prob_range = to_torch(self.force_prob_range, dtype=torch.float, device=self.device)\n self.random_force_prob = torch.exp((torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(self.num_envs, device=self.device) + torch.log(\n self.force_prob_range[1]))\n\n self.rb_forces = torch.zeros((self.num_envs, self.num_bodies, 3), dtype=torch.float, device=self.device)\n\n self.num_actions = self.num_dclaw_dofs\n self.actions = self.zero_actions()\n DClawBase.compute_observations(self)\n self.num_observations = self.obs_buf.shape[-1]\n self.cfg.env.numObservations = self.num_observations\n self.create_ob_act_space()\n\n def create_sim(self):\n self.dt = self.cfg[\"sim\"][\"dt\"]\n self.up_axis_idx = self.set_sim_params_up_axis(self.sim_params, self.up_axis)\n\n self.sim = super().create_sim(self.device_id, self.graphics_device_id, self.physics_engine, self.sim_params)\n self._create_ground_plane()\n self._create_envs(self.num_envs, self.cfg[\"env\"]['envSpacing'], int(np.sqrt(self.num_envs)))\n\n if self.randomize:\n self.apply_randomizations(self.randomization_params)\n\n def _create_ground_plane(self):\n plane_params = gymapi.PlaneParams()\n plane_params.normal = gymapi.Vec3(0.0, 0.0, 1.0)\n plane_params.distance = 0.1\n self.gym.add_ground(self.sim, plane_params)\n\n def _create_envs(self, num_envs, spacing, num_per_row):\n lower = gymapi.Vec3(-spacing, -spacing, 0.0)\n upper = gymapi.Vec3(spacing, spacing, spacing)\n\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix()\n object_asset_file = self.asset_files_dict[self.object_type]\n\n dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root)\n table_asset = self.get_table_asset()\n table_pose = self.get_table_pose()\n\n if self.obs_type == \"full_state\":\n sensor_pose = gymapi.Transform()\n for ft_handle in self.fingertip_handles:\n self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose)\n\n if self.object_type in self.objs_in_isaacgym:\n asset_root = get_module_path('isaacgymenvs').parent.joinpath('assets').as_posix()\n else:\n asset_root = dexenv.LIB_PATH.joinpath('assets').as_posix()\n\n object_asset_options = gymapi.AssetOptions()\n if self.cfg.env.vhacd:\n object_asset_options.convex_decomposition_from_submeshes = True\n\n object_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n object_asset_options.disable_gravity = True\n goal_asset = self.gym.load_asset(self.sim, asset_root, object_asset_file, object_asset_options)\n\n dclaw_start_pose = self.get_dclaw_start_pose()\n object_start_pose = self.get_object_start_pose(dclaw_start_pose)\n\n goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose)\n\n self.dclaws = []\n self.envs = []\n\n self.object_init_state = []\n self.hand_start_states = []\n\n self.hand_indices = []\n self.fingertip_indices = []\n self.object_indices = []\n self.goal_object_indices = []\n\n self.render_camera_handles = []\n if self.cfg.rgb_render:\n render_cam_pose, render_cam_params = self.get_visual_render_camera_setup()\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n 
print(f'Fingertip handles:{self.fingertip_handles}')\n\n dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset)\n object_rb_count = self.gym.get_asset_rigid_body_count(object_asset)\n object_rs_count = self.gym.get_asset_rigid_shape_count(object_asset)\n self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count))\n self.object_handles = []\n\n max_agg_bodies = self.num_dclaw_bodies + 2 * object_rb_count + 1\n max_agg_shapes = self.num_dclaw_shapes + 2 * object_rs_count + 1\n\n for i in range(self.num_envs):\n env_ptr = self.gym.create_env(\n self.sim, lower, upper, num_per_row\n )\n\n if self.aggregate_mode >= 1:\n self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True)\n\n self.create_hand_actor(env_ptr=env_ptr,\n dclaw_asset=dclaw_asset,\n dclaw_start_pose=dclaw_start_pose,\n dclaw_dof_props=dclaw_dof_props,\n env_id=i)\n\n object_handle = self.gym.create_actor(env_ptr, object_asset, object_start_pose, \"object\", i, 0, 1)\n self.object_handles.append(object_handle)\n self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z,\n object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z,\n object_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM)\n self.object_indices.append(object_idx)\n\n goal_handle = self.gym.create_actor(env_ptr, goal_asset, goal_start_pose, \"goal_object\", i + self.num_envs,\n 0, 2)\n goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM)\n self.goal_object_indices.append(goal_object_idx)\n\n if self.cfg.env.blockscale is not None and self.cfg.env.objectType == 'block':\n blockscale = float(self.cfg.env.blockscale)\n self.gym.set_actor_scale(env_ptr, object_handle, blockscale)\n self.gym.set_actor_scale(env_ptr, goal_handle, blockscale)\n\n if self.object_type != \"block\":\n self.gym.set_rigid_body_color(\n env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n self.gym.set_rigid_body_color(\n env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(0.6, 0.72, 0.98))\n table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, \"table\", i, 0)\n\n if self.cfg.rgb_render:\n render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params)\n self.render_camera_handles.append(render_camera_handle[0])\n\n if self.aggregate_mode > 0:\n self.gym.end_aggregate(env_ptr)\n\n self.envs.append(env_ptr)\n\n self.setup_torch_states()\n\n def create_camera(self, camera_poses, env_ptr, camera_params):\n cam_handles = []\n for ic in range(min(len(camera_poses), self.cfg.cam.cam_num)):\n camera_handle = self.gym.create_camera_sensor(env_ptr, camera_params)\n if isinstance(camera_poses[ic], tuple):\n self.gym.set_camera_location(camera_handle, env_ptr, camera_poses[ic][0], camera_poses[ic][1])\n else:\n self.gym.set_camera_transform(camera_handle, env_ptr, camera_poses[ic])\n cam_handles.append(camera_handle)\n return cam_handles\n\n def get_visual_render_camera_setup(self):\n cam_pos = np.array([-0.7, 0, 0.5])\n cam_focus_pt = np.array([0.08, 0, 0.15])\n cam_focus_pt = gymapi.Vec3(*cam_focus_pt)\n cam_pos = gymapi.Vec3(*cam_pos)\n camera_poses = [(cam_pos, cam_focus_pt)]\n camera_params = get_camera_params(width=self.cfg.cam.visual_render_width,\n height=self.cfg.cam.visual_render_height,\n hov=45,\n cuda=False)\n return camera_poses, camera_params\n\n def create_hand_actor(self, env_ptr, dclaw_asset, dclaw_start_pose, 
dclaw_dof_props, env_id):\n dclaw_actor = self.gym.create_actor(env_ptr, dclaw_asset, dclaw_start_pose, \"hand\", env_id, 0, 0)\n if self.cfg.env.dof_torque_on:\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.hand_start_states.append(\n [dclaw_start_pose.p.x, dclaw_start_pose.p.y, dclaw_start_pose.p.z,\n dclaw_start_pose.r.x, dclaw_start_pose.r.y, dclaw_start_pose.r.z,\n dclaw_start_pose.r.w,\n 0, 0, 0, 0, 0, 0])\n self.gym.set_actor_dof_properties(env_ptr, dclaw_actor, dclaw_dof_props)\n hand_idx = self.gym.get_actor_index(env_ptr, dclaw_actor, gymapi.DOMAIN_SIM)\n self.hand_indices.append(hand_idx)\n\n self.gym.set_actor_dof_states(env_ptr, dclaw_actor, self.dclaw_default_dof_states, gymapi.STATE_ALL)\n if self.obs_type == \"full_state\":\n self.gym.enable_actor_dof_force_sensors(env_ptr, dclaw_actor)\n self.dclaws.append(dclaw_actor)\n self.set_hand_color(env_ptr, dclaw_actor)\n\n def set_hand_color(self, env_ptr, dclaw_actor):\n rgd_dict = self.gym.get_actor_rigid_body_dict(env_ptr, dclaw_actor)\n for bd, bd_id in rgd_dict.items():\n if bd not in dclaw_body_color_mapping:\n continue\n color = gymapi.Vec3(*dclaw_body_color_mapping[bd])\n self.gym.set_rigid_body_color(env_ptr, dclaw_actor,\n bd_id, gymapi.MESH_VISUAL,\n color)\n\n def get_table_asset(self):\n asset_options = gymapi.AssetOptions()\n asset_options.armature = 0.001\n asset_options.fix_base_link = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n table_dims = gymapi.Vec3(0.6, 0.6, 0.1)\n table_asset = self.gym.create_box(self.sim,\n table_dims.x,\n table_dims.y,\n table_dims.z,\n asset_options)\n table_props = self.gym.get_asset_rigid_shape_properties(table_asset)\n for p in table_props:\n p.friction = self.cfg.env.table.friction\n p.torsion_friction = self.cfg.env.table.torsion_friction\n p.restitution = self.cfg.env.table.restitution\n p.rolling_friction = self.cfg.env.table.rolling_friction\n self.gym.set_asset_rigid_shape_properties(table_asset, table_props)\n return table_asset\n\n def get_table_pose(self):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n object_start_pose.p.x = 0\n object_start_pose.p.y = 0\n object_start_pose.p.z = -0.05\n return object_start_pose\n\n def get_dclaw_start_pose(self):\n dclaw_start_pose = gymapi.Transform()\n dclaw_start_pose.p = gymapi.Vec3(*get_axis_params(0.25, self.up_axis_idx))\n dclaw_start_pose.r = gymapi.Quat.from_axis_angle(gymapi.Vec3(0, 1, 0), np.pi)\n return dclaw_start_pose\n\n def setup_torch_states(self):\n self.render_rgb_obs_buf = None\n if self.cfg.rgb_render:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.9, 0.9, 0.9), gymapi.Vec3(0, 0, 0))\n else:\n self.gym.set_light_parameters(self.sim, 0, gymapi.Vec3(0.9, 0.9, 0.9),\n gymapi.Vec3(0.7, 0.7, 0.7), gymapi.Vec3(0, 0, 0))\n self.object_init_state = to_torch(self.object_init_state, device=self.device, dtype=torch.float).view(\n self.num_envs, 13)\n self.goal_states = self.object_init_state.clone()\n self.goal_states[:, self.up_axis_idx] -= 0.04\n self.goal_init_state = self.goal_states.clone()\n self.hand_start_states = to_torch(self.hand_start_states, device=self.device).view(self.num_envs, 13)\n\n self.fingertip_handles = to_torch(self.fingertip_handles, dtype=torch.long, device=self.device)\n self.object_rb_handles = to_torch(self.object_rb_handles, dtype=torch.long, device=self.device)\n self.object_rb_masses = None\n self.update_obj_mass()\n self.hand_indices = to_torch(self.hand_indices, 
dtype=torch.long, device=self.device)\n self.object_indices = to_torch(self.object_indices, dtype=torch.long, device=self.device)\n self.goal_object_indices = to_torch(self.goal_object_indices, dtype=torch.long, device=self.device)\n\n def get_dclaw_asset(self, asset_root=None, asset_options=None):\n # load dclaw asset\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.flip_visual_attachments = False\n asset_options.fix_base_link = True\n asset_options.collapse_fixed_joints = False\n asset_options.disable_gravity = False\n asset_options.thickness = 0.001\n asset_options.angular_damping = 0.01\n asset_options.override_inertia = True\n asset_options.override_com = True\n logger.info(f'VHACD:{self.cfg.env.vhacd}')\n if self.cfg.env.vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n if self.cfg.physics_engine == \"physx\":\n # if self.physics_engine == gymapi.SIM_PHYSX:\n asset_options.use_physx_armature = True\n asset_options.default_dof_drive_mode = gymapi.DOF_MODE_POS\n\n if asset_root is None:\n asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw_4f').as_posix()\n robot_name = self.cfg.env.robot\n asset_root = pathlib_file(asset_root).parent.joinpath(f'{robot_name}').as_posix()\n dclaw_asset = self.gym.load_asset(self.sim, asset_root, f\"{robot_name}.urdf\", asset_options)\n print(f'Dclaw asset root:{asset_root} robot name:{robot_name}')\n\n self.num_dclaw_bodies = self.gym.get_asset_rigid_body_count(dclaw_asset)\n self.num_dclaw_shapes = self.gym.get_asset_rigid_shape_count(dclaw_asset)\n self.num_dclaw_dofs = self.gym.get_asset_dof_count(dclaw_asset)\n\n print(f'D-Claw:')\n print(f'\\t Number of bodies: {self.num_dclaw_bodies}')\n print(f'\\t Number of shapes: {self.num_dclaw_shapes}')\n print(f'\\t Number of dofs: {self.num_dclaw_dofs}')\n\n self.dclaw_asset_dof_dict = self.gym.get_asset_dof_dict(dclaw_asset)\n joint_names = self.dclaw_asset_dof_dict.keys()\n logger.info(f'Joint names:{joint_names}')\n\n self.dof_joint_indices = list(self.dclaw_asset_dof_dict.values())\n dinds = np.array(self.dof_joint_indices)\n assert np.all(np.diff(dinds) > 0) # check if it's in a sorted order (ascending)\n\n rb_links = self.gym.get_asset_rigid_body_names(dclaw_asset)\n self.fingertips = [x for x in rb_links if 'tip_link' in x] # [\"one_tip_link\", \"two_tip_link\", \"three_tip_link\"]\n self.num_fingertips = len(self.fingertips)\n\n print(f'Number of fingertips:{self.num_fingertips} Fingertips:{self.fingertips}')\n\n print(f'Actuator --- DoF Index')\n for act_name, act_index in zip(joint_names, self.dof_joint_indices):\n print(f'\\t {act_name} {act_index}')\n\n dclaw_dof_props = self.gym.get_asset_dof_properties(dclaw_asset)\n\n def set_dof_prop(props, prop_name, val):\n if np.isscalar(val):\n props[prop_name].fill(val)\n elif len(val) == 3:\n props[prop_name] = np.array(list(val) * int(len(props[prop_name]) / 3))\n else:\n props[prop_name] = np.array(val)\n\n if self.cfg[\"env\"][\"dof_vel_hard_limit\"] is not None:\n vel_hard_limit = self.cfg[\"env\"][\"dof_vel_hard_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_dof_vel_hard_limit\"]\n print(f'Setting DOF velocity limit to:{vel_hard_limit}')\n set_dof_prop(dclaw_dof_props, 'velocity', vel_hard_limit)\n if self.cfg[\"env\"][\"effort_limit\"] is not None:\n effort_limit = self.cfg[\"env\"][\"effort_limit\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_effort_limit\"]\n print(f'Setting DOF effort limit to:{effort_limit}')\n set_dof_prop(dclaw_dof_props, 
'effort', effort_limit)\n if self.cfg[\"env\"][\"stiffness\"] is not None:\n stiffness = self.cfg[\"env\"][\"stiffness\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_stiffness\"]\n print(f'Setting stiffness to:{stiffness}')\n set_dof_prop(dclaw_dof_props, 'stiffness', stiffness)\n if self.cfg[\"env\"][\"damping\"] is not None:\n damping = self.cfg[\"env\"][\"damping\"] if not self.cfg.env.soft_control else self.cfg[\"env\"][\"soft_damping\"]\n print(f'Setting damping to:{damping}')\n set_dof_prop(dclaw_dof_props, 'damping', damping)\n\n self.dclaw_dof_lower_limits = []\n self.dclaw_dof_upper_limits = []\n\n self.dclaw_default_dof_states = np.zeros(self.num_dclaw_dofs, dtype=gymapi.DofState.dtype)\n self.dclaw_default_dof_pos = self.dclaw_default_dof_states['pos']\n self.dclaw_default_dof_vel = self.dclaw_default_dof_states['vel']\n for i in range(self.num_dclaw_dofs):\n self.dclaw_dof_lower_limits.append(dclaw_dof_props['lower'][i])\n self.dclaw_dof_upper_limits.append(dclaw_dof_props['upper'][i])\n if i % 3 == 1:\n self.dclaw_default_dof_pos[i] = 0.8\n elif i % 3 == 2:\n self.dclaw_default_dof_pos[i] = -1.1\n else:\n self.dclaw_default_dof_pos[i] = 0.\n self.dclaw_default_dof_vel[i] = 0.0\n\n self.dof_joint_indices = to_torch(self.dof_joint_indices, dtype=torch.long, device=self.device)\n self.dclaw_dof_lower_limits = to_torch(self.dclaw_dof_lower_limits, device=self.device)\n self.dclaw_dof_upper_limits = to_torch(self.dclaw_dof_upper_limits, device=self.device)\n self.dclaw_default_dof_pos = to_torch(self.dclaw_default_dof_pos, device=self.device)\n self.dclaw_default_dof_vel = to_torch(self.dclaw_default_dof_vel, device=self.device)\n\n self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in\n self.fingertips]\n\n dclaw_asset_props = self.gym.get_asset_rigid_shape_properties(dclaw_asset)\n for p in dclaw_asset_props:\n p.friction = self.cfg.env.hand.friction\n p.torsion_friction = self.cfg.env.hand.torsion_friction\n p.rolling_friction = self.cfg.env.hand.rolling_friction\n p.restitution = self.cfg.env.hand.restitution\n self.gym.set_asset_rigid_shape_properties(dclaw_asset, dclaw_asset_props)\n return dclaw_asset, dclaw_dof_props\n\n def get_object_start_pose(self, dclaw_start_pose):\n object_start_pose = gymapi.Transform()\n object_start_pose.p = gymapi.Vec3()\n if self.cfg.env.obj_init_delta_pos is not None:\n delta_pos = self.cfg.env.obj_init_delta_pos\n object_start_pose.p.x = dclaw_start_pose.p.x + delta_pos[0]\n object_start_pose.p.y = dclaw_start_pose.p.y + delta_pos[1]\n object_start_pose.p.z = dclaw_start_pose.p.z + delta_pos[2]\n else:\n object_start_pose.p.x = dclaw_start_pose.p.x\n pose_dy, pose_dz = 0., -0.13\n object_start_pose.p.y = dclaw_start_pose.p.y + pose_dy\n object_start_pose.p.z = dclaw_start_pose.p.z + pose_dz\n return object_start_pose\n\n def get_goal_object_start_pose(self, object_start_pose):\n self.goal_displacement = gymapi.Vec3(0., 0, 0.25)\n self.goal_displacement_tensor = to_torch(\n [self.goal_displacement.x, self.goal_displacement.y, self.goal_displacement.z], device=self.device)\n goal_start_pose = gymapi.Transform()\n goal_start_pose.p = object_start_pose.p + self.goal_displacement\n return goal_start_pose\n\n def set_dof_props(self, props_dict):\n param_setters_map = get_property_setter_map(self.gym)\n param_getters_map = get_property_getter_map(self.gym)\n prop_name = 'dof_properties'\n setter = param_setters_map[prop_name]\n for env_id in range(len(self.envs)):\n env = self.envs[env_id]\n 
handle = self.gym.find_actor_handle(env, 'hand')\n prop = param_getters_map[prop_name](env, handle)\n for dof_prop_name, dof_prop_values in props_dict.items():\n if env_id == 0:\n assert len(dof_prop_values) == len(self.envs)\n prop_val = dof_prop_values[env_id]\n prop[dof_prop_name].fill(prop_val)\n success = setter(env, handle, prop)\n if not success:\n logger.warning(f'Setting dof properties is not successful!')\n\n def update_obj_mass(self, env_ids=None):\n object_rb_masses = []\n env_pool = env_ids if env_ids is not None else list(range(self.num_envs))\n if len(env_pool) < 1:\n return\n for env_id, object_handle in zip(env_pool, self.object_handles):\n env_ptr = self.envs[env_id]\n object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle)\n object_rb_masses.append([prop.mass for prop in object_rb_props])\n if self.object_rb_masses is None:\n self.object_rb_masses = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n else:\n self.object_rb_masses[env_pool] = to_torch(object_rb_masses, dtype=torch.float, device=self.device)\n\n def reset(self) -> torch.Tensor:\n \"\"\"Reset the environment.\n Returns:\n Observation dictionary\n \"\"\"\n zero_actions = self.zero_actions()\n self.reset_buf.fill_(1)\n self.reset_goal_buf.fill_(1)\n if self.cfg.env.action_ema is not None:\n self.action_ema_val = zero_actions.clone()\n # step the simulator\n\n self.step(zero_actions)\n\n return self.update_obs()\n\n def compute_reward(self, actions):\n res = compute_dclaw_reward(\n self.reset_buf, self.reset_goal_buf, self.progress_buf,\n self.successes, self.max_episode_length,\n self.object_pos, self.object_rot, self.goal_pos, self.goal_rot,\n self.cfg['env']['rew'], self.actions,\n self.fingertip_pos, self.fingertip_vel, self.object_linvel, self.object_angvel,\n self.dclaw_dof_vel, self.dclaw_dof_torque,\n table_cf=self.table_contact_force if self.cfg.env.rew.pen_tb_contact else None\n )\n self.rew_buf[:] = res[0] * self.cfg.env.rew.rew_scale\n self.done_buf[:] = res[1]\n self.reset_buf[:] = res[2]\n self.reset_goal_buf[:] = res[3]\n self.progress_buf[:] = res[4]\n self.successes[:] = res[5]\n abs_rot_dist = res[6]\n reward_terms = res[7]\n timeout_envs = res[8]\n\n self.extras['success'] = self.reset_goal_buf.detach().to(self.rl_device).flatten()\n self.extras['abs_dist'] = abs_rot_dist.detach().to(self.rl_device)\n self.extras['TimeLimit.truncated'] = timeout_envs.detach().to(self.rl_device)\n for reward_key, reward_val in reward_terms.items():\n self.extras[reward_key] = reward_val.detach()\n\n def get_images(self):\n rgb = self.render_rgb_obs_buf\n return rgb\n\n def compute_observations(self):\n self.gym.refresh_dof_state_tensor(self.sim)\n if self.cfg.env.dof_torque_on:\n self.gym.refresh_dof_force_tensor(self.sim)\n self.gym.refresh_actor_root_state_tensor(self.sim)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n if self.obs_type == \"full_state\":\n self.gym.refresh_force_sensor_tensor(self.sim)\n self.gym.refresh_dof_force_tensor(self.sim)\n\n if self.cfg.env.rew.pen_tb_contact:\n self.gym.refresh_net_contact_force_tensor(self.sim)\n\n self.object_pose = self.root_state_tensor[self.object_indices, 0:7]\n self.object_pos = self.root_state_tensor[self.object_indices, 0:3]\n self.object_rot = self.root_state_tensor[self.object_indices, 3:7]\n self.object_linvel = self.root_state_tensor[self.object_indices, 7:10]\n self.object_angvel = self.root_state_tensor[self.object_indices, 10:13]\n\n self.goal_pose = self.goal_states[:, 0:7]\n self.goal_pos = 
self.goal_states[:, 0:3]\n self.goal_rot = self.goal_states[:, 3:7]\n\n self.fingertip_state = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:13]\n self.fingertip_pos = self.rigid_body_states[:, self.fingertip_handles][:, :, 0:3]\n self.fingertip_vel = self.rigid_body_states[:, self.fingertip_handles][:, :, 7:13]\n\n if self.obs_type == \"full_no_vel\":\n obs_buf = self.compute_full_observations(no_vel=True)\n elif self.obs_type == \"full\":\n obs_buf = self.compute_full_observations()\n elif self.obs_type == \"full_state\":\n obs_buf = self.compute_full_state()\n else:\n print(\"Unkown observations type!\")\n self.obs_buf = obs_buf\n\n if self.cfg.rgb_render:\n self.gym.fetch_results(self.sim, True)\n self.gym.step_graphics(self.sim)\n self.gym.render_all_camera_sensors(self.sim)\n self.gym.start_access_image_tensors(self.sim)\n self.render_rgb_obs_buf = self.get_numpy_rgb_images(self.render_camera_handles)\n self.gym.end_access_image_tensors(self.sim)\n\n def allocate_ob_buffers(self):\n self.obs_buf = torch.zeros(\n (self.num_envs, self.num_obs), device=self.device, dtype=torch.float)\n\n def compute_full_observations(self, no_vel=False):\n scaled_dof_pos = unscale(\n self.dclaw_dof_pos,\n self.dclaw_dof_lower_limits,\n self.dclaw_dof_upper_limits\n )\n quat_dist = quat_mul(self.object_rot, quat_conjugate(self.goal_rot))\n\n if no_vel:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.object_pose,\n self.goal_rot,\n quat_dist,\n self.fingertip_pos.reshape(self.num_envs, 3 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n else:\n out = torch.cat(\n [\n scaled_dof_pos,\n self.vel_obs_scale * self.dclaw_dof_vel,\n self.object_pose,\n self.object_linvel,\n self.vel_obs_scale * self.object_angvel,\n self.goal_rot,\n quat_dist,\n self.fingertip_state.reshape(self.num_envs, 13 * self.num_fingertips),\n self.actions\n ],\n dim=-1\n )\n return out\n\n def compute_full_state(self):\n obs_buf = self.compute_full_observations()\n obs_no_actions = obs_buf[:, :-9]\n actions = obs_buf[:, -9:]\n out = torch.cat(\n [\n obs_no_actions,\n self.force_torque_obs_scale * self.dof_force_tensor,\n self.force_torque_obs_scale * self.vec_sensor_tensor,\n actions\n ],\n dim=-1\n )\n\n return out\n\n def update_obs(self):\n if self.randomize:\n self.obs_buf = self.dr_randomizations['observations']['noise_lambda'](self.obs_buf)\n\n self.obs_dict[\"ob\"] = torch.clamp(self.obs_buf, -self.clip_obs, self.clip_obs).to(self.rl_device)\n if self.num_states > 0:\n self.obs_dict[\"state\"] = self.get_state()\n return self.obs_dict\n\n def reset_target_pose(self, env_ids, apply_reset=False):\n new_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.goal_states[env_ids, 0:3] = self.goal_init_state[env_ids, 0:3]\n self.goal_states[env_ids, 3:7] = new_rot\n self.root_state_tensor[self.goal_object_indices[env_ids], 0:3] = self.goal_states[env_ids, 0:3] + self.goal_displacement_tensor\n self.root_state_tensor[self.goal_object_indices[env_ids], 3:7] = self.goal_states[env_ids, 3:7]\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.goal_object_indices[env_ids], 7:13])\n\n if apply_reset:\n goal_object_indices = self.goal_object_indices[env_ids].to(torch.int32)\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(goal_object_indices), len(env_ids))\n self.reset_goal_buf[env_ids] = 0\n\n def reset_idx(self, env_ids, goal_env_ids):\n if 
self.randomize and not self.cfg.env.rand_once:\n self.apply_randomizations(self.randomization_params)\n\n rand_floats = torch_rand_float(-1.0, 1.0, (len(env_ids), self.num_dclaw_dofs * 2 + 3), device=self.device)\n\n self.reset_target_pose(env_ids)\n self.rb_forces[env_ids, :, :] = 0.0\n\n self.root_state_tensor[self.object_indices[env_ids]] = self.object_init_state[env_ids].clone()\n self.root_state_tensor[self.object_indices[env_ids], 0:3] = self.object_init_state[env_ids, 0:3] + \\\n self.reset_position_noise * rand_floats[:, 0:3]\n\n new_object_rot = random_quaternions(num=len(env_ids), device=self.device, order='xyzw')\n\n self.root_state_tensor[self.object_indices[env_ids], 3:7] = new_object_rot\n self.root_state_tensor[self.object_indices[env_ids], 7:13] = torch.zeros_like(\n self.root_state_tensor[self.object_indices[env_ids], 7:13])\n\n object_indices = torch.unique(torch.cat([self.object_indices[env_ids],\n self.goal_object_indices[env_ids],\n self.goal_object_indices[goal_env_ids]]).to(torch.int32))\n self.gym.set_actor_root_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.root_state_tensor),\n gymtorch.unwrap_tensor(object_indices), len(object_indices))\n self.random_force_prob[env_ids] = torch.exp(\n (torch.log(self.force_prob_range[0]) - torch.log(self.force_prob_range[1]))\n * torch.rand(len(env_ids), device=self.device) + torch.log(self.force_prob_range[1]))\n\n delta_max = self.dclaw_dof_upper_limits - self.dclaw_default_dof_pos\n delta_min = self.dclaw_dof_lower_limits - self.dclaw_default_dof_pos\n rand_delta = delta_min + (delta_max - delta_min) * rand_floats[:, 3:3 + self.num_dclaw_dofs]\n\n pos = self.dclaw_default_dof_pos + self.reset_dof_pos_noise * rand_delta\n self.dclaw_dof_pos[env_ids, :] = pos\n self.dclaw_dof_vel[env_ids, :] = self.dclaw_default_dof_vel + \\\n self.reset_dof_vel_noise * rand_floats[:,\n 3 + self.num_dclaw_dofs:3 + self.num_dclaw_dofs * 2]\n self.prev_targets[env_ids, :self.num_dclaw_dofs] = pos\n self.cur_targets[env_ids, :self.num_dclaw_dofs] = pos\n\n hand_indices = self.hand_indices[env_ids].to(torch.int32)\n self.gym.set_dof_position_target_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.prev_targets),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n self.gym.set_dof_state_tensor_indexed(self.sim,\n gymtorch.unwrap_tensor(self.dof_state),\n gymtorch.unwrap_tensor(hand_indices), len(env_ids))\n\n self.progress_buf[env_ids] = 0\n self.reset_buf[env_ids] = 0\n self.successes[env_ids] = 0\n\n def get_numpy_rgb_images(self, camera_handles):\n rgb_obs_buf = []\n for cam_handles, env in zip(camera_handles, self.envs):\n cam_ob = []\n if isinstance(cam_handles, list):\n for cam_handle in cam_handles:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handle, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n cam_ob.append(color_image)\n rgb_obs_buf.append(cam_ob)\n else:\n color_image = self.gym.get_camera_image(self.sim, env, cam_handles, gymapi.IMAGE_COLOR)\n color_image = color_image.reshape(color_image.shape[0], -1, 4)[..., :3]\n rgb_obs_buf.append(color_image)\n rgb_obs_buf = np.stack(rgb_obs_buf)\n return rgb_obs_buf\n\n def pre_physics_step(self, actions):\n env_ids = self.reset_buf.nonzero(as_tuple=False).squeeze(-1)\n goal_env_ids = self.reset_goal_buf.nonzero(as_tuple=False).squeeze(-1)\n\n if len(goal_env_ids) > 0 and len(env_ids) == 0:\n self.reset_target_pose(goal_env_ids, apply_reset=True)\n elif len(goal_env_ids) > 0:\n 
self.reset_target_pose(goal_env_ids)\n\n if len(env_ids) > 0:\n self.reset_idx(env_ids, goal_env_ids)\n\n self.actions = actions.clone().to(self.device)\n\n if self.cfg.env.action_ema is not None:\n self.action_ema_val[env_ids] = 0\n self.action_ema_val[goal_env_ids] = 0\n self.actions = self.actions * self.cfg.env.action_ema + self.action_ema_val * (1 - self.cfg.env.action_ema)\n self.action_ema_val = self.actions.clone()\n if self.cfg.env.dof_vel_pol_limit is not None:\n delta_action = self.actions * self.cfg.env.dof_vel_pol_limit * (self.dt * self.cfg.env.controlFrequencyInv)\n else:\n delta_action = self.dclaw_dof_speed_scale * self.dt * self.actions\n if self.cfg.env.relativeToPrevTarget:\n targets = self.prev_targets[:, self.dof_joint_indices] + delta_action\n else:\n targets = self.dclaw_dof_pos + delta_action\n\n self.cur_targets[:, self.dof_joint_indices] = tensor_clamp(targets,\n self.dclaw_dof_lower_limits[\n self.dof_joint_indices],\n self.dclaw_dof_upper_limits[\n self.dof_joint_indices])\n\n self.prev_targets[:, self.dof_joint_indices] = self.cur_targets[:, self.dof_joint_indices]\n self.gym.set_dof_position_target_tensor(self.sim, gymtorch.unwrap_tensor(self.cur_targets))\n\n if self.force_scale > 0.0:\n self.rb_forces *= torch.pow(self.force_decay, self.dt / self.force_decay_interval)\n # apply new forces\n force_indices = (torch.rand(self.num_envs, device=self.device) < self.random_force_prob).nonzero()\n rb_force_shape = self.rb_forces[force_indices, self.object_rb_handles, :].shape\n rb_force_dir = torch.randn(rb_force_shape, device=self.device)\n rb_force_dir = rb_force_dir / rb_force_dir.norm(dim=-1, keepdim=True)\n self.rb_forces[force_indices, self.object_rb_handles, :] = rb_force_dir * self.object_rb_masses[force_indices] * self.force_scale\n self.gym.apply_rigid_body_force_tensors(self.sim, gymtorch.unwrap_tensor(self.rb_forces), None,\n gymapi.LOCAL_SPACE)\n\n def post_physics_step(self):\n self.progress_buf += 1\n self.randomize_buf += 1\n\n self.compute_observations()\n self.compute_reward(self.actions)\n\n if self.viewer and self.debug_viz:\n # draw axes on target object\n self.gym.clear_lines(self.viewer)\n self.gym.refresh_rigid_body_state_tensor(self.sim)\n\n for i in range(self.num_envs):\n targetx = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n targety = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n targetz = (self.goal_pos[i] + quat_apply(self.goal_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.goal_pos[i].cpu().numpy() + self.goal_displacement_tensor.cpu().numpy()\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetx[0], targetx[1], targetx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targety[0], targety[1], targety[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], targetz[0], targetz[1], targetz[2]], [0.1, 0.1, 0.85])\n\n objectx = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([1, 0, 0], device=self.device) * 0.2)).cpu().numpy()\n objecty = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 1, 0], device=self.device) * 0.2)).cpu().numpy()\n objectz = (self.object_pos[i] + quat_apply(self.object_rot[i],\n to_torch([0, 0, 1], device=self.device) * 0.2)).cpu().numpy()\n\n p0 = self.object_pos[i].cpu().numpy()\n 
self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectx[0], objectx[1], objectx[2]], [0.85, 0.1, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objecty[0], objecty[1], objecty[2]], [0.1, 0.85, 0.1])\n self.gym.add_lines(self.viewer, self.envs[i], 1,\n [p0[0], p0[1], p0[2], objectz[0], objectz[1], objectz[2]], [0.1, 0.1, 0.85])" }, { "identifier": "chunker_list", "path": "dexenv/utils/common.py", "snippet": "def chunker_list(seq_list, nchunks):\n # split the list into n parts/chunks\n return [seq_list[i::nchunks] for i in range(nchunks)]" }, { "identifier": "get_all_files_with_name", "path": "dexenv/utils/common.py", "snippet": "def get_all_files_with_name(directory, name,\n exclude_patterns=None,\n include_patterns=None,\n sort=True,\n ):\n directory = pathlib_file(directory)\n files = directory.glob(f'**/{name}')\n files = [x for x in files if x.is_file() and x.name == name]\n if exclude_patterns is not None:\n files = filter_with_exclude_patterns(files, exclude_patterns)\n if include_patterns is not None:\n files = filter_with_include_patterns(files, include_patterns)\n if sort:\n files = sorted(files)\n return files" }, { "identifier": "load_from_pickle", "path": "dexenv/utils/common.py", "snippet": "def load_from_pickle(file_name):\n file_name = pathlib_file(file_name)\n with file_name.open('rb') as f:\n data = pkl.load(f)\n return data" }, { "identifier": "load_a_goal_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_a_goal_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n asset_options.thickness = 0.001\n asset_options.disable_gravity = True\n asset_options.override_inertia = True\n # asset_options.override_com = True\n\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_an_object_asset", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_an_object_asset(gym, sim, asset_root, object_urdf, asset_options=None, vhacd=True):\n if asset_options is None:\n asset_options = gymapi.AssetOptions()\n asset_options.thickness = 0.001\n asset_options.override_inertia = True\n # asset_options.override_com = True\n if vhacd:\n asset_options.convex_decomposition_from_submeshes = True\n rela_file = object_urdf.relative_to(asset_root).as_posix()\n obj_asset = gym.load_asset(sim,\n asset_root.as_posix(),\n rela_file,\n asset_options)\n return obj_asset" }, { "identifier": "load_obj_texture", "path": "dexenv/utils/isaac_utils.py", "snippet": "@torch.no_grad()\ndef load_obj_texture(gym, sim, object_urdf):\n texture_files = get_all_files_with_suffix(object_urdf.parent, 'png')\n num_textures = len(texture_files)\n if num_textures > 1:\n logger.warning(f'Multiple image files exist, will use the first image as the texture!')\n elif num_textures == 0:\n raise RuntimeError(f'No texture file is found!')\n texture_file = texture_files[0]\n texture_handle = gym.create_texture_from_file(sim,\n texture_file.as_posix(),\n )\n return texture_handle" } ]
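The context list above closes with several small utility snippets. One of them, chunker_list, splits a sequence round-robin by stride rather than into contiguous blocks, which is easy to misread at a glance. A minimal usage check, copied from the snippet above and run on a made-up input, behaves as follows:

def chunker_list(seq_list, nchunks):
    # split the list into n parts/chunks by striding (round-robin), not contiguous slices
    return [seq_list[i::nchunks] for i in range(nchunks)]

print(chunker_list(list(range(7)), 3))  # -> [[0, 3, 6], [1, 4], [2, 5]]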
import numpy as np
import torch
import dexenv
from gym.utils import seeding
from isaacgym import gymapi
from loguru import logger
from tqdm import tqdm
from dexenv.envs.dclaw_base import DClawBase
from dexenv.utils.common import chunker_list
from dexenv.utils.common import get_all_files_with_name
from dexenv.utils.common import load_from_pickle
from dexenv.utils.isaac_utils import load_a_goal_object_asset
from dexenv.utils.isaac_utils import load_an_object_asset
from dexenv.utils.isaac_utils import load_obj_texture
15505
object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) 
object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset)
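The truncated code above resolves its object assets through parse_obj_dataset, which accepts either a plain dataset name or a name:object spec separated by a colon. The following sketch mirrors only that path-resolution logic; the names 'my_objects' and 'mug_001' are hypothetical and used purely for illustration:

from pathlib import Path

def resolve_dataset_path(asset_root: Path, dataset: str) -> Path:
    # Mirrors the spec handling in parse_obj_dataset above:
    #   "name"            -> <asset_root>/name/train
    #   "name:object_dir" -> <asset_root>/name/train/object_dir
    parts = dataset.split(':')
    if len(parts) == 1:
        return asset_root.joinpath(dataset, 'train')
    return asset_root.joinpath(parts[0], 'train', parts[1])

print(resolve_dataset_path(Path('assets'), 'my_objects'))          # assets/my_objects/train
print(resolve_dataset_path(Path('assets'), 'my_objects:mug_001'))  # assets/my_objects/train/mug_001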
class DclawMultiObjs(DClawBase): def __init__(self, cfg, sim_device, rl_device, graphics_device_id): self.set_random_gen() self.object_urdfs, self.dataset_path, self.obj_name_to_cat_id = self.parse_obj_dataset(cfg.obj.dataset) self.num_objects = len(self.object_urdfs) logger.info(f'Object urdf root path:{self.dataset_path}.') logger.info(f'Number of available objects:{self.num_objects}.') super().__init__(cfg=cfg, sim_device=sim_device, rl_device=rl_device, graphics_device_id=graphics_device_id) def set_random_gen(self, seed=12345): self.np_random, seed = seeding.np_random(seed) def _create_envs(self, num_envs, spacing, num_per_row): lower = gymapi.Vec3(-spacing, -spacing, 0.0) upper = gymapi.Vec3(spacing, spacing, spacing) asset_root = dexenv.LIB_PATH.joinpath('assets', 'dclaw').as_posix() dclaw_asset, dclaw_dof_props = self.get_dclaw_asset(asset_root=asset_root) # load manipulated object and goal assets table_asset = self.get_table_asset() table_pose = self.get_table_pose() object_assets, goal_assets, object_ids, object_textures, object_ptds, object_cat_ids = self.load_object_asset() # create fingertip force sensors, if needed if self.obs_type == "full_state": sensor_pose = gymapi.Transform() for ft_handle in self.fingertip_handles: self.gym.create_asset_force_sensor(dclaw_asset, ft_handle, sensor_pose) dclaw_start_pose = self.get_dclaw_start_pose() object_start_pose = self.get_object_start_pose(dclaw_start_pose) goal_start_pose = self.get_goal_object_start_pose(object_start_pose=object_start_pose) self.dclaws = [] self.envs = [] self.object_init_state = [] self.hand_start_states = [] self.hand_indices = [] self.fingertip_indices = [] self.object_indices = [] self.object_cat_indices = [] self.goal_object_indices = [] self.render_camera_handles = [] if self.cfg.rgb_render: render_cam_pose, render_cam_params = self.get_visual_render_camera_setup() self.fingertip_handles = [self.gym.find_asset_rigid_body_index(dclaw_asset, name) for name in self.fingertips] dclaw_rb_count = self.gym.get_asset_rigid_body_count(dclaw_asset) object_rb_count = self.gym.get_asset_rigid_body_count(object_assets[0]) self.object_rb_handles = list(range(dclaw_rb_count, dclaw_rb_count + object_rb_count)) self.object_handles = [] num_object_assets = len(object_assets) env_obj_ids = [] for i in range(self.num_envs): # create env instance obj_asset_id = i % num_object_assets env_obj_ids.append(object_ids[obj_asset_id]) env_ptr = self.gym.create_env( self.sim, lower, upper, num_per_row ) if self.aggregate_mode >= 1: # compute aggregate size obj_num_bodies = self.gym.get_asset_rigid_body_count(object_assets[obj_asset_id]) obj_num_shapes = self.gym.get_asset_rigid_shape_count(object_assets[obj_asset_id]) max_agg_bodies = self.num_dclaw_bodies + obj_num_bodies * 2 + 1 max_agg_shapes = self.num_dclaw_shapes + obj_num_shapes * 2 + 1 self.gym.begin_aggregate(env_ptr, max_agg_bodies, max_agg_shapes, True) self.create_hand_actor(env_ptr=env_ptr, dclaw_asset=dclaw_asset, dclaw_start_pose=dclaw_start_pose, dclaw_dof_props=dclaw_dof_props, env_id=i) # add object object_handle = self.gym.create_actor(env_ptr, object_assets[obj_asset_id], object_start_pose, "object", i, 0, 1) self.object_handles.append(object_handle) self.object_init_state.append([object_start_pose.p.x, object_start_pose.p.y, object_start_pose.p.z, object_start_pose.r.x, object_start_pose.r.y, object_start_pose.r.z, object_start_pose.r.w, 0, 0, 0, 0, 0, 0]) object_idx = self.gym.get_actor_index(env_ptr, object_handle, gymapi.DOMAIN_SIM) 
self.object_indices.append(object_idx) self.object_cat_indices.append(object_cat_ids[obj_asset_id]) # add goal object goal_handle = self.gym.create_actor(env_ptr, goal_assets[obj_asset_id], goal_start_pose, "goal_object", i + self.num_envs, 0, 2) goal_object_idx = self.gym.get_actor_index(env_ptr, goal_handle, gymapi.DOMAIN_SIM) self.goal_object_indices.append(goal_object_idx) if self.cfg.obj.load_texture: self.gym.set_rigid_body_texture(env_ptr, object_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) self.gym.set_rigid_body_texture(env_ptr, goal_handle, 0, gymapi.MESH_VISUAL_AND_COLLISION, object_textures[obj_asset_id] ) else: color = np.array([179, 193, 134]) / 255.0 self.gym.set_rigid_body_color( env_ptr, object_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) self.gym.set_rigid_body_color( env_ptr, goal_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(*color)) table_handle = self.gym.create_actor(env_ptr, table_asset, table_pose, "table", i, 0) self.gym.set_rigid_body_color(env_ptr, table_handle, 0, gymapi.MESH_VISUAL, gymapi.Vec3(180 / 255., 180 / 255., 180 / 255.)) if self.cfg.rgb_render: render_camera_handle = self.create_camera(render_cam_pose, env_ptr, render_cam_params) self.render_camera_handles.append(render_camera_handle[0]) if self.aggregate_mode > 0: self.gym.end_aggregate(env_ptr) self.envs.append(env_ptr) object_rb_props = self.gym.get_actor_rigid_body_properties(env_ptr, object_handle) self.object_rb_masses = [prop.mass for prop in object_rb_props] self.setup_torch_states() self.env_obj_ids = torch.LongTensor(env_obj_ids).to(self.device).view(-1, 1) self.object_cat_indices = torch.LongTensor(self.object_cat_indices).to(self.device).view(-1, 1) def parse_obj_dataset(self, dataset): asset_root = dexenv.LIB_PATH.joinpath('assets') split_dataset_name = dataset.split(':') if len(split_dataset_name) == 1: dataset_path = asset_root.joinpath(dataset, 'train') else: target_object = split_dataset_name[1] dataset_path = asset_root.joinpath(split_dataset_name[0], 'train', target_object) logger.warning(f'Dataset path:{dataset_path}') urdf_files = get_all_files_with_name(dataset_path, name='model.urdf') permute_ids = self.np_random.permutation(np.arange(len(urdf_files))) permuted_urdfs = [urdf_files[i] for i in permute_ids] object_categories = sorted(list(set([self.get_object_category(urdf) for urdf in permuted_urdfs]))) obj_name_to_id = {name: idx for idx, name in enumerate(object_categories)} return permuted_urdfs, dataset_path, obj_name_to_id def get_object_category(self, urdf_path): cat = urdf_path.parents[0].name if 'var_' in cat: cat = urdf_path.parents[1].name return cat def load_object_asset(self): asset_root = dexenv.LIB_PATH.joinpath('assets') object_urdfs = self.object_urdfs object_assets, goal_assets, object_ids, object_tex_handles, object_ptds = [], [], [], [], [] object_cat_ids = [] if self.cfg.obj.object_id is not None: urdf_to_load = self.object_urdfs[self.cfg.obj.object_id] logger.info(f'Loading a single object: {urdf_to_load}') obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) else: if self.cfg.obj.start_id is None: start = 0 end = min(len(object_urdfs), self.cfg.obj.num_objs) else: start = self.cfg.obj.start_id end = min(start + 
self.cfg.obj.num_objs, len(object_urdfs)) iters = range(start, end) logger.info(f'Loading object IDs from {start} to {end}.') for idx in tqdm(iters, desc='Loading Asset'): urdf_to_load = object_urdfs[idx] obj_asset, goal_asset, texture_handle, ptd = self.load_an_object(asset_root, urdf_to_load) object_assets.append(obj_asset) goal_assets.append(goal_asset) object_ids.append(self.object_urdfs.index(urdf_to_load)) object_tex_handles.append(texture_handle) object_ptds.append(ptd) object_cat_ids.append(self.obj_name_to_cat_id[self.get_object_category(urdf_to_load)]) return object_assets, goal_assets, object_ids, object_tex_handles, object_ptds, object_cat_ids def load_an_object(self, asset_root, object_urdf): out = [] obj_asset = load_an_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=self.cfg.env.vhacd) obj_asset = self.change_obj_asset_dyn(obj_asset)
goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)
4
2023-10-25 17:22:41+00:00
24k
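Before the next record, a small consistency check makes the record layout above concrete. Each retrieved context entry is a dict with "identifier", "path" and "snippet" keys, and the standalone index 4 recorded above appears to select the entry whose function the gold continuation line actually calls (load_a_goal_object_asset), assuming the DClawBase snippet is the first entry of that list. The helper and variable names below are illustrative only, not part of the data or of any official tooling:

def gold_snippet_is_used(context, gold_index, target_line):
    # True when the identifier of the indexed context entry occurs in the target line.
    return context[gold_index]["identifier"] in target_line

# Identifiers read off the record above (snippets omitted); order assumes DClawBase comes first.
context = [
    {"identifier": "DClawBase"},
    {"identifier": "chunker_list"},
    {"identifier": "get_all_files_with_name"},
    {"identifier": "load_from_pickle"},
    {"identifier": "load_a_goal_object_asset"},
    {"identifier": "load_an_object_asset"},
    {"identifier": "load_obj_texture"},
]
target = "goal_obj_asset = load_a_goal_object_asset(self.gym, self.sim, asset_root, object_urdf, vhacd=False)"
print(gold_snippet_is_used(context, 4, target))  # True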
ai-safety-foundation/sparse_autoencoder
sparse_autoencoder/train/pipeline.py
[ { "identifier": "ActivationResampler", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ActivationResampler:\n \"\"\"Activation resampler.\n\n Collates the number of times each neuron fires over a set number of learned activation vectors,\n and then provides the parameters necessary to reset any dead neurons.\n\n Motivation:\n Over the course of training, a subset of autoencoder neurons will have zero activity across\n a large number of datapoints. The authors of *Towards Monosemanticity: Decomposing Language\n Models With Dictionary Learning* found that “resampling” these dead neurons during training\n improves the number of likely-interpretable features (i.e., those in the high density\n cluster) and reduces total loss. This resampling may be compatible with the Lottery Ticket\n Hypothesis and increase the number of chances the network has to find promising feature\n directions.\n\n An interesting nuance around dead neurons involves the ultralow density cluster. They found\n that if we increase the number of training steps then networks will kill off more of these\n ultralow density neurons. This reinforces the use of the high density cluster as a useful\n metric because there can exist neurons that are de facto dead but will not appear to be when\n looking at the number of dead neurons alone.\n\n This approach is designed to seed new features to fit inputs where the current autoencoder\n performs worst. Resetting the encoder norm and bias are crucial to ensuring this resampled\n neuron will only fire weakly for inputs similar to the one used for its reinitialization.\n This was done to minimize interference with the rest of the network.\n\n Warning:\n The optimizer should be reset after applying this function, as the Adam state will be\n incorrect for the modified weights and biases.\n\n Warning:\n This approach is also known to create sudden loss spikes, and resampling too frequently\n causes training to diverge.\n \"\"\"\n\n _activations_seen_since_last_resample: int = 0\n \"\"\"Number of activations since we last resampled.\"\"\"\n\n _collated_neuron_activity: Float[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Collated neuron activity, over the current data collection window.\"\"\"\n\n _threshold_is_dead_portion_fires: float\n \"\"\"Threshold for determining if a neuron has fired (or is dead).\"\"\"\n\n _max_n_resamples: int\n \"\"\"Maximum number of times that resampling should be performed.\"\"\"\n\n _n_activations_collated_since_last_resample: int = 0\n \"\"\"Number of activations collated since we last resampled.\n\n Number of vectors used to collate neuron activity, over the current collation window.\n \"\"\"\n\n _n_components: int\n \"\"\"Number of components.\"\"\"\n\n _n_times_resampled: int = 0\n \"\"\"Number of times that resampling has been performed.\"\"\"\n\n neuron_activity_window_end: int\n \"\"\"End of the window for collecting neuron activity.\"\"\"\n\n neuron_activity_window_start: int\n \"\"\"Start of the window for collecting neuron activity.\"\"\"\n\n @validate_call\n def __init__(\n self,\n n_learned_features: PositiveInt,\n n_components: NonNegativeInt = 1,\n resample_interval: PositiveInt = 200_000_000,\n max_n_resamples: NonNegativeInt = 4,\n n_activations_activity_collate: PositiveInt = 100_000_000,\n resample_dataset_size: PositiveInt = 819_200,\n threshold_is_dead_portion_fires: Annotated[float, Field(strict=True, ge=0, le=1)] = 0.0,\n ) -> None:\n r\"\"\"Initialize the activation 
resampler.\n\n Defaults to values used in the Anthropic Towards Monosemanticity paper.\n\n Args:\n n_learned_features: Number of learned features\n n_components: Number of components that the SAE is being trained on.\n resample_interval: Interval in number of autoencoder input activation vectors trained\n on, before resampling.\n max_n_resamples: Maximum number of resamples to perform throughout the entire pipeline.\n Set to inf if you want to have no limit.\n n_activations_activity_collate: Number of autoencoder learned activation vectors to\n collate before resampling (the activation resampler will start collecting on vector\n $\\text{resample_interval} - \\text{n_steps_collate}$).\n resample_dataset_size: Number of autoencoder input activations to use for calculating\n the loss, as part of the resampling process to create the reset neuron weights.\n threshold_is_dead_portion_fires: Threshold for determining if a neuron is dead (has\n \"fired\" in less than this portion of the collated sample).\n\n Raises:\n ValueError: If any of the arguments are invalid (e.g. negative integers).\n \"\"\"\n if n_activations_activity_collate > resample_interval:\n error_message = (\n \"Number of steps to collate must be less than or equal to the resample interval.\"\n )\n raise ValueError(error_message)\n\n super().__init__()\n self.neuron_activity_window_end = resample_interval\n self.neuron_activity_window_start = resample_interval - n_activations_activity_collate\n self._max_n_resamples = max_n_resamples\n self._collated_neuron_activity = torch.zeros(\n (n_components, n_learned_features), dtype=torch.int64\n )\n self._resample_dataset_size = resample_dataset_size\n self._threshold_is_dead_portion_fires = threshold_is_dead_portion_fires\n self._n_components = n_components\n\n def _get_dead_neuron_indices(\n self,\n ) -> list[Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]]:\n \"\"\"Identify the indices of neurons that are dead.\n\n Identifies any neurons that have fired less than the threshold portion of the collated\n sample size.\n\n Example:\n >>> resampler = ActivationResampler(n_learned_features=6, n_components=2)\n >>> resampler._collated_neuron_activity = torch.tensor(\n ... [[1, 1, 0, 0, 1, 1], [1, 1, 1, 1, 1, 0]]\n ... 
)\n >>> resampler._get_dead_neuron_indices()\n [tensor([2, 3]), tensor([5])]\n\n Returns:\n List of dead neuron indices for each component.\n\n Raises:\n ValueError: If no neuron activity has been collated yet.\n \"\"\"\n # Check we have already collated some neuron activity\n if torch.all(self._collated_neuron_activity == 0):\n error_message = \"Cannot get dead neuron indices without neuron activity.\"\n raise ValueError(error_message)\n\n # Find any neurons that fire less than the threshold portion of times\n threshold_is_dead_n_fires: int = int(\n self._n_activations_collated_since_last_resample * self._threshold_is_dead_portion_fires\n )\n\n return [\n torch.where(self._collated_neuron_activity[component_idx] <= threshold_is_dead_n_fires)[\n 0\n ].to(dtype=torch.int64)\n for component_idx in range(self._n_components)\n ]\n\n def compute_loss_and_get_activations(\n self,\n store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> LossInputActivationsTuple:\n \"\"\"Compute the loss on a random subset of inputs.\n\n Motivation:\n Helps find input vectors that have high SAE loss, so that we can resample dead neurons\n in a way that improves performance on these specific input vectors.\n\n Args:\n store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n A tuple of loss per item, and all input activations.\n\n Raises:\n ValueError: If the number of items in the store is less than the number of inputs\n \"\"\"\n with torch.no_grad():\n loss_batches: list[Float[Tensor, Axis.BATCH]] = []\n input_activations_batches: list[\n Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n dataloader = DataLoader(store, batch_size=train_batch_size)\n n_inputs = self._resample_dataset_size\n n_batches_required: int = n_inputs // train_batch_size\n model_device: torch.device = get_model_device(autoencoder)\n\n for batch_idx, batch in enumerate(iter(dataloader)):\n input_activations_batches.append(batch)\n source_activations = batch.to(model_device)\n learned_activations, reconstructed_activations = autoencoder(source_activations)\n loss_batches.append(\n loss_fn.forward(\n source_activations, learned_activations, reconstructed_activations\n )\n )\n if batch_idx >= n_batches_required:\n break\n\n loss_per_item = torch.cat(loss_batches).to(model_device)\n input_activations = torch.cat(input_activations_batches).to(model_device)\n\n # Check we generated enough data\n if len(loss_per_item) < n_inputs:\n error_message = (\n f\"Cannot get {n_inputs} items from the store, \"\n f\"as only {len(loss_per_item)} were available.\"\n )\n raise ValueError(error_message)\n\n return LossInputActivationsTuple(loss_per_item, input_activations)\n\n @staticmethod\n def assign_sampling_probabilities(\n loss: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Assign the sampling probabilities for each input activations vector.\n\n Assign each input vector a probability of being picked that is proportional to the square of\n the autoencoder's loss on that input.\n\n Examples:\n >>> loss = torch.tensor([1.0, 2.0, 3.0])\n >>> ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([0.0700, 0.2900, 0.6400])\n\n >>> loss = torch.tensor([[1.0, 2], [2, 4], [3, 6]])\n >>> 
ActivationResampler.assign_sampling_probabilities(loss).round(decimals=2)\n tensor([[0.0700, 0.0700],\n [0.2900, 0.2900],\n [0.6400, 0.6400]])\n\n Args:\n loss: Loss per item.\n\n Returns:\n A tensor of probabilities for each item.\n \"\"\"\n square_loss = loss.pow(2)\n return square_loss / square_loss.sum(0)\n\n @staticmethod\n def sample_input(\n probabilities: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)],\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n n_samples: list[int],\n ) -> list[Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]]:\n \"\"\"Sample an input vector based on the provided probabilities.\n\n Example:\n >>> probabilities = torch.tensor([[0.1], [0.2], [0.7]])\n >>> input_activations = torch.tensor([[[1.0, 2.0]], [[3.0, 4.0]], [[5.0, 6.0]]])\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = ActivationResampler.sample_input(\n ... probabilities, input_activations, [2]\n ... )\n >>> sampled_input[0].tolist()\n [[5.0, 6.0], [3.0, 4.0]]\n\n Args:\n probabilities: Probabilities for each input.\n input_activations: Input activation vectors.\n n_samples: Number of samples to take (number of dead neurons).\n\n Returns:\n Sampled input activation vector.\n\n Raises:\n ValueError: If the number of samples is greater than the number of input activations.\n \"\"\"\n sampled_inputs: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = []\n\n for component_idx, component_n_samples in enumerate(n_samples):\n component_probabilities: Float[Tensor, Axis.BATCH] = get_component_slice_tensor(\n input_tensor=probabilities,\n n_dim_with_component=2,\n component_dim=1,\n component_idx=component_idx,\n )\n\n component_input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(\n input_tensor=input_activations,\n n_dim_with_component=3,\n component_dim=1,\n component_idx=component_idx,\n )\n\n if component_n_samples > len(component_input_activations):\n exception_message = (\n f\"Cannot sample {component_n_samples} inputs from \"\n f\"{len(component_input_activations)} input activations.\"\n )\n raise ValueError(exception_message)\n\n # Handle the 0 dead neurons case\n if component_n_samples == 0:\n sampled_inputs.append(\n torch.empty(\n (0, component_input_activations.shape[-1]),\n dtype=component_input_activations.dtype,\n device=component_input_activations.device,\n )\n )\n continue\n\n # Handle the 1+ dead neuron case\n component_sample_indices: Int64[Tensor, Axis.LEARNT_FEATURE_IDX] = torch.multinomial(\n component_probabilities, num_samples=component_n_samples\n )\n sampled_inputs.append(component_input_activations[component_sample_indices, :])\n\n return sampled_inputs\n\n @staticmethod\n def renormalize_and_scale(\n sampled_input: Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n neuron_activity: Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE)],\n encoder_weight: Float[Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)],\n ) -> Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]:\n \"\"\"Renormalize and scale the resampled dictionary vectors.\n\n Renormalize the input vector to equal the average norm of the encoder weights for alive\n neurons times 0.2.\n\n Example:\n >>> from torch.nn import Parameter\n >>> _seed = torch.manual_seed(0) # For reproducibility in example\n >>> sampled_input = 
torch.tensor([[3.0, 4.0]])\n >>> neuron_activity = torch.tensor([3, 0, 5, 0, 1, 3])\n >>> encoder_weight = Parameter(torch.ones((6, 2)))\n >>> rescaled_input = ActivationResampler.renormalize_and_scale(\n ... sampled_input,\n ... neuron_activity,\n ... encoder_weight\n ... )\n >>> rescaled_input.round(decimals=1)\n tensor([[0.2000, 0.2000]])\n\n Args:\n sampled_input: Tensor of the sampled input activation.\n neuron_activity: Tensor representing the number of times each neuron fired.\n encoder_weight: Tensor of encoder weights.\n\n Returns:\n Rescaled sampled input.\n\n Raises:\n ValueError: If there are no alive neurons.\n \"\"\"\n alive_neuron_mask: Bool[Tensor, \" learned_features\"] = neuron_activity > 0\n\n # Check there is at least one alive neuron\n if not torch.any(alive_neuron_mask):\n error_message = \"No alive neurons found.\"\n raise ValueError(error_message)\n\n # Handle no dead neurons\n n_dead_neurons = len(sampled_input)\n if n_dead_neurons == 0:\n return torch.empty(\n (0, sampled_input.shape[-1]), dtype=sampled_input.dtype, device=sampled_input.device\n )\n\n # Calculate the average norm of the encoder weights for alive neurons.\n detached_encoder_weight = encoder_weight.detach() # Don't track gradients\n alive_encoder_weights: Float[\n Tensor, Axis.names(Axis.ALIVE_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = detached_encoder_weight[alive_neuron_mask, :]\n average_alive_norm: Float[Tensor, Axis.SINGLE_ITEM] = alive_encoder_weights.norm(\n dim=-1\n ).mean()\n\n # Renormalize the input vector to equal the average norm of the encoder weights for alive\n # neurons times 0.2.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input, dim=-1)\n return renormalized_input * (average_alive_norm * 0.2)\n\n def resample_dead_neurons(\n self,\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults]:\n \"\"\"Resample dead neurons.\n\n Args:\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n For each component that the SAE is being trained on, the indices of dead neurons and the\n updates for the encoder and decoder weights and biases.\n \"\"\"\n parameter_update_results: list[ParameterUpdateResults] = []\n\n with torch.no_grad():\n dead_neuron_indices: list[\n Int64[Tensor, Axis.names(Axis.LEARNT_FEATURE_IDX)]\n ] = self._get_dead_neuron_indices()\n\n # Compute the loss for the current model on a random subset of inputs and get the\n # activations.\n loss_per_item, input_activations = self.compute_loss_and_get_activations(\n store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Assign each input vector a probability of being picked that is proportional to the\n # square of the autoencoder's loss on that input.\n sample_probabilities: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)\n ] = self.assign_sampling_probabilities(loss_per_item)\n\n # For each dead neuron sample an input according to these probabilities.\n sampled_input: list[\n Float[Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)]\n ] = self.sample_input(\n sample_probabilities, input_activations, [len(dead) for dead in dead_neuron_indices]\n )\n\n for component_idx 
in range(self._n_components):\n # Renormalize each input vector to have unit L2 norm and set this to be the\n # dictionary vector for the dead autoencoder neuron.\n renormalized_input: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = torch.nn.functional.normalize(sampled_input[component_idx], dim=-1)\n\n dead_decoder_weight_updates = rearrange(\n renormalized_input, \"dead_neuron input_feature -> input_feature dead_neuron\"\n )\n\n # For the corresponding encoder vector, renormalize the input vector to equal the\n # average norm of the encoder weights for alive neurons times 0.2. Set the\n # corresponding encoder bias element to zero.\n encoder_weight: Float[\n Tensor, Axis.names(Axis.LEARNT_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ] = get_component_slice_tensor(autoencoder.encoder.weight, 3, 0, component_idx)\n\n rescaled_sampled_input = self.renormalize_and_scale(\n sampled_input=sampled_input[component_idx],\n neuron_activity=self._collated_neuron_activity[component_idx],\n encoder_weight=encoder_weight,\n )\n\n dead_encoder_bias_updates = torch.zeros_like(\n dead_neuron_indices[component_idx],\n dtype=dead_decoder_weight_updates.dtype,\n device=dead_decoder_weight_updates.device,\n )\n\n parameter_update_results.append(\n ParameterUpdateResults(\n dead_neuron_indices=dead_neuron_indices[component_idx],\n dead_encoder_weight_updates=rescaled_sampled_input,\n dead_encoder_bias_updates=dead_encoder_bias_updates,\n dead_decoder_weight_updates=dead_decoder_weight_updates,\n )\n )\n\n return parameter_update_results\n\n def step_resampler(\n self,\n batch_neuron_activity: Int64[Tensor, Axis.names(Axis.COMPONENT, Axis.LEARNT_FEATURE)],\n activation_store: ActivationStore,\n autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n loss_fn: AbstractLoss,\n train_batch_size: int,\n ) -> list[ParameterUpdateResults] | None:\n \"\"\"Step the resampler, collating neuron activity and resampling if necessary.\n\n Args:\n batch_neuron_activity: Number of times each neuron fired in the current batch.\n activation_store: Activation store.\n autoencoder: Sparse autoencoder model.\n loss_fn: Loss function.\n train_batch_size: Train batch size (also used for resampling).\n\n Returns:\n Parameter update results (for each component that the SAE is being trained on) if\n resampling is due. Otherwise None.\n \"\"\"\n # Update the counter\n self._activations_seen_since_last_resample += len(activation_store)\n\n if self._n_times_resampled < self._max_n_resamples:\n # Collate neuron activity, if in the data collection window. 
For example in the\n # Anthropic Towards Monosemanticity paper, the window started collecting at 100m\n # activations and stopped at 200m (and then repeated this again a few times until the\n # max times to resample was hit).\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_start:\n detached_neuron_activity = batch_neuron_activity.detach().cpu()\n self._collated_neuron_activity.add_(detached_neuron_activity)\n self._n_activations_collated_since_last_resample += train_batch_size\n\n # Check if we should resample.\n if self._activations_seen_since_last_resample >= self.neuron_activity_window_end:\n # Get resampled dictionary vectors\n resample_res = self.resample_dead_neurons(\n activation_store=activation_store,\n autoencoder=autoencoder,\n loss_fn=loss_fn,\n train_batch_size=train_batch_size,\n )\n\n # Update counters\n self._activations_seen_since_last_resample = 0\n self._n_activations_collated_since_last_resample = 0\n self._n_times_resampled += 1\n\n # Reset the collated neuron activity\n self._collated_neuron_activity.zero_()\n\n return resample_res\n\n return None\n\n def __str__(self) -> str:\n \"\"\"Return a string representation of the activation resampler.\"\"\"\n return (\n f\"ActivationResampler(\"\n f\"n_components={self._n_components}, \"\n f\"neuron_activity_window_start={self.neuron_activity_window_end}, \"\n f\"neuron_activity_window_end={self.neuron_activity_window_end}, \"\n f\"max_resamples={self._max_n_resamples}, \"\n f\"resample_dataset_size={self._resample_dataset_size}, \"\n f\"dead_neuron_threshold={self._threshold_is_dead_portion_fires})\"\n )" }, { "identifier": "ParameterUpdateResults", "path": "sparse_autoencoder/activation_resampler/activation_resampler.py", "snippet": "class ParameterUpdateResults:\n \"\"\"Parameter update results from resampling dead neurons.\"\"\"\n\n dead_neuron_indices: Int64[Tensor, Axis.LEARNT_FEATURE_IDX]\n \"\"\"Dead neuron indices.\"\"\"\n\n dead_encoder_weight_updates: Float[\n Tensor, Axis.names(Axis.DEAD_FEATURE, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Dead encoder weight updates.\"\"\"\n\n dead_encoder_bias_updates: Float[Tensor, Axis.DEAD_FEATURE]\n \"\"\"Dead encoder bias updates.\"\"\"\n\n dead_decoder_weight_updates: Float[\n Tensor, Axis.names(Axis.INPUT_OUTPUT_FEATURE, Axis.DEAD_FEATURE)\n ]\n \"\"\"Dead decoder weight updates.\"\"\"" }, { "identifier": "TensorActivationStore", "path": "sparse_autoencoder/activation_store/tensor_store.py", "snippet": "class TensorActivationStore(ActivationStore):\n \"\"\"Tensor Activation Store.\n\n Stores tensors in a (large) tensor of shape (item, neuron). Requires the number of activation\n vectors to be stored to be known in advance. 
Multiprocess safe.\n\n Extends the `torch.utils.data.Dataset` class to provide a list-based activation store, with\n additional :meth:`append` and :meth:`extend` methods (the latter of which is non-blocking).\n\n Examples:\n Create an empty activation dataset:\n\n >>> import torch\n >>> store = TensorActivationStore(max_items=1000, n_neurons=100, n_components=2)\n\n Add a single activation vector to the dataset (for a component):\n\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=1)\n >>> len(store)\n 1\n\n Add a [batch, neurons] activation tensor to the dataset:\n\n >>> store.empty()\n >>> batch = torch.randn(10, 100)\n >>> store.extend(batch, component_idx=0)\n >>> store.extend(batch, component_idx=1)\n >>> len(store)\n 10\n\n Shuffle the dataset **before passing it to the DataLoader**:\n\n >>> store.shuffle() # Faster than using the DataLoader shuffle argument\n\n Use the dataloader to iterate over the dataset:\n\n >>> loader = torch.utils.data.DataLoader(store, shuffle=False, batch_size=2)\n >>> next_item = next(iter(loader))\n >>> next_item.shape\n torch.Size([2, 2, 100])\n \"\"\"\n\n _data: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)]\n \"\"\"Underlying Tensor Data Store.\"\"\"\n\n _items_stored: list[int]\n \"\"\"Number of items stored.\"\"\"\n\n max_items: int\n \"\"\"Maximum Number of Items to Store.\"\"\"\n\n _n_components: int\n \"\"\"Number of components\"\"\"\n\n @property\n def n_components(self) -> int:\n \"\"\"Number of components.\"\"\"\n return self._n_components\n\n @property\n def current_activations_stored_per_component(self) -> list[int]:\n \"\"\"Number of activations stored per component.\"\"\"\n return self._items_stored\n\n @validate_call(config={\"arbitrary_types_allowed\": True})\n def __init__(\n self,\n max_items: PositiveInt,\n n_neurons: PositiveInt,\n n_components: PositiveInt,\n device: torch.device | None = None,\n ) -> None:\n \"\"\"Initialise the Tensor Activation Store.\n\n Args:\n max_items: Maximum number of items to store per component (individual activation\n vectors).\n n_neurons: Number of neurons in each activation vector.\n n_components: Number of components to store (i.e. number of source models).\n device: Device to store the activation vectors on.\n \"\"\"\n self._n_components = n_components\n self._items_stored = [0] * n_components\n self._max_items = max_items\n self._data = torch.empty((max_items, n_components, n_neurons), device=device)\n\n def __len__(self) -> int:\n \"\"\"Length Dunder Method.\n\n Returns the number of activation vectors per component in the dataset.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10_000_000, n_neurons=100, n_components=1)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> store.append(torch.randn(100), component_idx=0)\n >>> len(store)\n 2\n\n Returns:\n The number of activation vectors in the dataset.\n \"\"\"\n # Min as this is the amount of activations that can be fetched by get_item\n return min(self.current_activations_stored_per_component)\n\n def __sizeof__(self) -> int:\n \"\"\"Sizeof Dunder Method.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=100, n_components=1)\n >>> store.__sizeof__() # Pre-allocated tensor of 2x100\n 800\n\n Returns:\n The size of the underlying tensor in bytes.\n \"\"\"\n return self._data.element_size() * self._data.nelement()\n\n def __getitem__(\n self, index: tuple[int, ...] 
| slice | int\n ) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Get Item Dunder Method.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=2, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n index: The index of the tensor to fetch.\n\n Returns:\n The activation store item at the given index.\n \"\"\"\n return self._data[index]\n\n def shuffle(self) -> None:\n \"\"\"Shuffle the Data In-Place.\n\n This is much faster than using the shuffle argument on `torch.utils.data.DataLoader`.\n\n Example:\n >>> import torch\n >>> _seed = torch.manual_seed(42)\n >>> store = TensorActivationStore(max_items=10, n_neurons=1, n_components=1)\n >>> store.append(torch.tensor([0.]), component_idx=0)\n >>> store.append(torch.tensor([1.]), component_idx=0)\n >>> store.append(torch.tensor([2.]), component_idx=0)\n >>> store.shuffle()\n >>> [store[i, 0].item() for i in range(3)]\n [0.0, 2.0, 1.0]\n \"\"\"\n # Generate a permutation of the indices for the active data\n perm = torch.randperm(len(self))\n\n # Use this permutation to shuffle the active data in-place\n self._data[: len(self)] = self._data[perm]\n\n def append(self, item: Float[Tensor, Axis.INPUT_OUTPUT_FEATURE], component_idx: int) -> None:\n \"\"\"Add a single item to the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.append(torch.zeros(5), component_idx=0)\n >>> store.append(torch.ones(5), component_idx=0)\n >>> store[1, 0]\n tensor([1., 1., 1., 1., 1.])\n\n Args:\n item: The item to append to the dataset.\n component_idx: The component index to append the item to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n if self._items_stored[component_idx] + 1 > self._max_items:\n raise StoreFullError\n\n self._data[self._items_stored[component_idx], component_idx] = item.to(\n self._data.device,\n )\n self._items_stored[component_idx] += 1\n\n def extend(\n self,\n batch: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)],\n component_idx: int,\n ) -> None:\n \"\"\"Add a batch to the store.\n\n Examples:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n\n Args:\n batch: The batch to append to the dataset.\n component_idx: The component index to append the batch to.\n\n Raises:\n IndexError: If there is no space remaining.\n \"\"\"\n # Check we have space\n n_activation_tensors: int = batch.shape[0]\n if self._items_stored[component_idx] + n_activation_tensors > self._max_items:\n raise StoreFullError\n\n self._data[\n self._items_stored[component_idx] : self._items_stored[component_idx]\n + n_activation_tensors,\n component_idx,\n ] = batch.to(self._data.device)\n self._items_stored[component_idx] += n_activation_tensors\n\n def empty(self) -> None:\n \"\"\"Empty the store.\n\n Example:\n >>> import torch\n >>> store = TensorActivationStore(max_items=10, n_neurons=5, n_components=1)\n >>> store.extend(torch.zeros(2, 5), component_idx=0)\n >>> len(store)\n 2\n >>> store.empty()\n >>> len(store)\n 0\n \"\"\"\n # We don't need to zero the data, just reset the number of items stored\n self._items_stored = [0 for _ in self._items_stored]" }, { "identifier": "SparseAutoencoder", "path": "sparse_autoencoder/autoencoder/model.py", 
"snippet": "class SparseAutoencoder(Module):\n \"\"\"Sparse Autoencoder Model.\"\"\"\n\n config: SparseAutoencoderConfig\n \"\"\"Model config.\"\"\"\n\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Estimated Geometric Median of the Dataset.\n\n Used for initialising :attr:`tied_bias`.\n \"\"\"\n\n tied_bias: Float[\n Parameter, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Tied Bias Parameter.\n\n The same bias is used pre-encoder and post-decoder.\n \"\"\"\n\n pre_encoder_bias: TiedBias\n \"\"\"Pre-Encoder Bias.\"\"\"\n\n encoder: LinearEncoder\n \"\"\"Encoder.\"\"\"\n\n decoder: UnitNormDecoder\n \"\"\"Decoder.\"\"\"\n\n post_decoder_bias: TiedBias\n \"\"\"Post-Decoder Bias.\"\"\"\n\n def __init__(\n self,\n config: SparseAutoencoderConfig,\n geometric_median_dataset: Float[\n Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ]\n | None = None,\n ) -> None:\n \"\"\"Initialize the Sparse Autoencoder Model.\n\n Args:\n config: Model config.\n geometric_median_dataset: Estimated geometric median of the dataset.\n \"\"\"\n super().__init__()\n\n self.config = config\n\n # Store the geometric median of the dataset (so that we can reset parameters). This is not a\n # parameter itself (the tied bias parameter is used for that), so gradients are disabled.\n tied_bias_shape = shape_with_optional_dimensions(\n config.n_components, config.n_input_features\n )\n if geometric_median_dataset is not None:\n self.geometric_median_dataset = geometric_median_dataset.clone()\n self.geometric_median_dataset.requires_grad = False\n else:\n self.geometric_median_dataset = torch.zeros(tied_bias_shape)\n self.geometric_median_dataset.requires_grad = False\n\n # Initialize the tied bias\n self.tied_bias = Parameter(torch.empty(tied_bias_shape))\n self.initialize_tied_parameters()\n\n # Initialize the components\n self.pre_encoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.PRE_ENCODER)\n\n self.encoder = LinearEncoder(\n input_features=config.n_input_features,\n learnt_features=config.n_learned_features,\n n_components=config.n_components,\n )\n\n self.decoder = UnitNormDecoder(\n learnt_features=config.n_learned_features,\n decoded_features=config.n_input_features,\n n_components=config.n_components,\n )\n\n self.post_decoder_bias = TiedBias(self.tied_bias, TiedBiasPosition.POST_DECODER)\n\n def forward(\n self,\n x: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> ForwardPassResult:\n \"\"\"Forward Pass.\n\n Args:\n x: Input activations (e.g. 
activations from an MLP layer in a transformer model).\n\n Returns:\n Tuple of learned activations and decoded activations.\n \"\"\"\n x = self.pre_encoder_bias(x)\n learned_activations = self.encoder(x)\n x = self.decoder(learned_activations)\n decoded_activations = self.post_decoder_bias(x)\n\n return ForwardPassResult(learned_activations, decoded_activations)\n\n def initialize_tied_parameters(self) -> None:\n \"\"\"Initialize the tied parameters.\"\"\"\n # The tied bias is initialised as the geometric median of the dataset\n self.tied_bias.data = self.geometric_median_dataset\n\n def reset_parameters(self) -> None:\n \"\"\"Reset the parameters.\"\"\"\n self.initialize_tied_parameters()\n for module in self.network:\n if \"reset_parameters\" in dir(module):\n module.reset_parameters()\n\n @property\n def reset_optimizer_parameter_details(self) -> list[ResetOptimizerParameterDetails]:\n \"\"\"Reset optimizer parameter details.\n\n Details of the parameters that should be reset in the optimizer, when resetting\n dictionary vectors.\n\n Returns:\n List of tuples of the form `(parameter, axis)`, where `parameter` is the parameter to\n reset (e.g. encoder.weight), and `axis` is the axis of the parameter to reset.\n \"\"\"\n return (\n self.encoder.reset_optimizer_parameter_details\n + self.decoder.reset_optimizer_parameter_details\n )\n\n def post_backwards_hook(self) -> None:\n \"\"\"Hook to be called after each learning step.\n\n This can be used to e.g. constrain weights to unit norm.\n \"\"\"\n self.decoder.constrain_weights_unit_norm()\n\n @staticmethod\n @validate_call\n def get_single_component_state_dict(\n state: SparseAutoencoderState, component_idx: NonNegativeInt\n ) -> dict[str, Tensor]:\n \"\"\"Get the state dict for a single component.\n\n Args:\n state: Sparse Autoencoder state.\n component_idx: Index of the component to get the state dict for.\n\n Returns:\n State dict for the component.\n\n Raises:\n ValueError: If the state dict doesn't contain a components dimension.\n \"\"\"\n # Check the state has a components dimension\n if state.config.n_components is None:\n error_message = (\n \"Trying to load a single component from the state dict, but the state dict \"\n \"doesn't contain a components dimension.\"\n )\n raise ValueError(error_message)\n\n # Return the state dict for the component\n return {key: value[component_idx] for key, value in state.state_dict.items()}\n\n def save(self, file_path: Path) -> None:\n \"\"\"Save the model config and state dict to a file.\n\n Args:\n file_path: Path to save the model to.\n \"\"\"\n file_path.parent.mkdir(parents=True, exist_ok=True)\n state = SparseAutoencoderState(config=self.config, state_dict=self.state_dict())\n torch.save(state, file_path)\n\n @staticmethod\n def load(\n file_path: FILE_LIKE,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from a file.\n\n Args:\n file_path: Path to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. 
Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n # Load the file\n serialized_state = torch.load(file_path, map_location=torch.device(\"cpu\"))\n state = SparseAutoencoderState.model_validate(serialized_state)\n\n # Initialise the model\n config = SparseAutoencoderConfig(\n n_input_features=state.config.n_input_features,\n n_learned_features=state.config.n_learned_features,\n n_components=state.config.n_components if component_idx is None else None,\n )\n state_dict = (\n SparseAutoencoder.get_single_component_state_dict(state, component_idx)\n if component_idx is not None\n else state.state_dict\n )\n model = SparseAutoencoder(config)\n model.load_state_dict(state_dict)\n\n return model\n\n def save_to_wandb(\n self,\n artifact_name: str,\n directory: DirectoryPath = DEFAULT_TMP_DIR,\n ) -> str:\n \"\"\"Save the model to wandb.\n\n Args:\n artifact_name: A human-readable name for this artifact, which is how you can identify\n this artifact in the UI or reference it in use_artifact calls. Names can contain\n letters, numbers, underscores, hyphens, and dots. The name must be unique across a\n project. Example: \"sweep_name 1e9 activations\".\n directory: Directory to save the model to.\n\n Returns:\n Name of the wandb artifact.\n\n Raises:\n ValueError: If wandb is not initialised.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_name = artifact_name + \".pt\"\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to wandb\n if wandb.run is None:\n error_message = \"Trying to save the model to wandb, but wandb is not initialised.\"\n raise ValueError(error_message)\n artifact = wandb.Artifact(\n artifact_name,\n type=\"model\",\n description=\"Sparse Autoencoder model state, created with `sparse_autoencoder`.\",\n )\n artifact.add_file(str(file_path), name=\"sae-model-state.pt\")\n artifact.save()\n wandb.log_artifact(artifact)\n artifact.wait()\n\n return artifact.source_qualified_name\n\n @staticmethod\n def load_from_wandb(\n wandb_artifact_name: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from wandb.\n\n Args:\n wandb_artifact_name: Name of the wandb artifact to load the model from (e.g.\n \"username/project/artifact_name:version\").\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n api = wandb.Api()\n artifact = api.artifact(wandb_artifact_name, type=\"model\")\n download_path = artifact.download()\n return SparseAutoencoder.load(Path(download_path) / \"sae-model-state.pt\", component_idx)\n\n def save_to_hugging_face(\n self,\n file_name: str,\n repo_id: str,\n directory: DirectoryPath = DEFAULT_TMP_DIR,\n hf_access_token: str | None = None,\n ) -> None:\n \"\"\"Save the model to Hugging Face.\n\n Args:\n file_name: Name of the file (e.g. 
\"model-something.pt\").\n repo_id: ID of the repo to save the model to.\n directory: Directory to save the model to.\n hf_access_token: Hugging Face access token.\n \"\"\"\n # Save the file\n directory.mkdir(parents=True, exist_ok=True)\n file_path = directory / file_name\n self.save(file_path)\n\n # Upload to Hugging Face\n api = HfApi(token=hf_access_token)\n api.upload_file(\n path_or_fileobj=file_path,\n path_in_repo=file_name,\n repo_id=repo_id,\n repo_type=\"model\",\n )\n\n @staticmethod\n def load_from_hugging_face(\n file_name: str,\n repo_id: str,\n component_idx: PositiveInt | None = None,\n ) -> \"SparseAutoencoder\":\n \"\"\"Load the model from Hugging Face.\n\n Args:\n file_name: File name of the .pt state file.\n repo_id: ID of the repo to load the model from.\n component_idx: If loading a state dict from a model that has been trained on multiple\n components (e.g. all MLP layers) you may want to to load just one component. In this\n case you can set `component_idx` to the index of the component to load. Note you\n should not set this if you want to load a state dict from a model that has been\n trained on a single component (or if you want to load all components).\n\n Returns:\n The loaded model.\n \"\"\"\n local_file = hf_hub_download(\n repo_id=repo_id,\n repo_type=\"model\",\n filename=file_name,\n revision=\"main\",\n )\n\n return SparseAutoencoder.load(Path(local_file), component_idx)" }, { "identifier": "AbstractLoss", "path": "sparse_autoencoder/loss/abstract_loss.py", "snippet": "class AbstractLoss(Module, ABC):\n \"\"\"Abstract loss interface.\n\n Interface for implementing batch itemwise loss functions.\n \"\"\"\n\n _modules: dict[str, \"AbstractLoss\"] # type: ignore[assignment] (narrowing)\n \"\"\"Children loss modules.\"\"\"\n\n @abstractmethod\n def log_name(self) -> str:\n \"\"\"Log name.\n\n Returns:\n Name of the loss module for logging.\n \"\"\"\n\n @abstractmethod\n def forward(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL)]:\n \"\"\"Batch itemwise loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n\n Returns:\n Loss per batch item.\n \"\"\"\n\n @final\n def batch_loss(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n batch_reduction: LossReductionType = LossReductionType.MEAN,\n ) -> Float[Tensor, Axis.COMPONENT_OPTIONAL]:\n \"\"\"Batch loss (reduced across the batch axis).\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n batch_reduction: Loss reduction type. 
Typically you would choose LossReductionType.MEAN\n to make the loss independent of the batch size.\n\n Returns:\n Loss for the batch.\n\n Raises:\n ValueError: If the batch reduction type is NONE.\n \"\"\"\n itemwise_loss = self.forward(source_activations, learned_activations, decoded_activations)\n\n # Reduction parameter is over the batch dimension (not the component dimension)\n match batch_reduction:\n case LossReductionType.MEAN:\n return itemwise_loss.mean(dim=0)\n case LossReductionType.SUM:\n return itemwise_loss.sum(dim=0)\n case LossReductionType.NONE:\n error_message = \"Batch reduction type NONE not supported.\"\n raise ValueError(error_message)\n\n def scalar_loss_with_log(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n batch_reduction: LossReductionType = LossReductionType.MEAN,\n component_reduction: LossReductionType = LossReductionType.NONE,\n ) -> LossResultWithMetrics:\n \"\"\"Scalar loss (reduced across the batch and component axis) with logging.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n batch_reduction: Batch reduction type. Typically you would choose LossReductionType.MEAN\n to make the loss independent of the batch size.\n component_reduction: Component reduction type.\n\n Returns:\n Tuple of the batch scalar loss and a dict of any properties to log.\n \"\"\"\n children_loss_scalars: list[\n Float[Tensor, Axis.COMPONENT] | Float[Tensor, Axis.SINGLE_ITEM]\n ] = []\n metrics: list[MetricResult] = []\n\n # If the loss module has children (e.g. 
it is a reducer):\n if len(self._modules) > 0:\n for loss_module in self._modules.values():\n child_loss, child_metrics = loss_module.scalar_loss_with_log(\n source_activations,\n learned_activations,\n decoded_activations,\n batch_reduction=batch_reduction,\n # Note we don't pass through component reduction, as that would prevent logging\n # component-wise losses in reducers.\n )\n children_loss_scalars.append(child_loss)\n metrics.extend(child_metrics)\n\n # Get the total loss & metric\n current_module_loss = torch.stack(children_loss_scalars).sum(0)\n\n # Otherwise if it is a leaf loss module:\n else:\n current_module_loss = self.batch_loss(\n source_activations, learned_activations, decoded_activations, batch_reduction\n )\n # Add in the current loss module's metric\n log = MetricResult(\n location=MetricLocation.TRAIN,\n name=\"loss\",\n postfix=self.log_name(),\n component_wise_values=current_module_loss.unsqueeze(0)\n if current_module_loss.ndim == 0\n else current_module_loss,\n )\n metrics.append(log)\n\n # Reduce the current module loss across the component dimension\n match component_reduction:\n case LossReductionType.MEAN:\n current_module_loss = current_module_loss.mean(0)\n case LossReductionType.SUM:\n current_module_loss = current_module_loss.sum(0)\n case LossReductionType.NONE:\n pass\n\n return LossResultWithMetrics(loss=current_module_loss, loss_metrics=metrics)\n\n @final\n def __call__(\n self,\n source_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n reduction: LossReductionType = LossReductionType.MEAN,\n ) -> LossResultWithMetrics:\n \"\"\"Batch scalar loss.\n\n Args:\n source_activations: Source activations (input activations to the autoencoder from the\n source model).\n learned_activations: Learned activations (intermediate activations in the autoencoder).\n decoded_activations: Decoded activations.\n reduction: Loss reduction type. 
Typically you would choose LossReductionType.MEAN to\n make the loss independent of the batch size.\n\n Returns:\n Tuple of the batch scalar loss and a dict of any properties to log.\n \"\"\"\n return self.scalar_loss_with_log(\n source_activations, learned_activations, decoded_activations, reduction\n )" }, { "identifier": "LossReductionType", "path": "sparse_autoencoder/loss/abstract_loss.py", "snippet": "class LossReductionType(LowercaseStrEnum):\n \"\"\"Loss reduction type.\"\"\"\n\n MEAN = \"mean\"\n\n SUM = \"sum\"\n\n NONE = \"none\"" }, { "identifier": "MetricsContainer", "path": "sparse_autoencoder/metrics/metrics_container.py", "snippet": "class MetricsContainer:" }, { "identifier": "TrainMetricData", "path": "sparse_autoencoder/metrics/train/abstract_train_metric.py", "snippet": "class TrainMetricData:\n \"\"\"Train metric data.\"\"\"\n\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Input activations.\"\"\"\n\n learned_activations: Float[Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.LEARNT_FEATURE)]\n \"\"\"Learned activations.\"\"\"\n\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT, Axis.INPUT_OUTPUT_FEATURE)\n ]\n \"\"\"Decoded activations.\"\"\"\n\n def __init__(\n self,\n input_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n learned_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE)\n ],\n decoded_activations: Float[\n Tensor, Axis.names(Axis.BATCH, Axis.COMPONENT_OPTIONAL, Axis.INPUT_OUTPUT_FEATURE)\n ],\n ) -> None:\n \"\"\"Initialize the train metric data.\"\"\"\n self.input_activations = add_component_axis_if_missing(\n input_activations, dimensions_without_component=2\n ).detach()\n self.learned_activations = add_component_axis_if_missing(\n learned_activations, dimensions_without_component=2\n ).detach()\n self.decoded_activations = add_component_axis_if_missing(\n decoded_activations, dimensions_without_component=2\n ).detach()" }, { "identifier": "ValidationMetricData", "path": "sparse_autoencoder/metrics/validate/abstract_validate_metric.py", "snippet": "class ValidationMetricData:\n \"\"\"Validation metric data.\n\n Dataclass that always has a component axis.\n \"\"\"\n\n source_model_loss: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT)]\n \"\"\"Source model loss (without the SAE).\"\"\"\n\n source_model_loss_with_reconstruction: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT)]\n \"\"\"Source model loss with SAE reconstruction.\"\"\"\n\n source_model_loss_with_zero_ablation: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT)]\n \"\"\"Source model loss with zero ablation.\"\"\"\n\n def __init__(\n self,\n source_model_loss: Float[Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT_OPTIONAL)],\n source_model_loss_with_reconstruction: Float[\n Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT_OPTIONAL)\n ],\n source_model_loss_with_zero_ablation: Float[\n Tensor, Axis.names(Axis.ITEMS, Axis.COMPONENT_OPTIONAL)\n ],\n ) -> None:\n \"\"\"Initialize the validation metric data.\"\"\"\n self.source_model_loss = add_component_axis_if_missing(source_model_loss).detach()\n self.source_model_loss_with_reconstruction = add_component_axis_if_missing(\n source_model_loss_with_reconstruction\n ).detach()\n self.source_model_loss_with_zero_ablation = add_component_axis_if_missing(\n source_model_loss_with_zero_ablation\n ).detach()" }, { "identifier": 
"AbstractOptimizerWithReset", "path": "sparse_autoencoder/optimizer/abstract_optimizer.py", "snippet": "class AbstractOptimizerWithReset(Optimizer, ABC):\n \"\"\"Abstract optimizer with reset.\n\n When implementing this interface, we recommend adding a `named_parameters` argument to the\n constructor, which can be obtained from `named_parameters=model.named_parameters()` by the end\n user. This is so that the optimizer can find the parameters to reset.\n \"\"\"\n\n @abstractmethod\n def reset_state_all_parameters(self) -> None:\n \"\"\"Reset the state for all parameters.\n\n Resets any optimizer state (e.g. momentum). This is for use after manually editing model\n parameters (e.g. with activation resampling).\n \"\"\"\n\n @abstractmethod\n def reset_neurons_state(\n self,\n parameter: Parameter,\n neuron_indices: Int64[Tensor, Axis.names(Axis.COMPONENT_OPTIONAL, Axis.LEARNT_FEATURE_IDX)],\n axis: int,\n component_idx: int,\n ) -> None:\n \"\"\"Reset the state for specific neurons, on a specific parameter.\n\n Args:\n parameter: The parameter to reset, e.g. `encoder.Linear.weight`, `encoder.Linear.bias`,\n neuron_indices: The indices of the neurons to reset.\n axis: The axis of the parameter to reset.\n component_idx: The component index of the state values to reset.\n\n Raises:\n ValueError: If the parameter name is not found.\n \"\"\"" }, { "identifier": "SourceDataset", "path": "sparse_autoencoder/source_data/abstract_dataset.py", "snippet": "class SourceDataset(ABC, Generic[HuggingFaceDatasetItem]):\n \"\"\"Abstract source dataset.\n\n Source dataset that is used to generate the activations dataset (by running forward passes of\n the source model with this data). It should contain prompts that have been tokenized with no\n padding tokens (apart from an optional single first padding token). This enables efficient\n generation of the activations dataset.\n\n Wraps an HuggingFace IterableDataset.\n \"\"\"\n\n context_size: int\n \"\"\"Number of tokens in the context window.\n\n The paper *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning* used\n a context size of 250.\n \"\"\"\n\n dataset: Dataset | IterableDataset\n \"\"\"Underlying HuggingFace Dataset.\n\n Warning:\n Hugging Face `Dataset` objects are confusingly not the same as PyTorch `Dataset` objects.\n \"\"\"\n\n _dataset_column_name: str\n \"\"\"Dataset column name for the prompts.\"\"\"\n\n @abstractmethod\n def preprocess(\n self,\n source_batch: HuggingFaceDatasetItem,\n *,\n context_size: int,\n ) -> TokenizedPrompts:\n \"\"\"Preprocess function.\n\n Takes a `preprocess_batch_size` ($m$) batch of source data (which may e.g. include string\n prompts), and returns a dict with a single key of `input_ids` and a value of an arbitrary\n length list ($n$) of tokenized prompts. Note that $m$ does not have to be equal to $n$.\n\n Applied to the dataset with the [Hugging Face\n Dataset](https://huggingface.co/docs/datasets/v2.14.5/en/package_reference/main_classes#datasets.Dataset.map)\n `map` function.\n\n Warning:\n The returned tokenized prompts should not have any padding tokens (apart from an\n optional single first padding token).\n\n Args:\n source_batch: A batch of source data. 
For example, with The Pile dataset this would be a\n dict including the key \"text\" with a value of a list of strings (not yet tokenized).\n context_size: The context size to use when returning a list of tokenized prompts.\n *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning* used\n a context size of 250.\n\n Returns:\n Tokenized prompts.\n \"\"\"\n\n @abstractmethod\n @validate_call\n def __init__(\n self,\n dataset_path: str,\n dataset_split: str,\n context_size: PositiveInt,\n buffer_size: PositiveInt = 1000,\n dataset_dir: str | None = None,\n dataset_files: str | Sequence[str] | Mapping[str, str | Sequence[str]] | None = None,\n dataset_column_name: str = \"input_ids\",\n n_processes_preprocessing: PositiveInt | None = None,\n preprocess_batch_size: PositiveInt = 1000,\n *,\n pre_download: bool = False,\n ):\n \"\"\"Initialise the dataset.\n\n Loads the dataset with streaming from HuggingFace, dds preprocessing and shuffling to the\n underlying Hugging Face `IterableDataset`.\n\n Args:\n dataset_path: The path to the dataset on Hugging Face.\n dataset_split: Dataset split (e.g. `train`).\n context_size: The context size to use when returning a list of tokenized prompts.\n *Towards Monosemanticity: Decomposing Language Models With Dictionary Learning* used\n a context size of 250.\n buffer_size: The buffer size to use when shuffling the dataset when streaming. When\n streaming a dataset, this just pre-downloads at least `buffer_size` items and then\n shuffles just that buffer. Note that the generated activations should also be\n shuffled before training the sparse autoencoder, so a large buffer may not be\n strictly necessary here. Note also that this is the number of items in the dataset\n (e.g. number of prompts) and is typically significantly less than the number of\n tokenized prompts once the preprocessing function has been applied.\n dataset_dir: Defining the `data_dir` of the dataset configuration.\n dataset_files: Path(s) to source data file(s).\n dataset_column_name: The column name for the prompts.\n n_processes_preprocessing: The number of processes to use for preprocessing.\n preprocess_batch_size: The batch size to use just for preprocessing the dataset (e.g.\n tokenizing prompts).\n pre_download: Whether to pre-download the whole dataset.\n\n Raises:\n TypeError: If the loaded dataset is not a Hugging Face `Dataset` or `IterableDataset`.\n \"\"\"\n self.context_size = context_size\n self._dataset_column_name = dataset_column_name\n\n # Load the dataset\n should_stream = not pre_download\n dataset = load_dataset(\n dataset_path,\n streaming=should_stream,\n split=dataset_split,\n data_dir=dataset_dir,\n data_files=dataset_files,\n verification_mode=VerificationMode.NO_CHECKS, # As it fails when data_files is set\n )\n\n # Setup preprocessing (we remove all columns except for input ids)\n remove_columns: list[str] = list(next(iter(dataset)).keys())\n if \"input_ids\" in remove_columns:\n remove_columns.remove(\"input_ids\")\n\n if pre_download:\n if not isinstance(dataset, Dataset):\n error_message = (\n f\"Expected Hugging Face dataset to be a Dataset when pre-downloading, but got \"\n f\"{type(dataset)}.\"\n )\n raise TypeError(error_message)\n\n # Download the whole dataset\n mapped_dataset = dataset.map(\n self.preprocess,\n batched=True,\n batch_size=preprocess_batch_size,\n fn_kwargs={\"context_size\": context_size},\n remove_columns=remove_columns,\n num_proc=n_processes_preprocessing,\n )\n self.dataset = mapped_dataset.shuffle()\n 
else:\n # Setup approximate shuffling. As the dataset is streamed, this just pre-downloads at\n # least `buffer_size` items and then shuffles just that buffer.\n # https://huggingface.co/docs/datasets/v2.14.5/stream#shuffle\n if not isinstance(dataset, IterableDataset):\n error_message = (\n f\"Expected Hugging Face dataset to be an IterableDataset when streaming, but \"\n f\"got {type(dataset)}.\"\n )\n raise TypeError(error_message)\n\n mapped_dataset = dataset.map(\n self.preprocess,\n batched=True,\n batch_size=preprocess_batch_size,\n fn_kwargs={\"context_size\": context_size},\n remove_columns=remove_columns,\n )\n self.dataset = mapped_dataset.shuffle(buffer_size=buffer_size) # type: ignore\n\n @final\n def __iter__(self) -> Any: # noqa: ANN401\n \"\"\"Iterate Dunder Method.\n\n Enables direct access to :attr:`dataset` with e.g. `for` loops.\n \"\"\"\n return self.dataset.__iter__()\n\n @final\n def get_dataloader(\n self, batch_size: int, num_workers: NonNegativeInt = 0\n ) -> DataLoader[TorchTokenizedPrompts]:\n \"\"\"Get a PyTorch DataLoader.\n\n Args:\n batch_size: The batch size to use.\n num_workers: Number of CPU workers.\n\n Returns:\n PyTorch DataLoader.\n \"\"\"\n torch_dataset: TorchDataset[TorchTokenizedPrompts] = self.dataset.with_format(\"torch\") # type: ignore\n\n return DataLoader[TorchTokenizedPrompts](\n torch_dataset,\n batch_size=batch_size,\n # Shuffle is most efficiently done with the `shuffle` method on the dataset itself, not\n # here.\n shuffle=False,\n num_workers=num_workers,\n )" }, { "identifier": "TorchTokenizedPrompts", "path": "sparse_autoencoder/source_data/abstract_dataset.py", "snippet": "class TorchTokenizedPrompts(TypedDict):\n \"\"\"Tokenized prompts prepared for PyTorch.\"\"\"\n\n input_ids: Int[Tensor, Axis.names(Axis.SOURCE_DATA_BATCH, Axis.POSITION)]" }, { "identifier": "replace_activations_hook", "path": "sparse_autoencoder/source_model/replace_activations_hook.py", "snippet": "def replace_activations_hook(\n value: Tensor,\n hook: HookPoint, # noqa: ARG001\n sparse_autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine,\n component_idx: int | None = None,\n n_components: int | None = None,\n) -> Tensor:\n \"\"\"Replace activations hook.\n\n This should be pre-initialised with `functools.partial`.\n\n Args:\n value: The activations to replace.\n hook: The hook point.\n sparse_autoencoder: The sparse autoencoder.\n component_idx: The component index to replace the activations with, if just replacing\n activations for a single component. Requires the model to have a component axis.\n n_components: The number of components that the SAE is trained on.\n\n Returns:\n Replaced activations.\n\n Raises:\n RuntimeError: If `component_idx` is specified, but the model does not have a component\n \"\"\"\n # Squash to just have a \"*items\" and a \"batch\" dimension\n original_shape = value.shape\n\n squashed_value: Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)] = value.view(\n -1, value.size(-1)\n )\n\n if component_idx is not None:\n if n_components is None:\n error_message = \"The number of model components must be set if component_idx is set.\"\n raise RuntimeError(error_message)\n\n # The approach here is to run a forward pass with dummy values for all components other than\n # the one we want to replace. This is done by expanding the inputs to the SAE for a specific\n # component across all components. 
We then simply discard the activations for all other\n # components.\n expanded_shape = [\n squashed_value.shape[0],\n n_components,\n squashed_value.shape[-1],\n ]\n expanded = squashed_value.unsqueeze(1).expand(*expanded_shape)\n\n _learned_activations, output_activations = sparse_autoencoder.forward(expanded)\n component_output_activations = output_activations[:, component_idx]\n\n return component_output_activations.view(*original_shape)\n\n # Get the output activations from a forward pass of the SAE\n _learned_activations, output_activations = sparse_autoencoder.forward(squashed_value)\n\n # Reshape to the original shape\n return output_activations.view(*original_shape)" }, { "identifier": "store_activations_hook", "path": "sparse_autoencoder/source_model/store_activations_hook.py", "snippet": "def store_activations_hook(\n value: Float[Tensor, Axis.names(Axis.ANY)],\n hook: HookPoint, # noqa: ARG001\n store: ActivationStore,\n reshape_method: ReshapeActivationsFunction = reshape_to_last_dimension,\n component_idx: int = 0,\n) -> Float[Tensor, Axis.names(Axis.ANY)]:\n \"\"\"Store Activations Hook.\n\n Useful for getting just the specific activations wanted, rather than the full cache.\n\n Example:\n First we'll need a source model from TransformerLens and an activation store.\n\n >>> from functools import partial\n >>> from transformer_lens import HookedTransformer\n >>> from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore\n >>> store = TensorActivationStore(max_items=1000, n_neurons=64, n_components=1)\n >>> model = HookedTransformer.from_pretrained(\"tiny-stories-1M\")\n Loaded pretrained model tiny-stories-1M into HookedTransformer\n\n Next we can add the hook to specific neurons (in this case the first MLP neurons), and\n create the tokens for a forward pass.\n\n >>> model.add_hook(\n ... \"blocks.0.hook_mlp_out\", partial(store_activations_hook, store=store)\n ... )\n >>> tokens = model.to_tokens(\"Hello world\")\n >>> tokens.shape\n torch.Size([1, 3])\n\n Then when we run the model, we should get one activation vector for each token (as we just\n have one batch item). Note we also set `stop_at_layer=1` as we don't need the logits or any\n other activations after the hook point that we've specified (in this case the first MLP\n layer).\n\n >>> _output = model.forward(\"Hello world\", stop_at_layer=1) # Change this layer as required\n >>> len(store)\n 3\n\n Args:\n value: The activations to store.\n hook: The hook point.\n store: The activation store. 
This should be pre-initialised with `functools.partial`.\n reshape_method: The method to reshape the activations before storing them.\n component_idx: The component index of the activations to store.\n\n Returns:\n Unmodified activations.\n \"\"\"\n reshaped: Float[\n Tensor, Axis.names(Axis.STORE_BATCH, Axis.INPUT_OUTPUT_FEATURE)\n ] = reshape_method(value)\n\n store.extend(reshaped, component_idx=component_idx)\n\n # Return the unmodified value\n return value" }, { "identifier": "zero_ablate_hook", "path": "sparse_autoencoder/source_model/zero_ablate_hook.py", "snippet": "def zero_ablate_hook(\n value: Tensor,\n hook: HookPoint, # noqa: ARG001\n) -> Tensor:\n \"\"\"Zero ablate hook.\n\n Args:\n value: The activations to store.\n hook: The hook point.\n\n Example:\n >>> dummy_hook_point = HookPoint()\n >>> value = torch.ones(2, 3)\n >>> zero_ablate_hook(value, dummy_hook_point)\n tensor([[0., 0., 0.],\n [0., 0., 0.]])\n\n Returns:\n Replaced activations.\n \"\"\"\n return torch.zeros_like(value)" }, { "identifier": "Axis", "path": "sparse_autoencoder/tensor_types.py", "snippet": "class Axis(LowercaseStrEnum):\n \"\"\"Tensor axis names.\n\n Used to annotate tensor types.\n\n Example:\n When used directly it prints a string:\n\n >>> print(Axis.INPUT_OUTPUT_FEATURE)\n input_output_feature\n\n The primary use is to annotate tensor types:\n\n >>> from jaxtyping import Float\n >>> from torch import Tensor\n >>> from typing import TypeAlias\n >>> batch: TypeAlias = Float[Tensor, Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE)]\n >>> print(batch)\n <class 'jaxtyping.Float[Tensor, 'batch input_output_feature']'>\n\n You can also join multiple axis together to represent the dimensions of a tensor:\n\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n \"\"\"\n\n # Component idx\n COMPONENT = auto()\n \"\"\"Component index.\"\"\"\n\n COMPONENT_OPTIONAL = \"*component\"\n \"\"\"Optional component index.\"\"\"\n\n # Batches\n SOURCE_DATA_BATCH = auto()\n \"\"\"Batch of prompts used to generate source model activations.\"\"\"\n\n BATCH = auto()\n \"\"\"Batch of items that the SAE is being trained on.\"\"\"\n\n STORE_BATCH = auto()\n \"\"\"Batch of items to be written to the store.\"\"\"\n\n ITEMS = auto()\n \"\"\"Arbitrary number of items.\"\"\"\n\n # Features\n INPUT_OUTPUT_FEATURE = auto()\n \"\"\"Input or output feature (e.g. feature in activation vector from source model).\"\"\"\n\n LEARNT_FEATURE = auto()\n \"\"\"Learn feature (e.g. 
feature in learnt activation vector).\"\"\"\n\n DEAD_FEATURE = auto()\n \"\"\"Dead feature.\"\"\"\n\n ALIVE_FEATURE = auto()\n \"\"\"Alive feature.\"\"\"\n\n # Feature indices\n INPUT_OUTPUT_FEATURE_IDX = auto()\n \"\"\"Input or output feature index.\"\"\"\n\n LEARNT_FEATURE_IDX = auto()\n \"\"\"Learn feature index.\"\"\"\n\n # Other\n POSITION = auto()\n \"\"\"Token position.\"\"\"\n\n SINGLE_ITEM = \"\"\n \"\"\"Single item axis.\"\"\"\n\n ANY = \"...\"\n \"\"\"Any number of axis.\"\"\"\n\n @staticmethod\n def names(*axis: \"Axis\") -> str:\n \"\"\"Join multiple axis together, to represent the dimensions of a tensor.\n\n Example:\n >>> print(Axis.names(Axis.BATCH, Axis.INPUT_OUTPUT_FEATURE))\n batch input_output_feature\n\n Args:\n *axis: Axis to join.\n\n Returns:\n Joined axis string.\n \"\"\"\n return \" \".join(a.value for a in axis)" }, { "identifier": "get_model_device", "path": "sparse_autoencoder/train/utils/get_model_device.py", "snippet": "def get_model_device(model: Module | DataParallel | DeepSpeedEngine) -> torch.device:\n \"\"\"Get the device on which a PyTorch model is on.\n\n Args:\n model: The PyTorch model.\n\n Returns:\n The device ('cuda' or 'cpu') where the model is located.\n\n Raises:\n ValueError: If the model has no parameters.\n \"\"\"\n # Deepspeed models already have a device property, so just return that\n if hasattr(model, \"device\"):\n return model.device\n\n # Check if the model has parameters\n if len(list(model.parameters())) == 0:\n exception_message = \"The model has no parameters.\"\n raise ValueError(exception_message)\n\n # Return the device of the first parameter\n return next(model.parameters()).device" } ]
from collections.abc import Iterator from functools import partial from pathlib import Path from tempfile import gettempdir from typing import TYPE_CHECKING, final from deepspeed import DeepSpeedEngine from jaxtyping import Float, Int, Int64 from pydantic import NonNegativeInt, PositiveInt, validate_call from torch import Tensor from torch.nn.parallel import DataParallel from torch.optim.lr_scheduler import LRScheduler from torch.utils.data import DataLoader from tqdm.auto import tqdm from transformer_lens import HookedTransformer from sparse_autoencoder.activation_resampler.activation_resampler import ( ActivationResampler, ParameterUpdateResults, ) from sparse_autoencoder.activation_store.tensor_store import TensorActivationStore from sparse_autoencoder.autoencoder.model import SparseAutoencoder from sparse_autoencoder.loss.abstract_loss import AbstractLoss, LossReductionType from sparse_autoencoder.metrics.metrics_container import MetricsContainer, default_metrics from sparse_autoencoder.metrics.train.abstract_train_metric import TrainMetricData from sparse_autoencoder.metrics.validate.abstract_validate_metric import ValidationMetricData from sparse_autoencoder.optimizer.abstract_optimizer import AbstractOptimizerWithReset from sparse_autoencoder.source_data.abstract_dataset import SourceDataset, TorchTokenizedPrompts from sparse_autoencoder.source_model.replace_activations_hook import replace_activations_hook from sparse_autoencoder.source_model.store_activations_hook import store_activations_hook from sparse_autoencoder.source_model.zero_ablate_hook import zero_ablate_hook from sparse_autoencoder.tensor_types import Axis from sparse_autoencoder.train.utils.get_model_device import get_model_device from sparse_autoencoder.metrics.abstract_metric import MetricResult import torch import wandb
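The import_statement field above holds the cross-file imports that the completion target depends on, and each in-repo name it binds corresponds to one identifier entry in the record's context list. Below is a minimal sketch of recovering those names with Python's ast module; the import_block sample is abbreviated from the field above, and the helper name imported_names is hypothetical.

import ast

# Abbreviated sample of the import block shown above (assumption: the
# import_statement field holds plain Python source).
import_block = (
    "from sparse_autoencoder.autoencoder.model import SparseAutoencoder\n"
    "from sparse_autoencoder.loss.abstract_loss import AbstractLoss, LossReductionType\n"
    "import torch\n"
)

def imported_names(source: str) -> list[str]:
    """Collect the names bound by import and from-import statements."""
    names: list[str] = []
    for node in ast.walk(ast.parse(source)):
        if isinstance(node, (ast.Import, ast.ImportFrom)):
            names.extend(alias.asname or alias.name for alias in node.names)
    return names

print(imported_names(import_block))
# ['SparseAutoencoder', 'AbstractLoss', 'LossReductionType', 'torch']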
18,523
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stope the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use."""
"""Default pipeline.""" if TYPE_CHECKING: DEFAULT_CHECKPOINT_DIRECTORY: Path = Path(gettempdir()) / "sparse_autoencoder" class Pipeline: """Pipeline for training a Sparse Autoencoder on TransformerLens activations. Includes all the key functionality to train a sparse autoencoder, with a specific set of hyperparameters. """ activation_resampler: ActivationResampler | None """Activation resampler to use.""" autoencoder: SparseAutoencoder | DataParallel[SparseAutoencoder] | DeepSpeedEngine """Sparse autoencoder to train.""" n_input_features: int """Number of input features in the sparse autoencoder.""" n_learned_features: int """Number of learned features in the sparse autoencoder.""" cache_names: list[str] """Names of the cache hook points to use in the source model.""" layer: int """Layer to stope the source model at (if we don't need activations after this layer).""" log_frequency: int """Frequency at which to log metrics (in steps).""" loss: AbstractLoss """Loss function to use."""
metrics: MetricsContainer
6
2023-10-27 07:37:15+00:00
24k
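Read together, the fields of the record above fit as follows: cropped_code ends just before the line to be predicted, next_line ("metrics: MetricsContainer") is that ground-truth line, and gold_snippet_index (6 for this record) marks which context entry supplies it, which lines up with the MetricsContainer entry above. Below is a minimal consistency check over a toy record with hypothetical, abbreviated values.

# Toy record with abbreviated, hypothetical values; real records carry the
# full snippets shown above.
record = {
    "context": [
        {"identifier": "ActivationResampler", "path": "...", "snippet": "..."},
        {"identifier": "MetricsContainer", "path": "...", "snippet": "..."},
    ],
    "cropped_code": 'loss: AbstractLoss\n"""Loss function to use."""',
    "next_line": "metrics: MetricsContainer",
    "gold_snippet_index": 1,  # 6 in the real record above
}

def gold_identifier(rec: dict) -> str:
    """Identifier of the context entry the record marks as gold."""
    return rec["context"][rec["gold_snippet_index"]]["identifier"]

# The gold snippet's identifier should appear in the line to be predicted,
# and the ground-truth next line should not already be in the visible prefix.
assert gold_identifier(record) in record["next_line"]
assert record["next_line"] not in record["cropped_code"]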
OATML-Markslab/ProteinNPT
scripts/train.py
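A record of this shape (repo_name and file_path above, followed by context, import_statement, cropped_code and next_line) bundles everything needed to pose a repository-level next-line completion task. The sketch below shows one plausible, purely illustrative way to assemble a prompt from such a record; no particular benchmark format is implied, and build_prompt is a hypothetical helper.

def build_prompt(rec: dict, max_context_snippets: int = 3) -> str:
    """Concatenate retrieved context snippets, the import block, and the code prefix."""
    retrieved = rec["context"][:max_context_snippets]
    context_block = "\n\n".join(
        f"# Path: {entry['path']}\n{entry['snippet']}" for entry in retrieved
    )
    return f"{context_block}\n\n{rec['import_statement']}\n{rec['cropped_code']}"

# Minimal usage example; a completion model would then be asked to produce
# the record's next_line given this prompt.
toy_record = {
    "context": [
        {
            "identifier": "ProteinNPTModel",
            "path": "proteinnpt/model.py",
            "snippet": "class ProteinNPTModel(nn.Module): ...",
        }
    ],
    "import_statement": "import torch",
    "cropped_code": "def main():",
}
print(build_prompt(toy_record))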
[ { "identifier": "ProteinNPTModel", "path": "proteinnpt/model.py", "snippet": "class ProteinNPTModel(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names_input = self.args.target_config.keys()\n self.target_names = [x for x in self.args.target_config.keys() if self.args.target_config[x][\"in_NPT_loss\"]]\n self.num_targets_input = len(self.target_names_input) #Includes all targets, incl. zero-shot fitness predictions\n self.num_targets = len(self.target_names) #Number of actual targets we want to predict\n self.MSA_sample_sequences = None\n self.training_sample_sequences_indices = None\n self.device = None\n self.optimizer = None\n self.model_type = args.model_type\n self.PNPT_ensemble_test_num_seeds = -1\n self.PNPT_no_reconstruction_error = False\n self.deactivate_col_attention = False\n self.tranception_attention = False\n \n assert self.args.embed_dim % self.args.attention_heads ==0, \"Embedding size {} needs to be a multiple of number of heads {}\".format(self.args.embed_dim, self.args.attention_heads)\n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n self.aa_embedding_dim = self.aa_embedding.embed_tokens.weight.shape[-1]\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = self.alphabet\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n )\n self.aa_positions_embedding = LearnedPositionalEmbedding(\n self.args.max_positions,\n self.args.embed_dim,\n self.padding_idx,\n )\n self.aa_embedding_dim = self.args.embed_dim\n\n if self.aa_embedding_dim != self.args.embed_dim: #Need to project internally\n self.token_embedding_projection = nn.Linear(\n self.aa_embedding_dim,\n self.args.embed_dim\n )\n self.token_embedding_expansion = nn.Linear(\n self.args.embed_dim,\n self.aa_embedding_dim\n )\n\n self.target_embedding = nn.ModuleDict(\n { \n target_name:\n nn.Linear(\n self.args.target_config[target_name][\"dim\"] + 1, #Need to add one as we append the mask flag to each input target \n self.args.embed_dim\n )\n if self.args.target_config[target_name][\"type\"]==\"continuous\"\n else \n nn.Embedding(\n self.args.target_config[target_name][\"dim\"],\n self.args.embed_dim\n )\n for target_name in self.target_names_input\n }\n )\n \n self.dropout_module = nn.Dropout(self.args.dropout)\n\n self.layers = nn.ModuleList(\n [\n AxialTransformerLayer(\n self.args.embed_dim,\n self.args.ffn_embed_dim,\n self.args.attention_heads,\n self.args.dropout,\n self.args.attention_dropout,\n self.args.activation_dropout,\n getattr(self.args, \"max_tokens_per_msa\", 
self.args.max_tokens_per_msa),\n self.deactivate_col_attention,\n self.tranception_attention,\n self.num_targets_input,\n )\n for _ in range(self.args.num_protein_npt_layers)\n ]\n )\n self.emb_layer_norm_before = ESM1bLayerNorm(self.args.embed_dim)\n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n weight = self.aa_embedding.embed_tokens.weight\n elif self.args.aa_embeddings == \"Tranception\":\n weight = self.aa_embedding.lm_head.weight\n else:\n weight = self.aa_embedding.weight\n\n self.lm_head = RobertaLMHead(\n embed_dim=self.aa_embedding_dim,\n output_dim=self.alphabet_size,\n weight=weight\n )\n \n target_pred_input_dim = self.args.embed_dim\n\n if args.target_prediction_model==\"MLP\": \n self.layer_pre_head = nn.ModuleDict(\n {\n target_name:\n nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n ) \n for target_name in self.target_names\n }\n )\n \n if args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n \n if args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n \n if self.args.target_prediction_head == \"Target_embeddings_only\":\n target_pred_input_dim = target_pred_input_dim\n elif self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\":\n target_pred_input_dim = target_pred_input_dim * (1 + self.num_targets_input)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1e-4)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n \n def forward(self, tokens, targets=None, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[], need_head_weights=False):\n padding_mask = tokens.eq(self.padding_idx) \n if not padding_mask.any(): padding_mask = None\n \n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size() # N, B, L (seqs with labels, seqs in MSA, seq length)\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size() # N, L (seqs with labels, seq length)\n \n if sequence_embeddings is not None:\n 
x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # N, B, L, D\n x = x[:,0,:,:] # N, L, D. #In each MSA batch the first sequence is what we care about. The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n x = x + self.aa_positions_embedding(tokens.view(batch_size, seqlen)).view(x.size()) # Need position embedding in PNPT since we will apply axial attention\n else:\n print(\"AA embeddings not recognized\")\n sys.exit(0)\n \n if self.aa_embedding_dim != self.args.embed_dim: x = self.token_embedding_projection(x)\n \n if self.args.target_prediction_head != \"Target_embeddings_and_AA_embeddings_mean_pooled\": #We mix AA embeddings pre NPT\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n\n x = x.view(1, batch_size, seqlen, self.args.embed_dim) # 1, N, L, D\n \n #Dimensions for each target (there are self.num_targets of them):\n y = []\n for target_name in self.target_names_input:\n num_sequences_with_target, dim_targets = targets[target_name].shape # N, D_t #In most cases dim_targets = D_t = 2 (original dimension of continuous input + 1 dim for mask)\n y.append(self.target_embedding[target_name](targets[target_name]).view(num_sequences_with_target,1,self.args.embed_dim))\n y = torch.cat(y, dim=-2) #concatenate across second to last dimension # N, num_targets, D\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim), \"Error in y shape: {}\".format(y.shape)\n y = y.view(1, num_sequences_with_target, self.num_targets_input, self.args.embed_dim) # 1, N, num_targets, D\n \n #Concatenate AA tokens and targets\n x = torch.cat((x,y),dim=-2) # 1, N, (L+num_targets), D\n x = self.emb_layer_norm_before(x)\n x = self.dropout_module(x)\n\n if padding_mask is not None:\n padding_mask_with_targets = torch.zeros(num_MSAs_in_batch, num_sequences_in_alignments, seqlen + self.num_targets_input)\n padding_mask_with_targets[...,:seqlen] = padding_mask\n padding_mask = padding_mask_with_targets\n x = x * (1 - padding_mask.unsqueeze(-1).type_as(x))\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers: hidden_representations[0] = x\n if need_head_weights:\n row_attn_weights = []\n col_attn_weights = []\n\n # 1 x N x L x D -> N x L x 1 x D\n x = x.permute(1, 2, 0, 3)\n for layer_idx, layer in enumerate(self.layers):\n x = layer(\n x,\n self_attn_padding_mask=padding_mask,\n need_head_weights=need_head_weights,\n )\n if need_head_weights:\n x, col_attn, row_attn = x\n col_attn_weights.append(col_attn.permute(2, 0, 1, 3, 4).cpu())\n row_attn_weights.append(row_attn.permute(1, 0, 2, 3).cpu())\n if (layer_idx + 1) in repr_layers:\n hidden_representations[layer_idx + 1] = x.permute(2, 0, 1, 3)\n x = self.emb_layer_norm_after(x)\n x = x.permute(2, 0, 1, 3) # N x L x 1 x D -> 1 x N x L x D\n assert x.shape == (1, num_sequences_with_target, seqlen + 
self.num_targets_input, self.args.embed_dim), \"Error with axial transformer\"\n # last hidden representation should have layer norm applied\n if (layer_idx + 1) in repr_layers: hidden_representations[layer_idx + 1] = x\n \n # Loss over NPT MLM objective\n if self.aa_embedding_dim != self.args.embed_dim:\n logits_protein_sequence = self.lm_head(self.token_embedding_expansion(x[...,:seqlen,:]))\n else:\n logits_protein_sequence = self.lm_head(x[...,:seqlen,:]) #Remove dependency on targets for final AA predictions. logits size: (1, N, L, Vocab)\n \n x = x.view(num_sequences_with_target, seqlen + self.num_targets_input, self.args.embed_dim)\n x, y = x[:,:seqlen,:], x[:,seqlen:,:] # (N,L,D) and (N,num_targets,D)\n assert y.shape == (num_sequences_with_target, self.num_targets_input, self.args.embed_dim)\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n x = x.mean(dim=-2) # N, D\n y = y.view(num_sequences_with_target,self.num_targets_input * self.args.embed_dim)\n y = torch.cat((x,y),dim=-1) # N, (1+num_targets) * D\n \n target_predictions = {}\n for target_index, target_name in enumerate(self.target_names):\n if self.args.target_prediction_head == \"Target_embeddings_and_AA_embeddings_mean_pooled\": \n target_predictions[target_name] = self.target_pred_head[target_name](y).view(-1) #We use the concatenated X and target embeddings (all of them) to predict each target\n else:\n if self.args.target_prediction_model == \"MLP\": y[:,target_index,:] = self.layer_pre_head[target_name](y[:,target_index,:])\n target_predictions[target_name] = self.target_pred_head[target_name](y[:,target_index,:]).view(-1) #input the embedding with the relevant target_index\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n \n result = {\"logits_protein_sequence\": logits_protein_sequence, \"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n if need_head_weights:\n col_attentions = torch.stack(col_attn_weights, 1)\n row_attentions = torch.stack(row_attn_weights, 1)\n result[\"col_attentions\"] = col_attentions\n result[\"row_attentions\"] = row_attentions\n\n return result\n\n def forward_with_uncertainty(self, tokens, targets, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10, number_of_mutated_seqs_to_score=None):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output)\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad():\n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, targets, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n \n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def protein_npt_loss(self, token_predictions_logits, token_labels, target_predictions, target_labels, MLM_reconstruction_loss_weight, label_smoothing=0.0):\n target_prediction_loss_weight = 1.0 - MLM_reconstruction_loss_weight\n total_loss = 0.0\n if (token_labels is not None) and (MLM_reconstruction_loss_weight > 0.0):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None: token_labels = token_labels[:,0,:] #Only keep the token labels for seq to score. Drops the token labels for MSA sequences\n masked_lm_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(token_predictions_logits.reshape(-1, self.alphabet_size), token_labels.reshape(-1))\n reconstruction_loss = masked_lm_loss\n total_loss += MLM_reconstruction_loss_weight * reconstruction_loss\n else:\n reconstruction_loss = torch.tensor(0.0)\n target_prediction_loss = {}\n for target_name in self.target_names:\n if self.args.target_config[target_name][\"in_NPT_loss\"]:\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n loss_masked_targets = ~target_labels[target_name].eq(-100) #Masked items are the ones for which the label was not set to -100\n if loss_masked_targets.sum()==0 or torch.isnan(target_labels[target_name][loss_masked_targets]).sum() > 0: #First condition true if we dont mask anything (eg., all target missing at eval). 
Second condition true if we force-mask one value at train time (to satisfy min_num_labels_masked in mast_target()) and corresponding target value is missing\n tgt_loss = torch.tensor(0.0)\n else:\n tgt_loss = MSELoss(reduction=\"mean\")(target_predictions[target_name][loss_masked_targets], target_labels[target_name][loss_masked_targets]) #we do not average the loss per batch, so that it's easier to do 1 full average across all batches\n if torch.isnan(tgt_loss).sum() > 0:\n print(\"Detected nan loss\")\n print(target_predictions[target_name])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"mean\", label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1)) # Note: we dont add one to the # of categories in the CE loss here (we dont predict <mask>)\n target_prediction_loss[target_name] = tgt_loss\n \n total_loss += target_prediction_loss_weight * target_prediction_loss[target_name]\n return total_loss, reconstruction_loss, target_prediction_loss\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n if self.optimizer is None:\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "AugmentedPropertyPredictor", "path": "baselines/model.py", "snippet": "class AugmentedPropertyPredictor(nn.Module):\n def __init__(self, args, alphabet):\n super().__init__()\n self.args = args\n self.alphabet = alphabet\n self.alphabet_size = len(alphabet)\n print(\"Alphabet: {}\".format(alphabet))\n print(\"Alphabet size: {}\".format(self.alphabet_size))\n self.padding_idx = alphabet.padding_idx\n self.mask_idx = alphabet.mask_idx\n self.cls_idx = alphabet.cls_idx\n self.eos_idx = alphabet.eos_idx\n self.prepend_bos = alphabet.prepend_bos\n self.append_eos = alphabet.append_eos\n self.target_names = self.args.target_config.keys() \n self.MSA_sample_sequences = None \n self.device = None\n self.model_type = args.model_type \n if self.args.aa_embeddings in [\"MSA_Transformer\",\"ESM1v\"]:\n model, _ = utils.esm.pretrained.load_model_and_alphabet(args.embedding_model_location)\n self.aa_embedding = model\n if 
self.args.aa_embeddings == \"MSA_Transformer\": self.args.seq_len = self.args.MSA_seq_len #If MSA does not cover full sequence length, we adjust seq_len param to be MSA_len (sequences truncated as needed in preprocessing)\n elif self.args.aa_embeddings == \"Linear_embedding\":\n self.aa_embedding = nn.Sequential(\n nn.Embedding(\n self.alphabet_size, self.args.embed_dim, padding_idx=self.padding_idx\n ),\n nn.ReLU()\n )\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n self.args.target_prediction_head == \"One_hot_encoding\"\n elif self.args.aa_embeddings == \"Tranception\":\n self.aa_embedding_dim = 1280\n config = json.load(open(args.embedding_model_location+os.sep+'config.json'))\n config = utils.tranception.config.TranceptionConfig(**config)\n config.tokenizer = get_tranception_tokenizer()\n config.inference_time_retrieval_type = None\n config.retrieval_aggregation_mode = None\n self.aa_embedding = utils.tranception.model_pytorch.TranceptionLMHeadModel.from_pretrained(pretrained_model_name_or_path=args.embedding_model_location,config=config)\n self.config = config\n else:\n print(\"Error: Specified AA embedding invalid\")\n sys.exit(0)\n\n if self.args.aa_embeddings != \"One_hot_encoding\": \n self.emb_layer_norm_after = ESM1bLayerNorm(self.args.embed_dim)\n self.dropout_module = nn.Dropout(self.args.dropout)\n\n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\":\n target_pred_input_dim = self.args.embed_dim\n elif self.args.target_prediction_head == \"One_hot_encoding\":\n target_pred_input_dim = (self.args.seq_len + 1) * self.alphabet_size if args.target_prediction_model!=\"CNN\" else self.alphabet_size #Add one for the BOS token\n else:\n print(self.args.target_prediction_head)\n print(\"Error: Specified embedding aggregation invalid\")\n sys.exit(0)\n \n if args.target_prediction_model==\"MLP\":\n self.layer_pre_head = nn.Sequential(\n nn.Linear(target_pred_input_dim, target_pred_input_dim),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n elif args.target_prediction_model==\"ConvBERT\":\n configuration = ConvBertConfig(\n hidden_size = self.args.embed_dim,\n num_attention_heads = self.args.attention_heads if self.args.attention_heads is not None else 4,\n conv_kernel_size = self.args.conv_kernel_size,\n hidden_act = \"gelu\",\n hidden_dropout_prob = self.args.dropout,\n attention_probs_dropout_prob = self.args.dropout\n )\n self.layer_pre_head = ConvBertLayer(configuration)\n elif args.target_prediction_model==\"CNN\":\n self.layer_pre_head = nn.Sequential(\n nn.Conv1d(in_channels=target_pred_input_dim, out_channels=target_pred_input_dim, kernel_size = self.args.conv_kernel_size, padding='same'),\n nn.Dropout(self.args.dropout),\n nn.ReLU()\n )\n target_pred_input_dim = target_pred_input_dim if self.args.target_prediction_head != \"One_hot_encoding\" else target_pred_input_dim * (self.args.seq_len + 1)\n elif args.target_prediction_model==\"light_attention\":\n # Adapted from Stark et al (https://github.com/HannesStark/protein-localization)\n self.feature_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.attention_convolution = nn.Conv1d(self.args.embed_dim, self.args.embed_dim, self.args.conv_kernel_size, stride=1, padding='same')\n self.softmax = nn.Softmax(dim=-1)\n self.dropout = nn.Dropout(self.args.dropout)\n self.linear = nn.Sequential(\n nn.Linear(2 * self.args.embed_dim, 32),\n nn.Dropout(self.args.dropout),\n nn.ReLU(),\n nn.BatchNorm1d(32)\n )\n 
target_pred_input_dim = 32\n elif args.target_prediction_model==\"linear\":\n pass\n else:\n print(\"Error: Specified layer_pre_head invalid\")\n sys.exit(0)\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n self.zero_shot_fitness_prediction_weight = nn.ModuleDict(\n { \n target_name: nn.Linear(1, self.args.target_config[target_name][\"dim\"], bias=False)\n for target_name in self.target_names\n }\n )\n for target_name in self.target_names:\n torch.nn.init.constant_(self.zero_shot_fitness_prediction_weight[target_name].weight,1.0)\n\n self.target_pred_head = nn.ModuleDict(\n { \n target_name: nn.Linear(target_pred_input_dim, self.args.target_config[target_name][\"dim\"])\n for target_name in self.target_names #If multiple targets, we learn a separate linear head for each separately\n }\n )\n \n def set_device(self):\n if self.device is None:\n self.device = next(self.parameters()).device\n print(\"Model device: {}\".format(self.device))\n\n def forward(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, repr_layers=[]):\n if self.args.aa_embeddings == \"MSA_Transformer\" and self.args.sequence_embeddings_location is None:\n assert tokens.ndim == 3, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n num_MSAs_in_batch, num_sequences_in_alignments, seqlen = tokens.size()\n batch_size = num_MSAs_in_batch\n else:\n assert tokens.ndim == 2, \"Finding dimension of tokens to be: {}\".format(tokens.ndim)\n batch_size, seqlen = tokens.size()\n \n if sequence_embeddings is not None:\n x = sequence_embeddings.to(self.device)\n else:\n if self.args.aa_embeddings == \"MSA_Transformer\":\n output = self.aa_embedding(tokens, repr_layers=[12])\n x = output[\"representations\"][12][:] # B, N, L, D\n x = x[:,0,:,:] #In each MSA batch the first sequence is what we care about. 
The other MSA sequences were just to compute embeddings and logits\n elif self.args.aa_embeddings == \"ESM1v\":\n last_layer_index = 33\n output = self.aa_embedding(tokens, repr_layers=[last_layer_index])\n x = output[\"representations\"][last_layer_index][:] # N, L, D\n elif self.args.aa_embeddings == \"Tranception\":\n processed_batch = {'input_ids': tokens, 'labels': tokens}\n output = self.aa_embedding(**processed_batch, return_dict=True, output_hidden_states=True)\n x = output.hidden_states[0]\n elif self.args.aa_embeddings ==\"Linear_embedding\":\n x = self.aa_embedding(tokens)\n elif self.args.aa_embeddings == \"One_hot_encoding\":\n x = nn.functional.one_hot(tokens, num_classes=self.alphabet_size).view(batch_size,-1).float()\n if self.args.target_prediction_model == \"CNN\": x = x.view(batch_size,seqlen,self.alphabet_size)\n\n if self.args.aa_embeddings != \"One_hot_encoding\":\n x = self.emb_layer_norm_after(x)\n x = self.dropout_module(x)\n \n repr_layers = set(repr_layers)\n hidden_representations = {}\n if 0 in repr_layers:\n hidden_representations[0] = x\n\n if self.args.target_prediction_model == \"CNN\": \n assert len(x.size())==3, \"Size error input\"\n N, L, D = x.size()\n x = x.permute(0,2,1) #N, D, L\n x = self.layer_pre_head(x)\n x = x.permute(0,2,1)\n elif self.args.target_prediction_model == \"ConvBERT\":\n x = self.layer_pre_head(x)[0]\n elif self.args.target_prediction_model==\"light_attention\":\n x = x.permute(0,2,1) #N, D, L\n o = self.feature_convolution(x) \n o = self.dropout(o)\n attention = self.attention_convolution(x)\n o1 = torch.sum(o * self.softmax(attention), dim=-1)\n o2, _ = torch.max(o, dim=-1)\n o = torch.cat([o1, o2], dim=-1)\n x = self.linear(o)\n \n if self.args.target_prediction_head == \"AA_embeddings_mean_pooled\": x = x.mean(dim=-2)\n \n if self.args.target_prediction_model == \"MLP\": x = self.layer_pre_head(x)\n \n target_predictions = {}\n for target_name in self.target_names:\n target_predictions[target_name] = self.target_pred_head[target_name](x).view(-1)\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n target_predictions[target_name] += self.zero_shot_fitness_prediction_weight[target_name](zero_shot_fitness_predictions).squeeze()\n\n result = {\"target_predictions\": target_predictions, \"representations\": hidden_representations}\n \n return result\n \n def forward_with_uncertainty(self, tokens, zero_shot_fitness_predictions=None, sequence_embeddings=None, num_MC_dropout_samples=10):\n \"\"\"\n Performs MC dropout to compute predictions and the corresponding uncertainties.\n Assumes 1D predictions (eg., prediction of continuous output).\n \"\"\"\n self.eval() \n for m in self.modules(): #Move all dropout layers in train mode to support MC dropout. 
Keep everything else in eval mode.\n if m.__class__.__name__.startswith('Dropout'):\n m.train()\n with torch.no_grad(): \n predictions_dict = defaultdict(list)\n for _ in range(num_MC_dropout_samples):\n target_predictions_sample = self.forward(tokens, zero_shot_fitness_predictions=zero_shot_fitness_predictions, sequence_embeddings=sequence_embeddings)[\"target_predictions\"]\n for target_name in self.target_names:\n predictions_dict[target_name].append(target_predictions_sample[target_name])\n results_with_uncertainty={}\n for target_name in self.target_names:\n concatenated_target_pred = torch.cat([x.view(-1,1) for x in predictions_dict[target_name]],dim=-1)\n results_with_uncertainty[target_name] = {}\n results_with_uncertainty[target_name]['predictions_avg'] = concatenated_target_pred.mean(dim=-1)\n results_with_uncertainty[target_name]['uncertainty'] = concatenated_target_pred.std(dim=-1)\n return results_with_uncertainty\n\n @property\n def num_layers(self):\n return self.args.num_protein_npt_layers\n \n def max_tokens_per_msa_(self, value: int) -> None:\n \"\"\"\n Batching attention computations when gradients are disabled as per MSA_Transformer\n Set this value to infinity to disable this behavior.\n \"\"\"\n for module in self.modules():\n if isinstance(module, (RowSelfAttention, ColumnSelfAttention)):\n module.max_tokens_per_msa = value\n\n def prediction_loss(self, target_predictions, target_labels, label_smoothing=0.1):\n total_target_prediction_loss = 0.0\n target_prediction_loss_dict = {}\n for target_name in self.target_names:\n non_missing_target_indicator = ~torch.isnan(target_labels[target_name])\n if self.args.target_config[target_name][\"type\"]==\"continuous\":\n tgt_loss = MSELoss(reduction=\"sum\")(target_predictions[target_name][non_missing_target_indicator], target_labels[target_name][non_missing_target_indicator])\n else:\n tgt_loss = CrossEntropyLoss(reduction=\"none\",label_smoothing=label_smoothing)(target_predictions[target_name].view(-1, self.args.target_config[target_name][\"dim\"]), target_labels[target_name].view(-1))\n target_prediction_loss_dict[target_name] = tgt_loss\n total_target_prediction_loss += tgt_loss\n return total_target_prediction_loss, target_prediction_loss_dict\n\n def create_optimizer(self):\n \"\"\"\n Setup the optimizer.\n We provide a reasonable default that works well. 
If you want to use something else, you can pass a tuple in the\n Trainer's init through `optimizers`, or subclass and override this method in a subclass.\n Adapted from Huggingface Transformers library.\n \"\"\"\n all_parameters = utils.model_utils.get_parameter_names(self, [nn.LayerNorm])\n decay_parameters = [name for name in all_parameters if (\"bias\" not in name and \"pseudo_likelihood_weight\" not in name and 'zero_shot_fitness_prediction_weight' not in name)]\n psl_decay_parameters = [name for name in all_parameters if (\"bias\" not in name and (\"pseudo_likelihood_weight\" in name or \"zero_shot_fitness_prediction_weight\" in name))]\n optimizer_grouped_parameters = [\n {\n \"params\": [p for n, p in self.named_parameters() if n in decay_parameters],\n \"weight_decay\": self.args.weight_decay,\n },\n {\n \"params\": [p for n, p in self.named_parameters() if n in psl_decay_parameters],\n \"weight_decay\": 1e-8, #Small decay on pseudo-likelihood as in Hsu et al.\n },\n {\n \"params\": [p for n, p in self.named_parameters() if (n not in decay_parameters and n not in psl_decay_parameters)],\n \"weight_decay\": 0.0,\n },\n ] \n optimizer_kwargs = {\n \"betas\": (self.args.adam_beta1, self.args.adam_beta2),\n \"eps\": self.args.adam_epsilon,\n \"lr\": self.args.max_learning_rate\n }\n optimizer = AdamW(optimizer_grouped_parameters, **optimizer_kwargs)\n return optimizer" }, { "identifier": "Alphabet", "path": "utils/esm/data.py", "snippet": "class Alphabet(object):\n def __init__(\n self,\n standard_toks: Sequence[str],\n prepend_toks: Sequence[str] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\"),\n append_toks: Sequence[str] = (\"<cls>\", \"<mask>\", \"<sep>\"),\n prepend_bos: bool = True,\n append_eos: bool = False,\n use_msa: bool = False,\n ):\n #ESM Alphabet: {'<cls>': 0, '<pad>': 1, '<eos>': 2, '<unk>': 3, 'L': 4, 'A': 5, 'G': 6, 'V': 7, 'S': 8, 'E': 9, 'R': 10, 'T': 11, 'I': 12, 'D': 13, 'P': 14, 'K': 15, 'Q': 16, 'N': 17, 'F': 18, 'Y': 19, 'M': 20, 'H': 21, 'W': 22, 'C': 23, 'X': 24, 'B': 25, 'U': 26, 'Z': 27, 'O': 28, '.': 29, '-': 30, '<null_1>': 31, '<mask>': 32}\n self.standard_toks = list(standard_toks)\n self.prepend_toks = list(prepend_toks)\n self.append_toks = list(append_toks)\n self.prepend_bos = prepend_bos\n self.append_eos = append_eos\n self.use_msa = use_msa\n\n self.all_toks = list(self.prepend_toks)\n self.all_toks.extend(self.standard_toks)\n for i in range((8 - (len(self.all_toks) % 8)) % 8):\n self.all_toks.append(f\"<null_{i + 1}>\")\n self.all_toks.extend(self.append_toks)\n\n self.tok_to_idx = {tok: i for i, tok in enumerate(self.all_toks)}\n\n self.unk_idx = self.tok_to_idx[\"<unk>\"]\n self.padding_idx = self.get_idx(\"<pad>\")\n self.cls_idx = self.get_idx(\"<cls>\")\n self.mask_idx = self.get_idx(\"<mask>\")\n self.eos_idx = self.get_idx(\"<eos>\")\n self.all_special_tokens = ['<eos>', '<unk>', '<pad>', '<cls>', '<mask>']\n self.unique_no_split_tokens = self.all_toks\n\n def __len__(self):\n return len(self.all_toks)\n\n def get_idx(self, tok):\n return self.tok_to_idx.get(tok, self.unk_idx)\n\n def get_tok(self, ind):\n return self.all_toks[ind]\n\n def to_dict(self):\n return self.tok_to_idx.copy()\n\n def get_batch_converter(self, truncation_seq_length: int = None):\n if self.use_msa:\n return MSABatchConverter(self, truncation_seq_length)\n else:\n return BatchConverter(self, truncation_seq_length)\n\n @classmethod\n def from_architecture(cls, name: str) -> \"Alphabet\":\n if name in (\"ESM-1\", \"protein_bert_base\"):\n standard_toks = 
proteinseq_toks[\"toks\"]\n prepend_toks: Tuple[str, ...] = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks: Tuple[str, ...] = (\"<cls>\", \"<mask>\", \"<sep>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n elif name in (\"ESM-1b\", \"roberta_large\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = True\n use_msa = False\n elif name in (\"MSA Transformer\", \"msa_transformer\"):\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<cls>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\",)\n prepend_bos = True\n append_eos = False\n use_msa = True\n elif \"invariant_gvp\" in name.lower():\n standard_toks = proteinseq_toks[\"toks\"]\n prepend_toks = (\"<null_0>\", \"<pad>\", \"<eos>\", \"<unk>\")\n append_toks = (\"<mask>\", \"<cath>\", \"<af2>\")\n prepend_bos = True\n append_eos = False\n use_msa = False\n else:\n raise ValueError(\"Unknown architecture selected\")\n return cls(standard_toks, prepend_toks, append_toks, prepend_bos, append_eos, use_msa)\n\n def _tokenize(self, text) -> str:\n return text.split()\n\n def tokenize(self, text, **kwargs) -> List[str]:\n \"\"\"\n Inspired by https://github.com/huggingface/transformers/blob/master/src/transformers/tokenization_utils.py\n Converts a string in a sequence of tokens, using the tokenizer.\n\n Args:\n text (:obj:`str`):\n The sequence to be encoded.\n\n Returns:\n :obj:`List[str]`: The list of tokens.\n \"\"\"\n\n def split_on_token(tok, text):\n result = []\n split_text = text.split(tok)\n for i, sub_text in enumerate(split_text):\n # AddedToken can control whitespace stripping around them.\n # We use them for GPT2 and Roberta to have different behavior depending on the special token\n # Cf. 
https://github.com/huggingface/transformers/pull/2778\n # and https://github.com/huggingface/transformers/issues/3788\n # We strip left and right by default\n if i < len(split_text) - 1:\n sub_text = sub_text.rstrip()\n if i > 0:\n sub_text = sub_text.lstrip()\n\n if i == 0 and not sub_text:\n result.append(tok)\n elif i == len(split_text) - 1:\n if sub_text:\n result.append(sub_text)\n else:\n pass\n else:\n if sub_text:\n result.append(sub_text)\n result.append(tok)\n return result\n\n def split_on_tokens(tok_list, text):\n if not text.strip():\n return []\n\n tokenized_text = []\n text_list = [text]\n for tok in tok_list:\n tokenized_text = []\n for sub_text in text_list:\n if sub_text not in self.unique_no_split_tokens:\n tokenized_text.extend(split_on_token(tok, sub_text))\n else:\n tokenized_text.append(sub_text)\n text_list = tokenized_text\n\n return list(\n itertools.chain.from_iterable(\n (\n self._tokenize(token)\n if token not in self.unique_no_split_tokens\n else [token]\n for token in tokenized_text\n )\n )\n )\n\n no_split_token = self.unique_no_split_tokens\n tokenized_text = split_on_tokens(no_split_token, text)\n return tokenized_text\n\n def encode(self, text):\n return [self.tok_to_idx[tok] for tok in self.tokenize(text)]" }, { "identifier": "get_tranception_tokenizer", "path": "utils/tranception/model_pytorch.py", "snippet": "def get_tranception_tokenizer():\n #Tranception Alphabet: \"vocab\":{\"[UNK]\":0,\"[CLS]\":1,\"[SEP]\":2,\"[PAD]\":3,\"[MASK]\":4,\"A\":5,\"C\":6,\"D\":7,\"E\":8,\"F\":9,\"G\":10,\"H\":11,\"I\":12,\"K\":13,\"L\":14,\"M\":15,\"N\":16,\"P\":17,\"Q\":18,\"R\":19,\"S\":20,\"T\":21,\"V\":22,\"W\":23,\"Y\":24}\n dir_path = os.path.dirname(os.path.abspath(__file__))\n tokenizer = PreTrainedTokenizerFast(tokenizer_file=dir_path + os.sep + \"utils/tokenizers/Basic_tokenizer\", unk_token=\"[UNK]\", sep_token=\"[SEP]\", pad_token=\"[PAD]\", cls_token=\"[CLS]\",mask_token=\"[MASK]\")\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n tokenizer.tok_to_idx = tokenizer.vocab\n tokenizer.padding_idx = tokenizer.tok_to_idx[\"[PAD]\"]\n tokenizer.mask_idx = tokenizer.tok_to_idx[\"[MASK]\"]\n tokenizer.cls_idx = tokenizer.tok_to_idx[\"[CLS]\"]\n tokenizer.eos_idx = tokenizer.tok_to_idx[\"[SEP]\"]\n tokenizer.prepend_bos = True\n tokenizer.append_eos = True\n return tokenizer" }, { "identifier": "get_train_val_test_data", "path": "utils/data_utils.py", "snippet": "def get_train_val_test_data(args, assay_file_names):\n target_names = args.target_config.keys() \n assay_data={}\n merge = None\n main_target_name = None\n main_target_name_count = 0\n for target in target_names:\n if args.target_config[target][\"main_target\"]: \n main_target_name=target\n main_target_name_count+=1\n assert main_target_name is not None, \"No main target referenced. Please update config to select a unique main target.\"\n assert main_target_name_count <= 1, \"Several main targets referenced. 
Please update config to select a unique main target.\"\n \n assay_data[main_target_name] = pd.read_csv(args.target_config[main_target_name][\"location\"] + os.sep + assay_file_names[main_target_name])[['mutant','mutated_sequence',args.target_config[main_target_name][\"var_name\"],args.fold_variable_name]] \n assay_data[main_target_name].columns = ['mutant','mutated_sequence', main_target_name, args.fold_variable_name]\n merge = assay_data[main_target_name]\n \n for target_name in target_names:\n if target_name!=main_target_name:\n print(target_name)\n print(args.target_config)\n print(assay_file_names)\n assay_data[target_name] = pd.read_csv(args.target_config[target_name][\"location\"] + os.sep + assay_file_names[target_name])[['mutant',args.target_config[target_name][\"var_name\"]]] \n assay_data[target_name].columns = ['mutant',target_name]\n merge = pd.merge(merge, assay_data[target_name], how='left', on='mutant')\n \n if args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = pd.read_csv(args.zero_shot_fitness_predictions_location + os.sep + assay_file_names[main_target_name])[['mutant',args.zero_shot_fitness_predictions_var_name]]\n zero_shot_fitness_predictions.columns = ['mutant','zero_shot_fitness_predictions']\n zero_shot_fitness_predictions['zero_shot_fitness_predictions'] = standardize(zero_shot_fitness_predictions['zero_shot_fitness_predictions'])\n merge = pd.merge(merge,zero_shot_fitness_predictions,how='inner',on='mutant')\n\n train_val_test_splits = split_data_based_on_test_fold_index(\n dataframe = merge, \n fold_variable_name = args.fold_variable_name,\n test_fold_index = args.test_fold_index,\n use_validation_set = args.use_validation_set\n )\n splits_dict = {}\n for split_name, split in zip(['train','val','test'], train_val_test_splits):\n if split_name=='val' and not args.use_validation_set: continue\n splits_dict[split_name] = {}\n splits_dict[split_name]['mutant_mutated_seq_pairs'] = list(zip(list(split['mutant']),list(split['mutated_sequence'])))\n raw_targets = {target_name: split[target_name] for target_name in target_names}\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": raw_targets['zero_shot_fitness_predictions'] = split['zero_shot_fitness_predictions']\n if split_name==\"train\":\n raw_targets, target_processing = preprocess_training_targets(raw_targets, args.target_config)\n else:\n raw_targets = preprocess_test_targets(raw_targets, args.target_config, target_processing)\n for target_name in target_names: \n splits_dict[split_name][target_name] = raw_targets[target_name]\n if args.augmentation==\"zero_shot_fitness_predictions_covariate\": splits_dict[split_name]['zero_shot_fitness_predictions'] = raw_targets['zero_shot_fitness_predictions']\n # load dict into dataset objects\n train_data = Dataset.from_dict(splits_dict['train'])\n val_data = Dataset.from_dict(splits_dict['val']) if args.use_validation_set else None\n test_data = Dataset.from_dict(splits_dict['test'])\n return train_data, val_data, test_data, target_processing" }, { "identifier": "standardize", "path": "utils/data_utils.py", "snippet": "def standardize(x):\n return (x - x.mean()) / x.std()" }, { "identifier": "pnpt_count_non_nan", "path": "utils/data_utils.py", "snippet": "def pnpt_count_non_nan(x):\n missing_mask = np.isnan(x) | np.equal(x,-100)\n return np.count_nonzero(~missing_mask)" }, { "identifier": "pnpt_spearmanr", "path": "utils/data_utils.py", "snippet": "def pnpt_spearmanr(prediction,target):\n mask_missing_values = 
np.isnan(target) | np.equal(target, -100) #In PNPT missing values are never masked so corresponding labels are always set to -100\n return spearmanr(prediction[~mask_missing_values], target[~mask_missing_values])[0] #first value is spearman rho, second is the corresponding p-value " }, { "identifier": "process_MSA", "path": "utils/msa_utils.py", "snippet": "def process_MSA(args, MSA_filename, MSA_weights_filename):\n filtered_MSA_filename = filter_msa(filename = args.MSA_data_folder + os.sep + MSA_filename, path_to_hhfilter = args.path_to_hhfilter)\n MSA_all_sequences, MSA_non_ref_sequences_weights = compute_sequence_weights(MSA_filename = filtered_MSA_filename, MSA_weights_filename = args.MSA_weight_data_folder + os.sep + MSA_weights_filename)\n return MSA_all_sequences, MSA_non_ref_sequences_weights" }, { "identifier": "Trainer", "path": "utils/model_utils.py", "snippet": "class Trainer():\n def __init__(self, \n model,\n args,\n train_data, \n val_data,\n MSA_sequences, \n MSA_weights,\n MSA_start_position,\n MSA_end_position,\n target_processing,\n distributed_training=False\n ):\n self.model = model\n self.args = args\n self.train_data = train_data\n self.val_data = val_data\n self.MSA_sequences = MSA_sequences\n self.MSA_weights = MSA_weights\n self.MSA_start_position = MSA_start_position\n self.MSA_end_position = MSA_end_position\n self.target_processing = target_processing\n self.distributed_training = distributed_training\n \n def train(self):\n \"\"\"\n Returns the last value of training_step (useful in case of early stopping for isntance)\n \"\"\"\n \n self.model.train()\n self.model.cuda()\n self.model.set_device()\n\n if self.distributed_training:\n self.model = torch.nn.parallel.DistributedDataParallel(self.model)\n train_sampler = torch.utils.data.distributed.DistributedSampler(self.train_data)\n else:\n train_sampler = None\n \n #To ensure reproducibility with seed setting\n def seed_worker(worker_id):\n worker_seed = torch.initial_seed() % 2**32\n np.random.seed(worker_seed)\n random.seed(worker_seed)\n g = torch.Generator()\n g.manual_seed(0)\n train_loader = torch.utils.data.DataLoader(\n dataset=self.train_data, \n batch_size=self.args.training_num_assay_sequences_per_batch_per_gpu, \n shuffle=(train_sampler is None),\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True, \n sampler=train_sampler,\n collate_fn=collate_fn_protein_npt,\n worker_init_fn=seed_worker,\n generator=g,\n )\n optimizer = self.model.create_optimizer()\n scheduler = learning_rate_scheduler(\n num_warmup_steps=self.args.num_warmup_steps, \n num_total_training_steps=self.args.num_total_training_steps, \n max_learning_rate=self.args.max_learning_rate, \n min_learning_rate=self.args.min_learning_rate\n )\n \n train_iterator = iter(train_loader)\n num_epochs = 0\n prior_log_time = time.time()\n total_train_time = 0\n log_train_total_loss = 0\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n all_spearmans_eval_during_training = []\n max_average_spearman_across_targets = - math.inf\n if self.args.training_fp16: scaler = torch.cuda.amp.GradScaler()\n\n for training_step in tqdm.tqdm(range(1, self.args.num_total_training_steps+1)):\n optimizer.zero_grad(set_to_none=True)\n lr = scheduler(training_step)\n update_lr_optimizer(optimizer, lr)\n reconstruction_loss_coeff = 
get_reconstruction_loss_coefficient(training_step, num_total_training_steps=self.args.num_total_training_steps) if (self.model.model_type==\"ProteinNPT\" and not self.model.PNPT_no_reconstruction_error) else 0\n for gradient_accum_step in range(self.args.gradient_accumulation):\n try:\n batch = next(train_iterator)\n except:\n num_epochs +=1\n train_iterator = iter(train_loader)\n batch = next(train_iterator)\n \n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = None,\n proba_target_mask = 0.15,\n proba_aa_mask = 0.15,\n eval_mode = False,\n device=self.model.device,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=False,\n indel_mode=self.args.indel_mode\n )\n\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n scaler.scale(total_loss).backward()\n else:\n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, reconstruction_loss, target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n 
MLM_reconstruction_loss_weight=reconstruction_loss_coeff, \n label_smoothing=self.args.label_smoothing\n )\n if total_loss.item() > 10.0 and training_step >= 100:\n print(\"High training loss detected: {}\".format(total_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n total_loss, target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n total_loss.backward()\n torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.grad_norm_clip)\n # Taking optimizer update out of the inner loop to support gradient accumulation\n if self.args.training_fp16:\n with torch.cuda.amp.autocast():\n scaler.step(optimizer)\n scaler.update()\n else:\n optimizer.step()\n\n log_train_total_loss += total_loss\n for target_name in self.model.target_names:\n log_train_target_prediction_loss_dict[target_name] += target_prediction_loss_dict[target_name]\n if self.model.model_type==\"ProteinNPT\": \n log_train_reconstruction_loss += reconstruction_loss\n log_train_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum()\n for target_name in self.model.target_names:\n log_train_num_target_masked_tokens_dict[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item() # Masked targets are encoded by 1.0. Mask column is the very last one\n else:\n log_num_sequences_predicted += len(batch['mutant_mutated_seq_pairs'])\n \n if training_step % self.args.num_logging_training_steps == 0 and self.args.use_wandb:\n time_end_step = time.time()\n delta_time_since_last_log = time_end_step - prior_log_time\n total_train_time += delta_time_since_last_log\n prior_log_time = time_end_step\n train_logs = {\n \"training_step\": training_step, \n \"step_time\": delta_time_since_last_log / (self.args.num_logging_training_steps)\n }\n if self.model.model_type==\"ProteinNPT\": \n train_logs[\"train_total_loss_per_step\"]: log_train_total_loss / self.args.num_logging_training_steps\n train_logs[\"train_reconstruction_loss_per_masked_token\"] = log_train_reconstruction_loss / log_train_num_masked_tokens\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_masked_token\"] = log_train_target_prediction_loss_dict[target_name] / log_train_num_target_masked_tokens_dict[target_name]\n else:\n train_logs[\"train_total_loss_per_seq\"]: log_train_total_loss / log_num_sequences_predicted\n for target_name in self.model.target_names:\n train_logs[\"train_prediction_\"+str(target_name)+\"_loss_per_seq\"] = log_train_target_prediction_loss_dict[target_name] / log_num_sequences_predicted\n wandb.log(train_logs)\n log_train_total_loss = 0\n log_train_target_prediction_loss_dict = defaultdict(int)\n if self.model.model_type==\"ProteinNPT\":\n log_train_reconstruction_loss = 0\n log_train_num_masked_tokens = 0\n log_train_num_target_masked_tokens_dict = defaultdict(int)\n else:\n log_num_sequences_predicted = 0 \n \n if self.args.save_model_checkpoint and (training_step % self.args.num_saving_training_steps) == 0:\n if not os.path.exists(self.args.model_location): os.mkdir(self.args.model_location)\n if not os.path.exists(self.args.model_location + os.sep + 'checkpoint-'+str(training_step)): 
os.mkdir(self.args.model_location + os.sep + 'checkpoint-'+str(training_step))\n torch.save({\n 'training_step': training_step,\n 'args': self.args,\n 'state_dict': self.model.state_dict(),\n 'optimizer' : optimizer.state_dict()\n }, \n self.args.model_location + os.sep + 'checkpoint-'+str(training_step) + os.sep + 'checkpoint.t7'\n )\n \n if training_step % self.args.num_eval_steps == 0 and self.args.use_validation_set:\n if self.model.model_type==\"ProteinNPT\":\n eval_results = self.eval(\n test_data=self.val_data,\n train_data=self.train_data,\n reconstruction_loss_weight=0.0,\n output_all_predictions=True\n )\n else:\n eval_results = self.eval(\n test_data=self.val_data, \n output_all_predictions=True\n )\n eval_logs = {\"Training step\": training_step} \n if self.model.model_type==\"ProteinNPT\":\n normalization = 0\n for target_name in self.model.target_names: normalization += eval_results['eval_num_masked_targets'][target_name]\n else:\n normalization = eval_results['eval_num_predicted_targets']\n eval_logs['Eval total loss per seq.']: eval_results['eval_total_loss'] / normalization\n average_spearman_across_targets = 0 #If early stopping based on validation spearman and multiple targets, we check that avg spearman is not decreasing for a certain # of times in a row\n for target_name in self.model.target_names:\n if self.model.model_type==\"ProteinNPT\": normalization = eval_results['eval_num_masked_targets'][target_name] #Update for PNPT (keeep the same normalization constant otherwise)\n eval_logs['Eval loss '+str(target_name)+' per seq.'] = eval_results['eval_target_prediction_loss_dict'][target_name] / normalization\n eval_logs['Eval spearman '+target_name] = spearmanr(eval_results['output_scores']['predictions_'+target_name], eval_results['output_scores']['labels_'+target_name])[0]\n average_spearman_across_targets += eval_logs['Eval spearman '+target_name]\n average_spearman_across_targets /= len(self.model.target_names)\n print(\" | \".join([key + \": \"+str(round(eval_logs[key],5)) for key in eval_logs.keys()]))\n if self.args.use_wandb: wandb.log(eval_logs)\n # Early stopping\n all_spearmans_eval_during_training.append(average_spearman_across_targets)\n if average_spearman_across_targets > max_average_spearman_across_targets: max_average_spearman_across_targets = average_spearman_across_targets\n if (training_step >= 1000) and (self.args.early_stopping_patience is not None) and (np.array(all_spearmans_eval_during_training)[-self.args.early_stopping_patience:].max() < max_average_spearman_across_targets):\n print(\"Early stopping. Training step: {}. Total eval loss: {}. 
Avg spearman: {}\".format(training_step, eval_results['eval_total_loss'], average_spearman_across_targets))\n break\n self.model.train() #Move back the model to train mode after eval loop\n trainer_final_status = {\n 'total_training_steps': training_step,\n 'total_train_time': total_train_time,\n 'total_training_epochs': num_epochs\n }\n return trainer_final_status\n\n def eval(self, test_data, output_all_predictions=False, need_head_weights=False, train_data = None, reconstruction_loss_weight=0.5, selected_indices_seed=0):\n \"\"\"\n total_eval_target_prediction_loss is the sum of all target prediction losses across all targets\n total_eval_target_prediction_loss contains the breakdown by target\n num_predicted_targets has the number of predicted items\n output_scores is a dict with sequences, predictions and labels\n \"\"\"\n self.model.eval()\n self.model.cuda()\n self.model.set_device()\n with torch.no_grad():\n eval_loader = torch.utils.data.DataLoader(\n dataset=test_data, \n batch_size=self.args.eval_num_sequences_to_score_per_batch_per_gpu, \n shuffle=False,\n num_workers=self.args.num_data_loaders_workers, \n pin_memory=True,\n collate_fn=collate_fn_protein_npt\n )\n eval_iterator = iter(eval_loader)\n \n eval_total_loss = 0\n if self.model.model_type==\"ProteinNPT\": \n eval_reconstruction_loss = 0\n eval_num_masked_tokens = 0\n eval_num_masked_targets = defaultdict(int)\n else:\n num_predicted_targets = 0\n eval_target_prediction_loss_dict = defaultdict(int)\n output_scores = defaultdict(list) if output_all_predictions else None\n\n if need_head_weights:\n col_attentions=[]\n row_attentions=[]\n\n for batch in tqdm.tqdm(eval_iterator):\n if output_all_predictions: \n output_scores['mutated_sequence'] += list(zip(*batch['mutant_mutated_seq_pairs']))[1]\n output_scores['mutant'] += list(zip(*batch['mutant_mutated_seq_pairs']))[0]\n if self.model.model_type==\"ProteinNPT\":\n processed_batch = proteinnpt.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n target_processing = self.target_processing,\n training_sequences = train_data,\n proba_target_mask = 1.0, \n proba_aa_mask = 0.0,\n eval_mode = True,\n device=self.model.device,\n selected_indices_seed=selected_indices_seed,\n indel_mode=self.args.indel_mode\n )\n else:\n processed_batch = baselines.data_processing.process_batch(\n batch = batch,\n model = self.model,\n alphabet = self.model.alphabet, \n args = self.args, \n MSA_sequences = self.MSA_sequences, \n MSA_weights = self.MSA_weights,\n MSA_start_position = self.MSA_start_position, \n MSA_end_position = self.MSA_end_position,\n device=self.model.device,\n eval_mode=True,\n indel_mode=self.args.indel_mode\n )\n if self.args.augmentation==\"zero_shot_fitness_predictions_covariate\":\n zero_shot_fitness_predictions = processed_batch['target_labels']['zero_shot_fitness_predictions'].view(-1,1)\n del processed_batch['target_labels']['zero_shot_fitness_predictions']\n else:\n zero_shot_fitness_predictions = None\n \n if self.model.model_type==\"ProteinNPT\":\n output = self.model(\n tokens=processed_batch['masked_tokens'],\n targets=processed_batch['masked_targets'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings'],\n need_head_weights=need_head_weights\n )\n 
batch_loss, batch_reconstruction_loss, batch_target_prediction_loss_dict = self.model.protein_npt_loss(\n token_predictions_logits=output['logits_protein_sequence'], \n token_labels=processed_batch['token_labels'], \n target_predictions=output['target_predictions'], \n target_labels=processed_batch['target_labels'], \n MLM_reconstruction_loss_weight=reconstruction_loss_weight, \n label_smoothing=self.args.label_smoothing\n )\n if batch_loss.item() > 10.0:\n print(\"High eval loss detected: {}\".format(batch_loss.item()))\n else:\n output = self.model(\n tokens=processed_batch['input_tokens'],\n zero_shot_fitness_predictions=zero_shot_fitness_predictions,\n sequence_embeddings=processed_batch['sequence_embeddings']\n )\n batch_loss, batch_target_prediction_loss_dict = self.model.prediction_loss(\n target_predictions=output[\"target_predictions\"], \n target_labels=processed_batch['target_labels'],\n label_smoothing=self.args.label_smoothing\n )\n \n eval_total_loss += batch_loss.item()\n for target_name in self.model.target_names:\n eval_target_prediction_loss_dict[target_name] += batch_target_prediction_loss_dict[target_name].item()\n if self.model.model_type==\"ProteinNPT\":\n eval_reconstruction_loss += batch_reconstruction_loss.item()\n eval_num_masked_tokens += processed_batch['masked_tokens'].eq(self.model.alphabet.mask_idx).sum().item()\n for target_name in self.model.target_names:\n eval_num_masked_targets[target_name] += processed_batch['masked_targets'][target_name][:,-1].eq(1.0).sum().item()\n else:\n num_predicted_targets += len(batch['mutant_mutated_seq_pairs'])\n if output_all_predictions:\n num_of_mutated_seqs_to_score = processed_batch['num_of_mutated_seqs_to_score'] if self.model.model_type==\"ProteinNPT\" else len(processed_batch['mutant_mutated_seq_pairs'])\n for target_name in self.model.target_names:\n output_scores['predictions_'+target_name] += list(output[\"target_predictions\"][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n output_scores['labels_'+target_name] += list(processed_batch['target_labels'][target_name][:num_of_mutated_seqs_to_score].cpu().numpy())\n if need_head_weights:\n col_attentions.append(output[\"col_attentions\"])\n row_attentions.append(output[\"row_attentions\"])\n\n output_scores = pd.DataFrame.from_dict(output_scores)\n output_scores_numeric_cols = [col_name for col_name in output_scores.columns if col_name not in ['mutant','mutated_sequence']]\n output_scores = output_scores.groupby(['mutant'])[output_scores_numeric_cols].mean().reset_index() \n mutated_seqs_dict = {}\n mutant_mutated_seqs = list(zip(*test_data['mutant_mutated_seq_pairs']))\n mutated_seqs_dict['mutant'] = mutant_mutated_seqs[0]\n mutated_seqs_dict['mutated_sequence'] = mutant_mutated_seqs[1]\n mutated_seqs_df = pd.DataFrame.from_dict(mutated_seqs_dict)\n output_scores = pd.merge(output_scores, mutated_seqs_df, on='mutant', how='left')\n \n\n eval_results = {\n 'eval_total_loss':eval_total_loss,\n 'eval_target_prediction_loss_dict':eval_target_prediction_loss_dict,\n 'output_scores': output_scores\n }\n if need_head_weights:\n print(\"dimension of first attention column {}\".format(col_attentions[0].shape))\n eval_results['col_attentions'] = torch.stack(col_attentions, dim=0).cpu().numpy()\n eval_results['row_attentions'] = torch.stack(row_attentions, dim=0).cpu().numpy()\n \n if self.model.model_type==\"ProteinNPT\":\n eval_results['eval_reconstruction_loss']=eval_reconstruction_loss\n eval_results['eval_num_masked_tokens']=eval_num_masked_tokens\n 
eval_results['eval_num_masked_targets']=eval_num_masked_targets\n else:\n eval_results['eval_num_predicted_targets']=num_predicted_targets\n return eval_results" } ]
import os,gc
import json
import argparse
import random
import numpy as np
import pandas as pd
import wandb
import torch
import proteinnpt,baselines,utils
from collections import defaultdict
from proteinnpt.model import ProteinNPTModel
from baselines.model import AugmentedPropertyPredictor
from utils.esm.data import Alphabet
from utils.tranception.model_pytorch import get_tranception_tokenizer
from utils.data_utils import get_train_val_test_data, standardize, pnpt_count_non_nan, pnpt_spearmanr
from utils.msa_utils import process_MSA
from utils.model_utils import Trainer
18,339
def setup_config_and_paths(args):
    # All parameters that are not defined by end user are fetched from the config file
    if args.model_config_location is not None:
        args.main_config=json.load(open(args.model_config_location))
        for key in args.main_config:
            if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key]

    # File paths config
    for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']:
        if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path))
    if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions')
    if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint')
    args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix
    if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location)
    args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix
    if not os.path.exists(args.model_location): os.mkdir(args.model_location)

    # Target config
    args.target_config=json.load(open(args.target_config_location))
    zero_shot_predictions_mapping={
        "MSA_Transformer_pred": "MSA_Transformer_ensemble",
        "ESM1v_pred": "ESM1v_ensemble",
        "TranceptEVE_pred": "TranceptEVE_L",
        "Tranception_pred": "Tranception_L",
        "DeepSequence_pred": "DeepSequence_ensemble"
    }
    if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"]
    if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels":
        # Add auxiliary label to target_config
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced"
        print("Using zero-shot fitness predictions as auxiliary labels")
        args.target_config["zero_shot_fitness_predictions"] = {
            "type": "continuous",
            "dim": 1,
            "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions
            "location": args.zero_shot_fitness_predictions_location,
            "in_NPT_loss": False,
            "main_target": False
        }
        args.augmentation_short="auxiliary"
    elif args.augmentation=="zero_shot_fitness_predictions_covariate":
        # Will use zero-shot fitness predictions as an additional model covariate
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced"
        print("Using zero-shot fitness predictions as covariate")
        args.augmentation_short="covariate"
        args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type]
    else:
        args.augmentation_short="none"

    for target_index,target in enumerate(args.target_config):
        if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present
            if args.assay_location is not None: # We passed at least one path for the assay location
                num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]
                if len(args.assay_location) > 1:
                    assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location))
                    args.target_config[target]["location"] = args.assay_location[target_index]
                    print("Location used for target {} if {}".format(target,args.assay_location[target_index]))
                else:
                    args.target_config[target]["location"] = args.assay_location[0]
                    print("Location used for target {} if {}".format(target,args.assay_location[0]))
            else:
                print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles'))
                args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles'
    return args

def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None):
    test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']}
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path+os.sep+'output'
        if not os.path.exists(logs_folder): os.mkdir(logs_folder)
    if args.model_type=="ProteinNPT":
        normalization = 0
        for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name]
    else:
        normalization = test_eval_results['eval_num_predicted_targets']
    test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
    spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
def setup_config_and_paths(args):
    # All parameters that are not defined by end user are fetched from the config file
    if args.model_config_location is not None:
        args.main_config=json.load(open(args.model_config_location))
        for key in args.main_config:
            if args.__dict__[key] is None: args.__dict__[key] = args.main_config[key]

    # File paths config
    for local_path in ['embedding_model_location','MSA_data_folder','MSA_weight_data_folder','path_to_hhfilter']:
        if getattr(args, local_path): setattr(args, local_path, args.data_location + os.sep + getattr(args, local_path))
    if not os.path.exists(args.data_location + os.sep + 'model_predictions'): os.mkdir(args.data_location + os.sep + 'model_predictions')
    if not os.path.exists(args.data_location + os.sep + 'checkpoint'): os.mkdir(args.data_location + os.sep + 'checkpoint')
    args.output_scores_location = args.data_location + os.sep + 'model_predictions' + os.sep + args.model_name_suffix
    if not os.path.exists(args.output_scores_location): os.mkdir(args.output_scores_location)
    args.model_location = args.data_location + os.sep + 'checkpoint' + os.sep + args.model_name_suffix
    if not os.path.exists(args.model_location): os.mkdir(args.model_location)

    # Target config
    args.target_config=json.load(open(args.target_config_location))
    zero_shot_predictions_mapping={
        "MSA_Transformer_pred": "MSA_Transformer_ensemble",
        "ESM1v_pred": "ESM1v_ensemble",
        "TranceptEVE_pred": "TranceptEVE_L",
        "Tranception_pred": "Tranception_L",
        "DeepSequence_pred": "DeepSequence_ensemble"
    }
    if args.model_type=="ProteinNPT": zero_shot_predictions_mapping["ProteinNPT"]=zero_shot_predictions_mapping[args.aa_embeddings+"_pred"]
    if args.augmentation=="zero_shot_fitness_predictions_auxiliary_labels":
        # Add auxiliary label to target_config
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as auxiliary labels not properly referenced"
        print("Using zero-shot fitness predictions as auxiliary labels")
        args.target_config["zero_shot_fitness_predictions"] = {
            "type": "continuous",
            "dim": 1,
            "var_name": zero_shot_predictions_mapping[args.model_type], #Select the relevant model for zero-shot fitness predictions
            "location": args.zero_shot_fitness_predictions_location,
            "in_NPT_loss": False,
            "main_target": False
        }
        args.augmentation_short="auxiliary"
    elif args.augmentation=="zero_shot_fitness_predictions_covariate":
        # Will use zero-shot fitness predictions as an additional model covariate
        assert args.zero_shot_fitness_predictions_location is not None, "Location of zero-shot fitness predictions to use as model covariate not properly referenced"
        print("Using zero-shot fitness predictions as covariate")
        args.augmentation_short="covariate"
        args.zero_shot_fitness_predictions_var_name = zero_shot_predictions_mapping[args.model_type]
    else:
        args.augmentation_short="none"

    for target_index,target in enumerate(args.target_config):
        if "location" not in args.target_config[target].keys(): # Note: the case of zero-shot fitness predictions is already handled above if present
            if args.assay_location is not None: # We passed at least one path for the assay location
                num_targets = [x for x in args.target_config.keys() if args.target_config[x]["in_NPT_loss"]]
                if len(args.assay_location) > 1:
                    assert len(args.assay_location)==num_targets, "Trying to predict {} targets, but only referencing {} distinct paths for them.".format(num_targets,len(args.assay_location))
                    args.target_config[target]["location"] = args.assay_location[target_index]
                    print("Location used for target {} if {}".format(target,args.assay_location[target_index]))
                else:
                    args.target_config[target]["location"] = args.assay_location[0]
                    print("Location used for target {} if {}".format(target,args.assay_location[0]))
            else:
                print("Assay location not provided. Defaulting to location for single substitutions fitness assays: {}".format(args.data_location + os.sep + 'data/fitness/substitutions_singles'))
                args.target_config[target]["location"] = args.data_location + os.sep + 'data/fitness/substitutions_singles'
    return args

def log_performance_fold(args,target_names,test_eval_results,trainer_final_status,perf_list,logs_folder=None):
    test_logs = {'total_training_steps': trainer_final_status['total_training_steps'], 'total_training_epochs': trainer_final_status['total_training_epochs'], 'total_train_time': trainer_final_status['total_train_time']}
    if logs_folder is None:
        dir_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        logs_folder = dir_path+os.sep+'output'
        if not os.path.exists(logs_folder): os.mkdir(logs_folder)
    if args.model_type=="ProteinNPT":
        normalization = 0
        for target_name in target_names: normalization += test_eval_results['eval_num_masked_targets'][target_name]
    else:
        normalization = test_eval_results['eval_num_predicted_targets']
    test_logs['Test total loss per seq.'] = test_eval_results['eval_total_loss'] / normalization
    spearmans = {target_name: pnpt_spearmanr(test_eval_results['output_scores']['predictions_'+target_name], test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
num_obs_spearmans = {target_name: pnpt_count_non_nan(test_eval_results['output_scores']['labels_'+target_name]) for target_name in target_names}
6
2023-10-28 11:41:05+00:00
24k
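The ProteinNPT record that closes above ends with log_performance_fold building per-target dictionaries via pnpt_spearmanr and pnpt_count_non_nan, whose definitions are not included in the record. As a hedged aside for readers of this dump, the sketch below shows the NaN-aware behaviour those helpers presumably implement; the function names spearman_ignoring_nans and count_non_nan, and the toy preds/labels values, are illustrative assumptions rather than part of the dataset.

```python
# Minimal sketch (assumption): pnpt_spearmanr / pnpt_count_non_nan are taken to mean
# "drop unobserved (NaN) pairs, then compute Spearman's rho / count observed labels".
import numpy as np
from scipy.stats import spearmanr

def spearman_ignoring_nans(predictions, labels):
    predictions = np.asarray(predictions, dtype=float)
    labels = np.asarray(labels, dtype=float)
    mask = ~np.isnan(labels) & ~np.isnan(predictions)  # keep only observed pairs
    if mask.sum() < 2:
        return float("nan")  # correlation undefined with fewer than two points
    return spearmanr(predictions[mask], labels[mask]).correlation

def count_non_nan(labels):
    return int((~np.isnan(np.asarray(labels, dtype=float))).sum())

# Usage mirroring the per-target dictionaries built in log_performance_fold above.
preds = {"fitness": [0.10, 0.40, 0.35, 0.80]}
labels = {"fitness": [0.00, 0.50, float("nan"), 1.00]}
spearmans = {t: spearman_ignoring_nans(preds[t], labels[t]) for t in preds}
num_obs_spearmans = {t: count_non_nan(labels[t]) for t in labels}
```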
CVHub520/yolov5_obb
detect.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=None, dnn=False):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # CoreML: *.mlmodel\n # TensorFlow: *_saved_model\n # TensorFlow: *.pb\n # TensorFlow Lite: *.tflite\n # ONNX Runtime: *.onnx\n # OpenCV DNN: *.onnx with dnn=True\n # TensorRT: *.engine\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n suffix = Path(w).suffix.lower()\n suffixes = ['.pt', '.torchscript', '.onnx', '.engine', '.tflite', '.pb', '', '.mlmodel']\n check_suffix(w, suffixes) # check weights have acceptable suffix\n pt, jit, onnx, engine, tflite, pb, saved_model, coreml = (suffix == x for x in suffixes) # backend booleans\n stride, names = 64, [f'class{i}' for i in range(1000)] # assign defaults\n w = attempt_download(w) # download if not local\n\n if jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files)\n if extra_files['config.txt']:\n d = json.loads(extra_files['config.txt']) # extra_files dict\n stride, names = int(d['stride']), d['names']\n elif pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, map_location=device)\n stride = int(model.stride.max()) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements(('opencv-python>=4.5.4',))\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n cuda = torch.cuda.is_available()\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '8.0.0', verbose=True) # version requirement\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n bindings = OrderedDict()\n for index in range(model.num_bindings):\n name = model.get_binding_name(index)\n dtype = trt.nptype(model.get_binding_dtype(index))\n shape = tuple(model.get_binding_shape(index))\n data = torch.from_numpy(np.empty(shape, dtype=np.dtype(dtype))).to(device)\n bindings[name] = Binding(name, dtype, shape, data, int(data.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n context = model.create_execution_context()\n batch_size = bindings['images'].shape[0]\n else: # TensorFlow model (TFLite, pb, saved_model)\n if pb: # 
https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow *.pb inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n return x.prune(tf.nest.map_structure(x.graph.as_graph_element, inputs),\n tf.nest.map_structure(x.graph.as_graph_element, outputs))\n\n graph_def = tf.Graph().as_graph_def()\n graph_def.ParseFromString(open(w, 'rb').read())\n frozen_func = wrap_frozen_graph(gd=graph_def, inputs=\"x:0\", outputs=\"Identity:0\")\n elif saved_model:\n LOGGER.info(f'Loading {w} for TensorFlow saved_model inference...')\n import tensorflow as tf\n model = tf.keras.models.load_model(w)\n elif tflite: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n if 'edgetpu' in w.lower():\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n import tflite_runtime.interpreter as tfli\n delegate = {'Linux': 'libedgetpu.so.1', # install https://coral.ai/software/#edgetpu-runtime\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = tfli.Interpreter(model_path=w, experimental_delegates=[tfli.load_delegate(delegate)])\n else:\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n import tensorflow as tf\n interpreter = tf.lite.Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False, val=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.pt or self.jit: # PyTorch\n y = self.model(im) if self.jit else self.model(im, augment=augment, visualize=visualize)\n return y if val else y[0]\n elif self.coreml: # CoreML\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n elif self.onnx: # ONNX\n im = im.cpu().numpy() # torch to numpy\n if self.dnn: # ONNX OpenCV DNN\n self.net.setInput(im)\n y = self.net.forward()\n else: # ONNX Runtime\n y = self.session.run([self.session.get_outputs()[0].name], {self.session.get_inputs()[0].name: im})[0]\n elif self.engine: # TensorRT\n assert im.shape == self.bindings['images'].shape, (im.shape, self.bindings['images'].shape)\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = self.bindings['output'].data\n else: # TensorFlow model (TFLite, pb, saved_model)\n im = im.permute(0, 2, 3, 1).cpu().numpy() # torch BCHW to numpy BHWC shape(1,320,192,3)\n if self.pb:\n y = self.frozen_func(x=self.tf.constant(im)).numpy()\n elif self.saved_model:\n y = self.model(im, training=False).numpy()\n elif self.tflite:\n input, output = self.input_details[0], self.output_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = 
input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n y = (y.astype(np.float32) - zero_point) * scale # re-scale\n y[..., 0] *= w # x\n y[..., 1] *= h # y\n y[..., 2] *= w # w\n y[..., 3] *= h # h\n y = torch.tensor(y) if isinstance(y, np.ndarray) else y\n return (y, []) if val else y\n\n def warmup(self, imgsz=(1, 3, 640, 640), half=False):\n # Warmup model by running inference once\n if self.pt or self.engine or self.onnx: # warmup types\n if isinstance(self.device, torch.device) and self.device.type != 'cpu': # only warmup GPU models\n im = torch.zeros(*imgsz).to(self.device).type(torch.half if half else torch.float) # input image\n self.forward(im) # warmup" }, { "identifier": "IMG_FORMATS", "path": "utils/datasets.py", "snippet": "IMG_FORMATS = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo'] # acceptable image suffixes" }, { "identifier": "VID_FORMATS", "path": "utils/datasets.py", "snippet": "VID_FORMATS = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv'] # acceptable video suffixes" }, { "identifier": "LoadImages", "path": "utils/datasets.py", "snippet": "class LoadImages:\n # YOLOv5 image/video dataloader, i.e. `python detect.py --source image.jpg/vid.mp4`\n def __init__(self, path, img_size=640, stride=32, auto=True):\n p = str(Path(path).resolve()) # os-agnostic absolute path\n if '*' in p:\n files = sorted(glob.glob(p, recursive=True)) # glob\n elif os.path.isdir(p):\n files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir\n elif os.path.isfile(p):\n files = [p] # files\n else:\n raise Exception(f'ERROR: {p} does not exist')\n\n images = [x for x in files if x.split('.')[-1].lower() in IMG_FORMATS]\n videos = [x for x in files if x.split('.')[-1].lower() in VID_FORMATS]\n ni, nv = len(images), len(videos)\n\n self.img_size = img_size\n self.stride = stride\n self.files = images + videos\n self.nf = ni + nv # number of files\n self.video_flag = [False] * ni + [True] * nv\n self.mode = 'image'\n self.auto = auto\n if any(videos):\n self.new_video(videos[0]) # new video\n else:\n self.cap = None\n assert self.nf > 0, f'No images or videos found in {p}. 
' \\\n f'Supported formats are:\\nimages: {IMG_FORMATS}\\nvideos: {VID_FORMATS}'\n\n def __iter__(self):\n self.count = 0\n return self\n\n def __next__(self):\n if self.count == self.nf:\n raise StopIteration\n path = self.files[self.count]\n\n if self.video_flag[self.count]:\n # Read video\n self.mode = 'video'\n ret_val, img0 = self.cap.read()\n while not ret_val:\n self.count += 1\n self.cap.release()\n if self.count == self.nf: # last video\n raise StopIteration\n else:\n path = self.files[self.count]\n self.new_video(path)\n ret_val, img0 = self.cap.read()\n\n self.frame += 1\n s = f'video {self.count + 1}/{self.nf} ({self.frame}/{self.frames}) {path}: '\n\n else:\n # Read image\n self.count += 1\n img0 = cv2.imread(path) # BGR\n assert img0 is not None, f'Image Not Found {path}'\n s = f'image {self.count}/{self.nf} {path}: '\n\n # Padded resize\n img = letterbox(img0, self.img_size, stride=self.stride, auto=self.auto)[0]\n\n # Convert\n img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB\n img = np.ascontiguousarray(img)\n\n return path, img, img0, self.cap, s\n\n def new_video(self, path):\n self.frame = 0\n self.cap = cv2.VideoCapture(path)\n self.frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))\n\n def __len__(self):\n return self.nf # number of files" }, { "identifier": "LoadStreams", "path": "utils/datasets.py", "snippet": "class LoadStreams:\n # YOLOv5 streamloader, i.e. `python detect.py --source 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP streams`\n def __init__(self, sources='streams.txt', img_size=640, stride=32, auto=True):\n self.mode = 'stream'\n self.img_size = img_size\n self.stride = stride\n\n if os.path.isfile(sources):\n with open(sources) as f:\n sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]\n else:\n sources = [sources]\n\n n = len(sources)\n self.imgs, self.fps, self.frames, self.threads = [None] * n, [0] * n, [0] * n, [None] * n\n self.sources = [clean_str(x) for x in sources] # clean source names for later\n self.auto = auto\n for i, s in enumerate(sources): # index, source\n # Start thread to read frames from video stream\n st = f'{i + 1}/{n}: {s}... '\n if 'youtube.com/' in s or 'youtu.be/' in s: # if source is YouTube video\n check_requirements(('pafy', 'youtube_dl'))\n import pafy\n s = pafy.new(s).getbest(preftype=\"mp4\").url # YouTube URL\n s = eval(s) if s.isnumeric() else s # i.e. s = '0' local webcam\n cap = cv2.VideoCapture(s)\n assert cap.isOpened(), f'{st}Failed to open {s}'\n w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\n h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))\n self.fps[i] = max(cap.get(cv2.CAP_PROP_FPS) % 100, 0) or 30.0 # 30 FPS fallback\n self.frames[i] = max(int(cap.get(cv2.CAP_PROP_FRAME_COUNT)), 0) or float('inf') # infinite stream fallback\n\n _, self.imgs[i] = cap.read() # guarantee first frame\n self.threads[i] = Thread(target=self.update, args=([i, cap, s]), daemon=True)\n LOGGER.info(f\"{st} Success ({self.frames[i]} frames {w}x{h} at {self.fps[i]:.2f} FPS)\")\n self.threads[i].start()\n LOGGER.info('') # newline\n\n # check for common shapes\n s = np.stack([letterbox(x, self.img_size, stride=self.stride, auto=self.auto)[0].shape for x in self.imgs])\n self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal\n if not self.rect:\n LOGGER.warning('WARNING: Stream shapes differ. 
For optimal performance supply similarly-shaped streams.')\n\n def update(self, i, cap, stream):\n # Read stream `i` frames in daemon thread\n n, f, read = 0, self.frames[i], 1 # frame number, frame array, inference every 'read' frame\n while cap.isOpened() and n < f:\n n += 1\n # _, self.imgs[index] = cap.read()\n cap.grab()\n if n % read == 0:\n success, im = cap.retrieve()\n if success:\n self.imgs[i] = im\n else:\n LOGGER.warning('WARNING: Video stream unresponsive, please check your IP camera connection.')\n self.imgs[i] = np.zeros_like(self.imgs[i])\n cap.open(stream) # re-open stream if signal was lost\n time.sleep(1 / self.fps[i]) # wait time\n\n def __iter__(self):\n self.count = -1\n return self\n\n def __next__(self):\n self.count += 1\n if not all(x.is_alive() for x in self.threads) or cv2.waitKey(1) == ord('q'): # q to quit\n cv2.destroyAllWindows()\n raise StopIteration\n\n # Letterbox\n img0 = self.imgs.copy()\n img = [letterbox(x, self.img_size, stride=self.stride, auto=self.rect and self.auto)[0] for x in img0]\n\n # Stack\n img = np.stack(img, 0)\n\n # Convert\n img = img[..., ::-1].transpose((0, 3, 1, 2)) # BGR to RGB, BHWC to BCHW\n img = np.ascontiguousarray(img)\n\n return self.sources, img, img0, None, ''\n\n def __len__(self):\n return len(self.sources) # 1E12 frames = 32 streams at 30 FPS for 30 years" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = set_logging(__name__) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "check_file", "path": "utils/general.py", "snippet": "def check_file(file, suffix=''):\n # Search/download file (if necessary) and return path\n check_suffix(file, suffix) # optional\n file = str(file) # convert to str()\n if Path(file).is_file() or file == '': # exists\n return file\n elif file.startswith(('http:/', 'https:/')): # download\n url = str(Path(file)).replace(':/', '://') # Pathlib turns :// -> :/\n file = Path(urllib.parse.unquote(file).split('?')[0]).name # '%2F' to '/', split https://url.com/file.txt?auth\n if Path(file).is_file():\n print(f'Found {url} locally at {file}') # file already exists\n else:\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}' # check\n return file\n else: # search\n files = []\n for d in 'data', 'models', 'utils': # search directories\n files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True)) # find file\n assert len(files), f'File not found: {file}' # assert file was found\n assert len(files) == 1, f\"Multiple files match '{file}', specify exact path: {files}\" # assert unique\n return files[0] # return file" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n print(f\"#305 in utils/general.py - s={s}\")\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n print(f'WARNING: --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_imshow", "path": "utils/general.py", "snippet": "def check_imshow():\n # Check if environment supports image displays\n try:\n assert not is_docker(), 'cv2.imshow() is disabled in Docker environments'\n assert not is_colab(), 'cv2.imshow() is disabled in Google Colab environments'\n cv2.imshow('test', np.zeros((1, 1, 3)))\n cv2.waitKey(1)\n cv2.destroyAllWindows()\n cv2.waitKey(1)\n return True\n except Exception as e:\n print(f'WARNING: Environment does not support cv2.imshow() or PIL Image.show() image displays\\n{e}')\n return False" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@try_except\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True):\n # Check installed dependencies meet requirements (pass *.txt file or list of packages)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, (str, Path)): # requirements.txt file\n file = Path(requirements)\n assert file.exists(), f\"{prefix} {file.resolve()} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n else: # list or tuple of packages\n requirements = [x for x in requirements if x not in exclude]\n\n n = 0 # number of packages updates\n for r in requirements:\n try:\n pkg.require(r)\n except Exception as e: # DistributionNotFound or VersionConflict if requirements not met\n s = f\"{prefix} {r} not found and is required by YOLOv5\"\n if install:\n print(f\"{s}, attempting auto-update...\")\n try:\n assert check_online(), f\"'pip install {r}' skipped (offline)\"\n print(check_output(f\"pip install '{r}'\", shell=True).decode())\n n += 1\n except Exception as e:\n print(f'{prefix} {e}')\n else:\n print(f'{s}. Please install and rerun your command.')\n\n if n: # if packages updated\n source = file.resolve() if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n print(emojis(s))" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. 
runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n matches = [re.search(rf\"%s{sep}(\\d+)\" % path.stem, d) for d in dirs]\n i = [int(m.groups()[0]) for m in matches if m] # indices\n n = max(i) + 1 if i else 2 # increment number\n path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=300):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n nc = prediction.shape[2] - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 10.0 # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box (center x, center y, width, height) to (x1, y1, x2, y2)\n box = xywh2xyxy(x[:, :4])\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using 
weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "non_max_suppression_obb", "path": "utils/general.py", "snippet": "def non_max_suppression_obb(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), max_det=1500):\n \"\"\"Runs Non-Maximum Suppression (NMS) on inference results_obb\n Args:\n prediction (tensor): (b, n_all_anchors, [cx cy l s obj num_cls theta_cls])\n agnostic (bool): True = NMS will be applied between elements of different categories\n labels : () or\n\n Returns:\n list of detections, len=batch_size, on (n,7) tensor per image [xylsθ, conf, cls] θ ∈ [-pi/2, pi/2)\n \"\"\"\n\n nc = prediction.shape[2] - 5 - 180 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n class_index = nc + 5\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n max_wh = 4096 # min_wh, max_wh = 2, 4096 # (pixels) minimum and maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 30.0 # seconds to quit after\n # redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n\n t = time.time()\n output = [torch.zeros((0, 7), device=prediction.device)] * prediction.shape[0]\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence, (tensor): (n_conf_thres, [cx cy l s obj num_cls theta_cls])\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n l = labels[xi]\n v = torch.zeros((len(l), nc + 5), device=x.device)\n v[:, :4] = l[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(l)), l[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:class_index] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n thete_index, theta_pred = torch.max(x[:, class_index:], 1, keepdim=True) # [n_conf_thres, 1] θ ∈ int[0, 179]\n theta_pred = (theta_pred - 90) / 180 * pi # [n_conf_thres, 1] θ ∈ [-pi/2, pi/2)\n\n # Detections matrix nx7 (xyls, θ, conf, cls) θ ∈ [-pi/2, pi/2)\n if multi_label:\n i, j = (x[:, 5:class_index] > conf_thres).nonzero(as_tuple=False).T # ()\n x = torch.cat((x[i, :4], theta_pred[i], x[i, j + 5, None], j[:, None].float()), 1)\n else: # best class only\n conf, j = x[:, 5:class_index].max(1, keepdim=True)\n x = torch.cat((x[:, :4], theta_pred, conf, j.float()), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 6:7] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # 
excess boxes\n x = x[x[:, 5].argsort(descending=True)[:max_nms]] # sort by confidence\n\n # Batched NMS\n c = x[:, 6:7] * (0 if agnostic else max_wh) # classes\n rboxes = x[:, :5].clone() \n rboxes[:, :2] = rboxes[:, :2] + c # rboxes (offset by class)\n scores = x[:, 5] # scores\n _, i = obb_nms(rboxes, scores, iou_thres)\n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n\n output[xi] = x[i]\n if (time.time() - t) > time_limit:\n print(f'WARNING: NMS time limit {time_limit}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(name, opt):\n # Print argparser arguments\n LOGGER.info(colorstr(f'{name}: ') + ', '.join(f'{k}={v}' for k, v in vars(opt).items()))" }, { "identifier": "scale_coords", "path": "utils/general.py", "snippet": "def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\n # Rescale coords (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n coords[:, [0, 2]] -= pad[0] # x padding\n coords[:, [1, 3]] -= pad[1] # y padding\n coords[:, :4] /= gain\n clip_coords(coords, img0_shape)\n return coords" }, { "identifier": "scale_polys", "path": "utils/general.py", "snippet": "def scale_polys(img1_shape, polys, img0_shape, ratio_pad=None):\n # ratio_pad: [(h_raw, w_raw), (hw_ratios, wh_paddings)]\n # Rescale coords (xyxyxyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = resized / raw\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0] # h_ratios\n pad = ratio_pad[1] # wh_paddings\n\n polys[:, [0, 2, 4, 6]] -= pad[0] # x padding\n polys[:, [1, 3, 5, 7]] -= pad[1] # y padding\n polys[:, :8] /= gain # Rescale poly shape to img0_shape\n #clip_polys(polys, img0_shape)\n return polys" }, { "identifier": "strip_optimizer", "path": "utils/general.py", "snippet": "def strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\n # Strip optimizer from 'f' to finalize training, optionally save as 's'\n x = torch.load(f, map_location=torch.device('cpu'))\n if x.get('ema'):\n x['model'] = x['ema'] # replace model with ema\n for k in 'optimizer', 'best_fitness', 'wandb_id', 'ema', 'updates': # keys\n x[k] = None\n x['epoch'] = -1\n x['model'].half() # to FP16\n for p in x['model'].parameters():\n p.requires_grad = False\n torch.save(x, s or f)\n mb = os.path.getsize(s or f) / 1E6 # filesize\n print(f\"Optimizer stripped from {f},{(' saved as %s,' % s) if s else ''} {mb:.1f}MB\")" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "Annotator", "path": "utils/plots.py", "snippet": "CONFIG_DIR = user_config_dir() # Ultralytics settings dir\nRANK = 
int(os.getenv('RANK', -1))\nclass Colors:\nclass Annotator:\n def __init__(self):\n def __call__(self, i, bgr=False):\n def hex2rgb(h): # rgb order (PIL)\ndef check_font(font='Arial.ttf', size=10):\n def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):\n def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def poly_label(self, poly, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):\n def rectangle(self, xy, fill=None, outline=None, width=1):\n def text(self, xy, text, txt_color=(255, 255, 255)):\n def result(self):\ndef feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')):\ndef hist2d(x, y, n=100):\ndef butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5):\n def butter_lowpass(cutoff, fs, order):\ndef output_to_target(output): #list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2)\ndef plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=2048, max_subplots=4):\ndef plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''):\ndef plot_val_txt(): # from utils.plots import *; plot_val()\ndef plot_targets_txt(): # from utils.plots import *; plot_targets_txt()\ndef plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\ndef plot_labels(labels, names=(), save_dir=Path(''), img_size=1024):\ndef plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve()\ndef plot_results(file='path/to/results.csv', dir=''):\ndef profile_idetection(start=0, stop=0, labels=(), save_dir=''):\ndef save_one_box(xyxy, im, file='image.jpg', gain=1.02, pad=10, square=False, BGR=False, save=True):" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 
0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2:.0f}MiB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "time_sync", "path": "utils/torch_utils.py", "snippet": "def time_sync():\n # pytorch-accurate time\n if torch.cuda.is_available():\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "poly2rbox", "path": "utils/rboxs_utils.py", "snippet": "def poly2rbox(polys, num_cls_thata=180, radius=6.0, use_pi=False, use_gaussian=False):\n \"\"\"\n Trans poly format to rbox format.\n Args:\n polys (array): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n num_cls_thata (int): [1], theta class num\n radius (float32): [1], window radius for Circular Smooth Label\n use_pi (bool): True θ∈[-pi/2, pi/2) , False θ∈[0, 180)\n\n Returns:\n use_gaussian True:\n rboxes (array): \n csl_labels (array): (num_gts, num_cls_thata)\n elif \n rboxes (array): (num_gts, [cx cy l s θ]) \n \"\"\"\n assert polys.shape[-1] == 8\n if use_gaussian:\n csl_labels = []\n rboxes = []\n for poly in polys:\n poly = np.float32(poly.reshape(4, 2))\n (x, y), (w, h), angle = cv2.minAreaRect(poly) # θ ∈ [0, 90]\n angle = -angle # θ ∈ [-90, 0]\n theta = angle / 180 * pi # 转为pi制\n\n # trans opencv format to longedge format θ ∈ [-pi/2, pi/2]\n if w != max(w, h): \n w, h = h, w\n theta += pi/2\n theta = regular_theta(theta) # limit theta ∈ [-pi/2, pi/2)\n angle = (theta * 180 / pi) + 90 # θ ∈ [0, 180)\n\n if not use_pi: # 采用angle弧度制 θ ∈ [0, 180)\n rboxes.append([x, y, w, h, angle])\n else: # 采用pi制\n rboxes.append([x, y, w, h, theta])\n if use_gaussian:\n csl_label = gaussian_label_cpu(label=angle, num_class=num_cls_thata, u=0, sig=radius)\n csl_labels.append(csl_label)\n if use_gaussian:\n return np.array(rboxes), np.array(csl_labels)\n return np.array(rboxes)" }, { "identifier": "rbox2poly", "path": "utils/rboxs_utils.py", "snippet": "def rbox2poly(obboxes):\n \"\"\"\n Trans rbox format to poly format.\n Args:\n rboxes (array/tensor): (num_gts, [cx cy l s θ]) θ∈[-pi/2, pi/2)\n\n Returns:\n polys (array/tensor): (num_gts, [x1 y1 x2 y2 x3 y3 x4 y4]) \n \"\"\"\n if isinstance(obboxes, torch.Tensor):\n center, w, h, theta = obboxes[:, :2], obboxes[:, 2:3], obboxes[:, 3:4], obboxes[:, 4:5]\n Cos, Sin = torch.cos(theta), torch.sin(theta)\n\n vector1 = torch.cat(\n (w/2 * Cos, -w/2 * Sin), dim=-1)\n vector2 = torch.cat(\n (-h/2 * Sin, -h/2 * Cos), dim=-1)\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 + vector2\n order = obboxes.shape[:-1]\n return torch.cat(\n (point1, point2, point3, point4), dim=-1).reshape(*order, 8)\n else:\n center, w, h, theta = np.split(obboxes, (2, 3, 4), axis=-1)\n Cos, Sin = np.cos(theta), np.sin(theta)\n\n vector1 = np.concatenate(\n [w/2 * Cos, -w/2 * Sin], axis=-1)\n vector2 = np.concatenate(\n [-h/2 * Sin, -h/2 * Cos], axis=-1)\n\n point1 = center + vector1 + vector2\n point2 = center + vector1 - vector2\n point3 = center - vector1 - vector2\n point4 = center - vector1 
+ vector2\n order = obboxes.shape[:-1]\n return np.concatenate(\n [point1, point2, point3, point4], axis=-1).reshape(*order, 8)" } ]
import argparse import os import sys import cv2 import torch import torch.backends.cudnn as cudnn from pathlib import Path from models.common import DetectMultiBackend from utils.datasets import IMG_FORMATS, VID_FORMATS, LoadImages, LoadStreams from utils.general import (LOGGER, check_file, check_img_size, check_imshow, check_requirements, colorstr, increment_path, non_max_suppression, non_max_suppression_obb, print_args, scale_coords, scale_polys, strip_optimizer, xyxy2xywh) from utils.plots import Annotator, colors, save_one_box from utils.torch_utils import select_device, time_sync from utils.rboxs_utils import poly2rbox, rbox2poly
14927
# Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Run inference on images, videos, directories, streams, etc. Usage: $ python path/to/detect.py --weights yolov5s.pt --source 0 # webcam img.jpg # image vid.mp4 # video path/ # directory path/*.jpg # glob 'https://youtu.be/Zgi9g1ksQHc' # YouTube 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream """ FILE = Path(__file__).resolve() ROOT = FILE.parents[0] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative @torch.no_grad() def run(weights=ROOT / 'yolov5s.pt', # model.pt path(s) source=ROOT / 'data/images', # file/dir/URL/glob, 0 for webcam imgsz=(640, 640), # inference size (height, width) conf_thres=0.25, # confidence threshold iou_thres=0.45, # NMS IOU threshold max_det=1000, # maximum detections per image device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu view_img=False, # show results save_txt=False, # save results to *.txt save_conf=False, # save confidences in --save-txt labels save_crop=False, # save cropped prediction boxes nosave=False, # do not save images/videos classes=None, # filter by class: --class 0, or --class 0 2 3 agnostic_nms=False, # class-agnostic NMS augment=False, # augmented inference visualize=False, # visualize features update=False, # update all models project=ROOT / 'runs/detect', # save results to project/name name='exp', # save results to project/name exist_ok=False, # existing project/name ok, do not increment line_thickness=3, # bounding box thickness (pixels) hide_labels=False, # hide labels hide_conf=False, # hide confidences half=False, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference ): source = str(source) save_img = not nosave and not source.endswith('.txt') # save inference images is_file = Path(source).suffix[1:] in (IMG_FORMATS + VID_FORMATS) is_url = source.lower().startswith(('rtsp://', 'rtmp://', 'http://', 'https://')) webcam = source.isnumeric() or source.endswith('.txt') or (is_url and not is_file) if is_url and is_file: source = check_file(source) # download # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model device = select_device(device) model = DetectMultiBackend(weights, device=device, dnn=dnn) stride, names, pt, jit, onnx, engine = model.stride, model.names, model.pt, model.jit, model.onnx, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size # Half half &= (pt or jit or engine) and device.type != 'cpu' # half precision only supported by PyTorch on CUDA if pt or jit: model.model.half() if half else model.model.float() # Dataloader if webcam: view_img = check_imshow() cudnn.benchmark = True # set True to speed up constant image size inference dataset = LoadStreams(source, img_size=imgsz, stride=stride, auto=pt) bs = len(dataset) # batch_size else: dataset = LoadImages(source, img_size=imgsz, stride=stride, auto=pt) bs = 1 # batch_size vid_path, vid_writer = [None] * bs, [None] * bs # Run inference model.warmup(imgsz=(1, 3, *imgsz), half=half) # warmup dt, seen = [0.0, 0.0, 0.0], 0 for path, im, im0s, vid_cap, s in dataset: t1 = time_sync() im = torch.from_numpy(im).to(device) im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 if len(im.shape) == 3: im = im[None] # expand for batch dim t2 = time_sync() dt[0] += t2 - t1 # Inference visualize = 
increment_path(save_dir / Path(path).stem, mkdir=True) if visualize else False pred = model(im, augment=augment, visualize=visualize) t3 = time_sync() dt[1] += t3 - t2 # NMS # pred: list*(n, [xylsθ, conf, cls]) θ ∈ [-pi/2, pi/2) pred = non_max_suppression_obb(pred, conf_thres, iou_thres, classes, agnostic_nms, multi_label=True, max_det=max_det) dt[2] += time_sync() - t3 # Second-stage classifier (optional) # pred = utils.general.apply_classifier(pred, classifier_model, im, im0s) # Process predictions for i, det in enumerate(pred): # per image pred_poly = rbox2poly(det[:, :5]) # (n, [x1 y1 x2 y2 x3 y3 x4 y4]) seen += 1 if webcam: # batch_size >= 1 p, im0, frame = path[i], im0s[i].copy(), dataset.count s += f'{i}: ' else: p, im0, frame = path, im0s.copy(), getattr(dataset, 'frame', 0) p = Path(p) # to Path save_path = str(save_dir / p.name) # im.jpg txt_path = str(save_dir / 'labels' / p.stem) + ('' if dataset.mode == 'image' else f'_{frame}') # im.txt s += '%gx%g ' % im.shape[2:] # print string gn = torch.tensor(im0.shape)[[1, 0, 1, 0]] # normalization gain whwh imc = im0.copy() if save_crop else im0 # for save_crop annotator = Annotator(im0, line_width=line_thickness, example=str(names)) if len(det): # Rescale polys from img_size to im0 size # det[:, :4] = scale_coords(im.shape[2:], det[:, :4], im0.shape).round() pred_poly = scale_polys(im.shape[2:], pred_poly, im0.shape) det = torch.cat((pred_poly, det[:, -2:]), dim=1) # (n, [poly conf cls]) # Print results for c in det[:, -1].unique(): n = (det[:, -1] == c).sum() # detections per class s += f"{n} {names[int(c)]}{'s' * (n > 1)}, " # add to string # Write results for *poly, conf, cls in reversed(det): if save_txt: # Write to file # xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh poly = poly.tolist() line = (cls, *poly, conf) if save_conf else (cls, *poly) # label format with open(txt_path + '.txt', 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') if save_img or save_crop or view_img: # Add poly to image c = int(cls) # integer class label = None if hide_labels else (names[c] if hide_conf else f'{names[c]} {conf:.2f}') # annotator.box_label(xyxy, label, color=colors(c, True)) annotator.poly_label(poly, label, color=colors(c, True)) if save_crop: # Yolov5-obb doesn't support it yet # save_one_box(xyxy, imc, file=save_dir / 'crops' / names[c] / f'{p.stem}.jpg', BGR=True) pass # Print time (inference-only) LOGGER.info(f'{s}Done. 
({t3 - t2:.3f}s)') # Stream results im0 = annotator.result() if view_img: cv2.imshow(str(p), im0) cv2.waitKey(1) # 1 millisecond # Save results (image with detections) if save_img: if dataset.mode == 'image': cv2.imwrite(save_path, im0) else: # 'video' or 'stream' if vid_path[i] != save_path: # new video vid_path[i] = save_path if isinstance(vid_writer[i], cv2.VideoWriter): vid_writer[i].release() # release previous video writer if vid_cap: # video fps = vid_cap.get(cv2.CAP_PROP_FPS) w = int(vid_cap.get(cv2.CAP_PROP_FRAME_WIDTH)) h = int(vid_cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) else: # stream fps, w, h = 30, im0.shape[1], im0.shape[0] save_path += '.mp4' vid_writer[i] = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'mp4v'), fps, (w, h)) vid_writer[i].write(im0) # Print results t = tuple(x / seen * 1E3 for x in dt) # speeds per image LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {(1, 3, *imgsz)}' % t) if save_txt or save_img: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") if update:
strip_optimizer(weights) # update model (to fix SourceChangeWarning)
17
2023-10-31 06:06:41+00:00
24k
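The yolov5_obb record that closes above depends on rbox2poly (quoted in full inside its context field) to expand rotated boxes [cx, cy, l, s, theta] into 8-point polygons before scale_polys and poly_label are applied in detect.py. The following is a self-contained restatement of the numpy branch of that conversion with a tiny worked example; the name rbox_to_poly and the sample box values are illustrative only and do not come from the dataset.

```python
# Standalone restatement of rbox2poly's numpy branch, for illustration only.
import numpy as np

def rbox_to_poly(rboxes):
    # rboxes: (n, 5) array of [cx, cy, l, s, theta], theta in [-pi/2, pi/2)
    center, w, h, theta = np.split(rboxes, (2, 3, 4), axis=-1)
    cos, sin = np.cos(theta), np.sin(theta)
    vec1 = np.concatenate([w / 2 * cos, -w / 2 * sin], axis=-1)   # half of the long edge
    vec2 = np.concatenate([-h / 2 * sin, -h / 2 * cos], axis=-1)  # half of the short edge
    p1 = center + vec1 + vec2
    p2 = center + vec1 - vec2
    p3 = center - vec1 - vec2
    p4 = center - vec1 + vec2
    return np.concatenate([p1, p2, p3, p4], axis=-1)  # (n, 8): x1 y1 x2 y2 x3 y3 x4 y4

# Example: an axis-aligned 40x20 box centred at (100, 50) maps to its four corners.
print(rbox_to_poly(np.array([[100.0, 50.0, 40.0, 20.0, 0.0]])))
# corners: (120, 40), (120, 60), (80, 60), (80, 40)
```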
serengil/LightPHE
lightphe/models/Ciphertext.py
[ { "identifier": "Homomorphic", "path": "lightphe/models/Homomorphic.py", "snippet": "class Homomorphic(ABC):\n keys: dict\n plaintext_modulo: int\n ciphertext_modulo: int\n\n @abstractmethod\n def generate_keys(self, key_size: int, s: Optional[int] = None) -> dict:\n pass\n\n @abstractmethod\n def generate_random_key(self) -> int:\n pass\n\n @abstractmethod\n def encrypt(\n self, plaintext: int, random_key: Union[Optional[int], Optional[list]] = None\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def decrypt(self, ciphertext: Union[int, tuple, list]) -> int:\n pass\n\n @abstractmethod\n def add(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple, list]:\n pass\n\n @abstractmethod\n def multiply(\n self, ciphertext1: Union[int, tuple, list], ciphertext2: Union[int, tuple, list]\n ) -> Union[int, tuple]:\n pass\n\n @abstractmethod\n def xor(self, ciphertext1: list, ciphertext2: list) -> list:\n pass\n\n @abstractmethod\n def multiply_by_contant(self, ciphertext: Union[int, tuple, list], constant: int) -> int:\n pass\n\n @abstractmethod\n def reencrypt(self, ciphertext: Union[int, tuple, list]) -> Union[int, tuple, list]:\n pass" }, { "identifier": "Algorithm", "path": "lightphe/models/Algorithm.py", "snippet": "class Algorithm:\n RSA = \"RSA\"\n ElGamal = \"ElGamal\"\n ExponentialElGamal = \"Exponential-ElGamal\"\n EllipticCurveElGamal = \"EllipticCurve-ElGamal\"\n Paillier = \"Paillier\"\n DamgardJurik = \"Damgard-Jurik\"\n OkamotoUchiyama = \"Okamoto-Uchiyama\"\n Benaloh = \"Benaloh\"\n NaccacheStern = \"Naccache-Stern\"\n GoldwasserMicali = \"Goldwasser-Micali\"" }, { "identifier": "RSA", "path": "lightphe/cryptosystems/RSA.py", "snippet": "class RSA(Homomorphic):\n \"\"\"\n RSA algorithm is partially homomorphic with respect to the multiplication\n Ref: https://sefiks.com/2023/03/06/a-step-by-step-partially-homomorphic-encryption-example-with-rsa-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 1024, encrypt_with_public=True):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n encrypt_with_public (boolean): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.encrypt_with_public = encrypt_with_public\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of RSA cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n while True:\n try:\n # picking a prime modulus p and q\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n # select public exponent e\n while True:\n e = random.randint(1, phi - 1)\n if math.gcd(e, n) == 1:\n break\n\n d = pow(e, -1, phi)\n break\n except:\n pass\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"e\"] = e\n keys[\"private_key\"][\"d\"] = d\n return keys\n\n def generate_random_key(self) -> int:\n pass\n\n def encrypt(self, plaintext: int) -> int:\n \"\"\"\n Encrypt plain messages with RSA\n Args:\n plaintext (int): plain message\n Returns:\n ciphertext (int): ciphertext encrypted with RSA\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if plaintext > n:\n plaintext = plaintext % n\n logger.debug(\n f\"RSA can encrypt messages [1, {n}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n if self.encrypt_with_public is True:\n e = self.keys[\"public_key\"][\"e\"]\n c = pow(plaintext, e, n)\n else:\n d = self.keys[\"private_key\"][\"d\"]\n c = pow(plaintext, d, n)\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt ciphertexts with RSA\n Args:\n ciphertext (int): encrypted message\n decrypt_with_private (int): RSA has two keys: private (d) and public (e).\n If you encrypt a message with smo's public, then just that person can decrypt it\n with his private (secure message). 
Otherwise, if you encrypt it with your private,\n one can decrypt it with your public (digital signatures).\n Set this arg to True if you want to do encryption with public key e,\n and do decryption with private key d.\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if self.encrypt_with_public is True:\n d = self.keys[\"private_key\"][\"d\"]\n p = pow(ciphertext, d, n)\n else:\n e = self.keys[\"public_key\"][\"e\"]\n p = pow(ciphertext, e, n)\n\n return p\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic multiplication on encrypted data.\n Result of this must be equal to E(m1 * m2)\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the addition\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"RSA is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n raise ValueError(\"RSA is not supporting multiplying ciphertext by a known constant\")\n\n def reencrypt(self, ciphertext: int) -> int:\n raise ValueError(\"RSA does not support re-encryption\")" }, { "identifier": "ElGamal", "path": "lightphe/cryptosystems/ElGamal.py", "snippet": "class ElGamal(Homomorphic):\n \"\"\"\n ElGamal algorithm is either multiplicatively or additively homomorphic\n Ref: https://sefiks.com/2023/03/27/a-step-by-step-partially-homomorphic-encryption-example-with-elgamal-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, exponential=False, key_size: int = 1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n exponential (boolean): set this to True to make cryptosystem exponential ElGamal.\n Regular ElGamal is homomorphic with respect to the multiplication whereas\n exponential ElGamal is homomorphic with respect to the addition\n \"\"\"\n self.exponential = exponential\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"p\"]\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(100, 2 ** int(key_size / 2) - 1)\n\n # picking a generator g\n g = random.randint(2, int(math.sqrt(p)))\n\n # picking a private key x\n x = random.randint(1, p - 2)\n\n # public key\n y = pow(g, x, p)\n\n keys[\"public_key\"] = {\n \"p\": p,\n \"g\": g,\n \"y\": y,\n }\n\n keys[\"private_key\"] = {\"x\": x}\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n return random.randint(1, p - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. 
Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n y = self.keys[\"public_key\"][\"y\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = plaintext % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems you exceeded this limit. New plaintext is {plaintext}\"\n )\n\n c1 = pow(g, r, p)\n if self.exponential is False:\n c2 = (plaintext * pow(y, r, p)) % p\n else:\n c2 = (pow(g, plaintext, p) * pow(y, r, p)) % p\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n c1, c2 = ciphertext\n\n x = self.keys[\"private_key\"][\"x\"]\n p = self.keys[\"public_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n m_prime = (c2 * pow(c1, -1 * x, p)) % p\n\n if self.exponential is False:\n return m_prime\n\n if self.exponential is True:\n # m_prime = g^m . Find m for known m_prime and known g (DLP).\n m = 0\n while True:\n if pow(g, m, p) == m_prime:\n return m\n m += 1\n if m > p:\n raise ValueError(f\"Cannot restore the message in [0, {p}]\")\n\n return -1\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic multiplication on encrypted data\n Result of this must be equal to E(m1 * m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is True:\n raise ValueError(\"Exponential ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n if self.exponential is False:\n raise ValueError(\"Regular ElGamal is not homomorphic with respect to the addition\")\n p = self.keys[\"public_key\"][\"p\"]\n return (ciphertext1[0] * ciphertext2[0]) % p, (ciphertext1[1] * ciphertext2[1]) % p\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise ValueError(\"ElGamal is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n if self.exponential is False:\n raise ValueError(\"ElGamal is not supporting multiplying ciphertext by a known constant\")\n p = self.keys[\"public_key\"][\"p\"]\n if constant > p:\n constant = constant % p\n logger.debug(\n f\"ElGamal can encrypt messages [1, {p}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext[0], constant, p), pow(ciphertext[1], constant, p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n \"\"\"\n Re-generate ciphertext with re-encryption. 
Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.exponential is True:\n # then this is additively homomorphic\n neutral_element = 0\n else:\n # then this is multiplicatively homomorphic\n neutral_element = 1\n\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n\n if self.exponential is True:\n reencrypted_value = self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n else:\n reencrypted_value = self.multiply(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n return reencrypted_value" }, { "identifier": "Paillier", "path": "lightphe/cryptosystems/Paillier.py", "snippet": "class Paillier(Homomorphic):\n \"\"\"\n Paillier algorithm is homomorphic with respect to the addition.\n Also, it supports power operation for ciphertext base and plaintext exponent\n Ref: https://sefiks.com/2023/04/03/a-step-by-step-partially-homomorphic-encryption-example-with-paillier-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = n * n\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n assert math.gcd(r, n) == 1\n return (pow(g, plaintext, n * n) * pow(r, n, n * n)) % (n * n)\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n mu = pow(phi, -1, n)\n\n return (self.lx(pow(ciphertext, phi, n * n)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 
+ m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % (n * n)\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Paillier is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * m2) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Paillier\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Paillier can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, n * n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "DamgardJurik", "path": "lightphe/cryptosystems/DamgardJurik.py", "snippet": "class DamgardJurik(Homomorphic):\n \"\"\"\n Damgard-Jurik algorithm is a generalization of Paillier.\n It is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-damgard-jurik-in-python/\n \"\"\"\n\n def __init__(self, s: int = 2, keys: Optional[dict] = None, key_size: int = 1024):\n \"\"\"\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). if s == 1 then this is Paillier\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size=key_size, s=s)\n n = self.keys[\"public_key\"][\"n\"]\n self.plaintext_modulo = n\n self.ciphertext_modulo = pow(n, s + 1)\n\n def generate_keys(self, key_size: int, s: Optional[int] = None):\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n s (int): cryptosystem's module is going to be n^(s+1). 
if s == 1 then this is Paillier\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n g = 1 + n\n\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"s\"] = s\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Paillier requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(0, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Paillier\n Args:\n plaintext (int): message to encrypt\n random_key (int): Paillier requires a random key that co-prime to n.\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n r = random_key or self.generate_random_key()\n modulo = pow(n, s + 1)\n\n # assert math.gcd(r, n) == 1\n c = (pow(g, plaintext, modulo) * pow(r, n, modulo)) % modulo\n # c = (pow(g, plaintext, modulo) * pow(r, pow(n, s), modulo)) % modulo\n if math.gcd(c, modulo) != 1:\n logger.info(f\"WARNING! gcd({c=}, {modulo=}) != 1\")\n return c\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Paillier\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n mu = pow(phi, -1, n)\n modulo = pow(n, s + 1)\n return (self.lx(pow(ciphertext, phi, modulo)) * mu) % (n)\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n squared.\n Args:\n ciphertext1 (int): 1st ciphertext created with Paillier\n ciphertext2 (int): 2nd ciphertext created with Paillier\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Paillier\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n s = self.keys[\"public_key\"][\"s\"]\n modulo = pow(n, s + 1)\n return (ciphertext1 * ciphertext2) % modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Damgard-Jurik is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext by a known plain constant\n Result of this must be equal to E(m1 * m2), where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Damgard-Jurik\n constant (int): a known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Damgard-Jurik\n \"\"\"\n n = 
self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Damgard-Jurik can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / n\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n y = (x - 1) // n\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "OkamotoUchiyama", "path": "lightphe/cryptosystems/OkamotoUchiyama.py", "snippet": "class OkamotoUchiyama(Homomorphic):\n \"\"\"\n Okamoto-Uchiyama algorithm is homomorphic with respect to the addition.\n Ref: https://sefiks.com/2023/10/20/a-step-by-step-partially-homomorphic-encryption-example-with-okamoto-uchiyama-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=1024):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"private_key\"][\"p\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of OkamotoUchiyama cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime modulus p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime modulus q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # modulo\n n = p * p * q\n\n # generator\n g = random.randint(2, n)\n\n if pow(g, p - 1, p * p) == 1:\n raise ValueError(\"Fermat's Little Theorem must be satisfied\")\n\n h = pow(g, n, n)\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"h\"] = h\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Okamoto-Uchiyama requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with OkamotoUchiyama\n Args:\n plaintext (int): message to encrypt\n random_key (int): OkamotoUchiyama requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n h = self.keys[\"public_key\"][\"h\"]\n r = random_key or self.generate_random_key()\n\n if plaintext > p:\n plaintext = 
plaintext % p\n logger.debug(\n f\"plaintext must be in scale [0, {p=}] but this is exceeded.\"\n \"New plaintext is {plaintext}\"\n )\n return (pow(g, plaintext, n) * pow(h, r, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Okamoto-Uchiyama\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n g = self.keys[\"public_key\"][\"g\"]\n\n a = self.lx(pow(ciphertext, p - 1, p * p))\n b = self.lx(pow(g, p - 1, p * p))\n return (a * pow(b, -1, p)) % p\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with OkamotoUchiyama\n ciphertext2 (int): 2nd ciphertext created with OkamotoUchiyama\n Returns:\n ciphertext3 (int): 3rd ciphertext created with OkamotoUchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Okamoto-Uchiyama is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Okamoto-Uchiyama\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Okamoto-Uchiyama\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Okamoto-Uchiyama can encrypt messages [1, {n}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)\n\n def lx(self, x: int) -> int:\n \"\"\"\n Find logarithm over cyclic group\n Args:\n x (int): some integer\n Returns:\n lx (int): (x-1) / p\n \"\"\"\n p = self.keys[\"private_key\"][\"p\"]\n if x % p != 1:\n raise ValueError(f\"Input passed to lx ({x}) must be identical to 1 in modulo {p}\")\n if math.gcd(x, p * p) != 1:\n raise ValueError(f\"gcd({x}, {p}^2) must be equal to 1\")\n y = (x - 1) // p\n assert y - int(y) == 0\n return int(y)" }, { "identifier": "Benaloh", "path": "lightphe/cryptosystems/Benaloh.py", "snippet": "class Benaloh(Homomorphic):\n def __init__(self, keys: Optional[dict] = None, key_size: int = 50):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. 
default is less than other cryptosystems\n because decryption of Benaloh requires to solve DLP :/\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"r\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Paillier cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n x = 1\n while x == 1:\n # picking a prime p\n p = sympy.randprime(200, 2**key_size)\n\n # picking a prime q\n q = sympy.randprime(100, p)\n\n n = p * q\n phi = (p - 1) * (q - 1)\n\n r = p - 1\n while gcd(q - 1, r) != 1:\n r = int(r / gcd(q - 1, r))\n\n if not (\n # r should divide p-1 without remainder\n (p - 1) % r == 0\n # r and (p - 1) / r must be coprimes\n and gcd(r, int((p - 1) / r)) == 1\n # r and q-1 must be coprimes\n and gcd(r, q - 1) == 1\n ):\n continue\n\n y = random.randint(2, n)\n if gcd(y, n) != 1:\n continue\n\n # to guarantee correct decryption\n prime_factors = sympy.factorint(r).keys()\n decryption_guaranteed = True\n for prime_factor in prime_factors:\n # none of r's prime factor should satisfy the condition\n if pow(y, int(phi / prime_factor), n) == 1:\n decryption_guaranteed = False\n\n if decryption_guaranteed is False:\n continue\n\n x = pow(y, int(phi / r), n)\n if x != 1:\n break\n\n keys[\"public_key\"][\"y\"] = y\n keys[\"public_key\"][\"r\"] = r\n keys[\"public_key\"][\"n\"] = n\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"x\"] = x\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Generate random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n u = random.randint(1, n)\n if gcd(u, n) == 1:\n break\n return u\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Benaloh\n Args:\n plaintext (int): message to encrypt\n random_key (int): Benaloh requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n y = self.keys[\"public_key\"][\"y\"]\n r = self.keys[\"public_key\"][\"r\"]\n n = self.keys[\"public_key\"][\"n\"]\n\n u = random_key or self.generate_random_key()\n\n if plaintext > r:\n plaintext = plaintext % r\n logger.debug(\n f\"Benaloh lets you to encrypt messages in [0, {r=}].\"\n f\"But your plaintext exceeds this limit.\"\n f\"New plaintext is {plaintext}\"\n )\n\n c = (pow(y, plaintext, n) * pow(u, r, n)) % n\n\n if gcd(c, n) != 1:\n logger.debug(\"ciphertext is not co-prime with n!\")\n\n return c\n\n def decrypt(self, ciphertext: int) -> int:\n \"\"\"\n Decrypt a given ciphertext with Benaloh\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n r = self.keys[\"public_key\"][\"r\"]\n phi = self.keys[\"private_key\"][\"phi\"]\n x = self.keys[\"private_key\"][\"x\"]\n\n a = pow(ciphertext, int(phi / r), n)\n\n md = 0\n while True:\n if pow(x, md, n) == a:\n break\n md = md + 1\n if md > r:\n raise ValueError(f\"Message cannot be restored in [{0}, {n}]\")\n return md\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic 
addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Benaloh\n ciphertext2 (int): 2nd ciphertext created with Benaloh\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Benaloh\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return (ciphertext1 * ciphertext2) % n\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Benaloh is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Benaloh\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Benaloh\n \"\"\"\n # raise ValueError(\"Benaloh is not supporting multiplying by a constant\")\n n = self.keys[\"public_key\"][\"n\"]\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Benaloh can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. New constant is {constant}\"\n )\n return pow(ciphertext, constant, n)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "NaccacheStern", "path": "lightphe/cryptosystems/NaccacheStern.py", "snippet": "class NaccacheStern(Homomorphic):\n \"\"\"\n Naccache-Stern algorithm is homomorphic with respect to the addition.\n It is a generaliation of Benaloh cryptosystem\n Ref: https://sefiks.com/2023/10/26/a-step-by-step-partially-homomorphic-encryption-example-with-naccache-stern-in-python/\n Original paper: https://dl.acm.org/doi/pdf/10.1145/288090.288106\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=37, deterministic: bool = False):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. Less than many cryptosystems because\n decryption requires to solve DLP.\n deterministic (boolean): deterministic or probabilistic version of\n cryptosystem\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.keys[\"public_key\"][\"sigma\"]\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n self.deterministic = deterministic\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Naccache-Stern cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # pick a family of small primes. 
the largest one is 10-bits\n # TODO: do something generic instead of constant primes\n prime_set = [3, 5, 7, 11, 13, 17]\n k = len(prime_set)\n\n if all(sympy.isprime(prime) is True for prime in prime_set) is False:\n raise ValueError(\"All items of prime set must be prime!\")\n\n # divide the set in half and find products of primes\n u = 1\n v = 1\n\n for i, prime in enumerate(prime_set):\n if i < len(prime_set) / 2:\n u = u * prime\n else:\n v = v * prime\n\n # product of all primes\n sigma = u * v\n\n # pick large prime numbers\n while True:\n a = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n b = sympy.randprime(100, a)\n\n # calculate two primes from chosen ones\n p = (2 * a * u) + 1\n q = (2 * b * v) + 1\n\n # recommended n is 768 bits\n n = p * q\n phi = (p - 1) * (q - 1)\n\n if phi % sigma != 0:\n logger.debug(\"canceled because phi cannot be divisible by sigma\")\n continue\n\n if math.gcd(sigma, int(phi // sigma)) != 1:\n logger.debug(\"canceled because sigma and phi/sigma are not coprime\")\n continue\n\n p_conditions = []\n for i in range(0, int(k / 2)):\n pi = prime_set[i]\n if (\n (p - 1) % pi == 0\n and math.gcd(pi, int((p - 1) / pi)) == 1\n and math.gcd(pi, q - 1) == 1\n ):\n p_conditions.append(1)\n else:\n p_conditions.append(0)\n p_satisfied = True if len(p_conditions) == sum(p_conditions) else False\n if p_satisfied is False:\n logger.debug(\"canceled because p_conditions are not satisfied\")\n continue\n\n q_conditions = []\n for i in range(int(k / 2), k):\n pi = prime_set[i]\n if (\n (q - 1) % pi == 0\n and math.gcd(pi, int((q - 1) / pi)) == 1\n and math.gcd(pi, p - 1)\n ):\n q_conditions.append(1)\n else:\n q_conditions.append(0)\n\n q_satisfied = True if len(q_conditions) == sum(q_conditions) else False\n if q_satisfied is False:\n logger.debug(\"canceled because q_conditions are not satisfied\")\n continue\n\n # p and q must be primes\n if not (sympy.isprime(p) and sympy.isprime(q)):\n continue\n\n # choose a generator g\n g = random.randint(2, n)\n # it must be co-prime to n\n if math.gcd(g, n) != 1:\n logger.debug(\"canceled becuase g is not co-prime with ne\")\n continue\n # guarantee it is not pi-th power.\n for pi in prime_set:\n logger.debug(\"canceled because g is a pi-th power\")\n if pow(g, int(phi / pi), n) == 1:\n continue\n\n # the order of g modulo n must be phi/4\n if pow(g, int(phi / 4), n) != 1:\n continue\n\n # check decryption is guaranteed similar to benaloh\n # ps: this is not mentioned in the original paper\n is_decryption_guaranteed = True\n for pi in prime_set:\n prime_factors = sympy.factorint(pi).keys()\n for prime_factor in prime_factors:\n if pow(g, int(phi / prime_factor), n) == 1:\n is_decryption_guaranteed = False\n if is_decryption_guaranteed is True:\n break\n\n logger.debug(f\"n bits is {len(bin(n)[2:])}\")\n\n keys[\"public_key\"][\"g\"] = g\n keys[\"public_key\"][\"n\"] = n\n # sigma can optionally be secret in deterministic version\n keys[\"public_key\"][\"sigma\"] = sigma\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n keys[\"private_key\"][\"phi\"] = phi\n keys[\"private_key\"][\"prime_set\"] = prime_set\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Naccache-Stern requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n return random.randint(1, n - 1)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> int:\n \"\"\"\n Encrypt a given 
plaintext for optionally given random key with Naccache-Stern\n Args:\n plaintext (int): message to encrypt\n random_key (int): Naccache-Stern requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n g = self.keys[\"public_key\"][\"g\"]\n n = self.keys[\"public_key\"][\"n\"]\n r = random_key or self.generate_random_key()\n sigma = self.keys[\"public_key\"][\"sigma\"]\n if plaintext > self.plaintext_modulo:\n plaintext = plaintext % self.plaintext_modulo\n logger.debug(\n f\"plaintext must be in scale [0, {self.plaintext_modulo}] \"\n \"but this is exceeded. New plaintext is {plaintext}\"\n )\n\n if self.deterministic is True:\n return pow(g, plaintext, n)\n\n # Probabilistic\n return (pow(r, sigma, n) * pow(g, plaintext, n)) % n\n\n def decrypt(self, ciphertext: int):\n \"\"\"\n Decrypt a given ciphertext with Naccache-Stern\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n phi = self.keys[\"private_key\"][\"phi\"]\n n = self.keys[\"public_key\"][\"n\"]\n g = self.keys[\"public_key\"][\"g\"]\n prime_set = self.keys[\"private_key\"][\"prime_set\"]\n\n remainders = []\n for i, prime in enumerate(prime_set):\n ci = pow(ciphertext, int(phi / prime), n)\n logger.debug(f\"c_{i} = {ci}\")\n\n j = 0\n while True:\n if ci == pow(g, int((j * phi) / prime), n):\n logger.debug(f\"m_{i} = {j}\")\n remainders.append(j)\n break\n j = j + 1\n if j > prime**2:\n raise ValueError(\n f\"c_{i} cannot be restored from {ci} = {g}^(j*{phi}/{prime}) mod {n}\"\n )\n\n congruences = []\n for i in range(0, len(prime_set)):\n logger.debug(f\"m mod {prime_set[i]} = {remainders[i]}\")\n congruences.append((remainders[i], prime_set[i]))\n\n # chinese remainder problem\n ms = solve_congruence(*congruences)\n if not ms:\n raise ValueError(\"message cannot be restored with Chinese Remainder!\")\n return ms[0]\n\n def add(self, ciphertext1: int, ciphertext2: int) -> int:\n \"\"\"\n Perform homomorphic addition on encrypted data.\n Result of this must be equal to E(m1 + m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Naccache-Stern\n ciphertext2 (int): 2nd ciphertext created with Naccache-Stern\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Naccache-Stern\n \"\"\"\n return (ciphertext1 * ciphertext2) % self.ciphertext_modulo\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Naccache-Stern is not homomorphic with respect to the exclusive or\")\n\n def multiply_by_contant(self, ciphertext: int, constant: int) -> int:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to E(m1 * constant) where E(m1) = ciphertext\n Encryption calculations are done in module n squared.\n Args:\n ciphertext (int): ciphertext created with Naccache-Stern\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Naccache-Stern\n \"\"\"\n if constant > self.plaintext_modulo:\n constant = constant % self.plaintext_modulo\n logger.debug(\n f\"Naccache-Stern can encrypt messages [1, {self.plaintext_modulo}]. \"\n f\"Seems constant exceeded this limit. 
New constant is {constant}\"\n )\n\n return pow(ciphertext, constant, self.ciphertext_modulo)\n\n def reencrypt(self, ciphertext: int) -> int:\n \"\"\"\n Re-generate ciphertext with re-encryption. Many ciphertext will be decrypted to same plaintext.\n Args:\n ciphertext (int): given ciphertext\n Returns:\n new ciphertext (int): different ciphertext for same plaintext\n \"\"\"\n if self.deterministic is True:\n raise ValueError(\n \"Deterministic version of Naccache-Stern does not support reencryption.\"\n \"If you still want to perform ciphertext regeneration, then you may \"\n \"consider to use its probabilistic version.\"\n )\n neutral_element = 0\n neutral_encrypted = self.encrypt(plaintext=neutral_element)\n return self.add(ciphertext1=ciphertext, ciphertext2=neutral_encrypted)" }, { "identifier": "GoldwasserMicali", "path": "lightphe/cryptosystems/GoldwasserMicali.py", "snippet": "class GoldwasserMicali(Homomorphic):\n \"\"\"\n Goldwasser-Micali algorithm is homomorphic with respect to the Exclusively OR (XOR).\n Ref: https://sefiks.com/2023/10/27/a-step-by-step-partially-homomorphic-encryption-example-with-goldwasser-micali-in-python/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size=100):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits\n \"\"\"\n self.keys = keys or self.generate_keys(key_size)\n self.ciphertext_modulo = self.keys[\"public_key\"][\"n\"]\n # TODO: not sure about the plaintext modulo\n self.plaintext_modulo = self.keys[\"public_key\"][\"n\"]\n\n def generate_keys(self, key_size: int) -> dict:\n \"\"\"\n Generate public and private keys of Goldwasser-Micali cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # picking a prime p\n p = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n # picking a prime q\n q = sympy.randprime(200, 2 ** int(key_size / 2) - 1)\n\n n = p * q\n\n # find non-residue x\n while True:\n x = random.randint(1, n - 1)\n if math.gcd(x, n) == 1 and jacobi_symbol(x, p) == -1 and jacobi_symbol(x, q) == -1:\n break\n\n keys[\"public_key\"][\"n\"] = n\n keys[\"public_key\"][\"x\"] = x\n\n keys[\"private_key\"][\"p\"] = p\n keys[\"private_key\"][\"q\"] = q\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Goldwasser-Micali requires to generate one-time random key that co-prime to n\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n while True:\n r = random.randint(1, n)\n if math.gcd(r, n) == 1:\n break\n return r\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> list:\n \"\"\"\n Encrypt a given plaintext for optionally given random key with Goldwasser-Micali\n Args:\n plaintext (int): message to encrypt\n random_key (int): Goldwasser-Micali requires a random key\n Random key will be generated automatically if you do not set this.\n Returns:\n ciphertext (int): encrypted message\n \"\"\"\n n = self.keys[\"public_key\"][\"n\"]\n x = self.keys[\"public_key\"][\"x\"]\n\n m_binary = bin(plaintext)[2:]\n\n # number of bits\n k = len(m_binary)\n\n if random_key and len(random_key) != k:\n raise ValueError(f\"Random key must be length of {k}\")\n\n c = []\n for i in range(0, k):\n mi = int(m_binary[i])\n\n if random_key:\n ri = random_key[i]\n else:\n ri = 
self.generate_random_key()\n\n ci = (pow(ri, 2, n) * pow(x, mi, n)) % n\n c.append(ci)\n\n return c\n\n def decrypt(self, ciphertext: list) -> int:\n \"\"\"\n Decrypt a given ciphertext with Goldwasser-Micali\n Args:\n ciphertext (int): encrypted message\n Returns:\n plaintext (int): restored message\n \"\"\"\n m_binaries = []\n\n p = self.keys[\"private_key\"][\"p\"]\n q = self.keys[\"private_key\"][\"q\"]\n\n for i in ciphertext:\n xp = i % p\n xq = i % q\n\n if pow(xp, int((p - 1) / 2), p) == 1 and pow(xq, int((q - 1) / 2), q) == 1:\n m_binaries.append(\"0\")\n else:\n m_binaries.append(\"1\")\n\n m_binary = \"\".join(m_binaries)\n return int(m_binary, 2)\n\n def add(self, ciphertext1: list, ciphertext2: list) -> list:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the addition\")\n\n def multiply(self, ciphertext1: int, ciphertext2: int) -> int:\n raise ValueError(\"Goldwasser-Micali is not homomorphic with respect to the multiplication\")\n\n def xor(self, ciphertext1: int, ciphertext2: int) -> list:\n \"\"\"\n Perform homomorphic xor on encrypted data.\n Result of this must be equal to E(m1 ^ m2) = E(m1) ^ E(m2)\n Encryption calculations are done in module n\n Args:\n ciphertext1 (int): 1st ciphertext created with Goldwasser-Micali\n ciphertext2 (int): 2nd ciphertext created with Goldwasser-Micali\n Returns:\n ciphertext3 (int): 3rd ciphertext created with Goldwasser-Micali\n \"\"\"\n ciphertext3 = []\n for i in range(0, len(ciphertext1)):\n c1 = ciphertext1[i]\n c2 = ciphertext2[i]\n ciphertext3.append((c1 * c2) % self.ciphertext_modulo)\n\n return ciphertext3\n\n def multiply_by_contant(self, ciphertext: int, constant: int):\n raise ValueError(\"Goldwasser-Micali does not support multiplying with constant\")\n\n def reencrypt(self, ciphertext: int):\n raise ValueError(\"Goldwasser-Micali does not support re-encryption\")" }, { "identifier": "EllipticCurveElGamal", "path": "lightphe/cryptosystems/EllipticCurveElGamal.py", "snippet": "class EllipticCurveElGamal(Homomorphic):\n \"\"\"\n Elliptic Curve ElGamal algorithm is an additively homomorphic algorithm\n Unluckily, it requires to solve (EC)DLP to restore plaintext in decryption\n However it is easy to restore plaintext while plaintext is not very large\n unsimilar to Benaloh or Naccache-Stern\n Ref: https://sefiks.com/2018/08/21/elliptic-curve-elgamal-encryption/\n \"\"\"\n\n def __init__(self, keys: Optional[dict] = None, key_size: int = 160):\n \"\"\"\n Args:\n keys (dict): private - public key pair.\n set this to None if you want to generate random keys.\n key_size (int): key size in bits. default is 160.\n this is equivalent to 1024 bit RSA.\n \"\"\"\n # TODO: add different forms and curves. e.g. 
Koblitz, Edwards (Ed25519)\n self.curve = Weierstrass()\n self.keys = keys or self.generate_keys(key_size)\n self.plaintext_modulo = self.curve.p\n self.ciphertext_modulo = self.curve.p\n\n def generate_keys(self, key_size: int):\n \"\"\"\n Generate public and private keys of Elliptic Curve ElGamal cryptosystem\n Args:\n key_size (int): key size in bits\n Returns:\n keys (dict): having private_key and public_key keys\n \"\"\"\n keys = {}\n keys[\"private_key\"] = {}\n keys[\"public_key\"] = {}\n\n # private key\n ka = random.getrandbits(key_size)\n\n # public key\n Qa = self.curve.apply_double_and_add_method(G=self.curve.G, k=ka, p=self.curve.p)\n\n keys[\"public_key\"][\"Qa\"] = Qa\n keys[\"private_key\"][\"ka\"] = ka\n\n return keys\n\n def generate_random_key(self) -> int:\n \"\"\"\n Elliptic Curve ElGamal requires to generate one-time random key per encryption\n Returns:\n random key (int): one time random key for encryption\n \"\"\"\n return random.getrandbits(128)\n\n def encrypt(self, plaintext: int, random_key: Optional[int] = None) -> tuple:\n \"\"\"\n Encrypt plaintext with Elliptic Curve ElGamal\n Args:\n plaintext (int): message to encrypt\n random_key (int): random key for encryption. Do not set this to a static value.\n Returns\n ciphertext (tuple): c1 and c2\n \"\"\"\n # modulo\n p = self.curve.p\n\n # base point\n G = self.curve.G\n\n # public key\n Qa = self.keys[\"public_key\"][\"Qa\"]\n\n # random key\n r = random_key or self.generate_random_key()\n\n s = self.curve.apply_double_and_add_method(G=G, k=plaintext, p=p)\n\n c1 = self.curve.apply_double_and_add_method(G=G, k=r, p=p)\n\n c2 = self.curve.apply_double_and_add_method(G=Qa, k=r, p=p)\n c2 = self.curve.add_points(c2, s, p)\n\n return c1, c2\n\n def decrypt(self, ciphertext: tuple) -> int:\n \"\"\"\n Decrypt ciphertext with Elliptic Curve ElGamal\n Args:\n ciphertext (tuple): c1 and c2\n Returns:\n plaintext (int): restored message\n \"\"\"\n # modulo\n p = self.curve.p\n\n # private key\n ka = self.keys[\"private_key\"][\"ka\"]\n\n c1, c2 = ciphertext\n c1_prime = (c1[0], (-1 * c1[1]) % p)\n s_prime = self.curve.apply_double_and_add_method(G=c1_prime, k=ka, p=p)\n s_prime = self.curve.add_points(P=c2, Q=s_prime, p=p)\n\n # s_prime is a point on the elliptic curve\n # s_prime = k x G\n # we need to find k from known s_prime and G\n # this requires to solve ECDLP\n\n # base point\n G = self.curve.G\n k = 2\n while True:\n G = self.curve.add_points(P=G, Q=self.curve.G, p=p)\n if G[0] == s_prime[0] and G[1] == s_prime[1]:\n return k\n k = k + 1\n if k > self.curve.n:\n raise ValueError(f\"Cannot restore scalar from {s_prime} = k x {self.curve.G}\")\n\n def multiply(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n raise ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the multiplication\"\n )\n\n def add(self, ciphertext1: tuple, ciphertext2: tuple) -> tuple:\n \"\"\"\n Perform homomorphic addition on encrypted data\n Result of this must be equal to E(m1 + m2)\n Args:\n ciphertext1 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n ciphertext2 (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n Returns\n ciphertext (dict): Elliptic Curve ElGamal ciphertext consisting of c1 and c2 keys\n \"\"\"\n a = self.curve.add_points(P=ciphertext1[0], Q=ciphertext2[0], p=self.curve.p)\n b = self.curve.add_points(P=ciphertext1[1], Q=ciphertext2[1], p=self.curve.p)\n return a, b\n\n def xor(self, ciphertext1: tuple, ciphertext2: tuple) -> int:\n raise 
ValueError(\n \"Elliptic Curve ElGamal is not homomorphic with respect to the exclusive or\"\n )\n\n def multiply_by_contant(self, ciphertext: tuple, constant: int) -> tuple:\n \"\"\"\n Multiply a ciphertext with a plain constant.\n Result of this must be equal to k x E(m1) = E(m1 * k)\n where E(m1) = ciphertext\n Args:\n ciphertext (int): ciphertext created with Elliptic Curve ElGamal\n constant (int): known plain constant\n Returns:\n ciphertext (int): new ciphertext created with Elliptic Curve ElGamal\n \"\"\"\n return self.curve.apply_double_and_add_method(\n G=ciphertext[0], k=constant, p=self.curve.p\n ), self.curve.apply_double_and_add_method(G=ciphertext[1], k=constant, p=self.curve.p)\n\n def reencrypt(self, ciphertext: tuple) -> tuple:\n raise ValueError(\"Elliptic Curve ElGamal does not support regeneration of ciphertext\")" }, { "identifier": "phe_utils", "path": "lightphe/commons/phe_utils.py", "snippet": "def parse_int(value: Union[int, float], modulo: int) -> int:\ndef fractionize(value: float, modulo: int, precision: Optional[int] = None) -> Tuple[int, int]:\ndef solve_dlp():" }, { "identifier": "Logger", "path": "lightphe/commons/logger.py", "snippet": "class Logger:\n def __init__(self, module):\n self.module = module\n log_level = os.environ.get(\"LIGHTPHE_LOG_LEVEL\", str(logging.INFO))\n try:\n self.log_level = int(log_level)\n except Exception as err:\n self.dump_log(\n f\"Exception while parsing $LIGHTPHE_LOG_LEVEL.\"\n f\"Expected int but it is {log_level} ({str(err)})\"\n )\n self.log_level = logging.INFO\n\n def info(self, message):\n if self.log_level <= logging.INFO:\n self.dump_log(message)\n\n def debug(self, message):\n if self.log_level <= logging.DEBUG:\n self.dump_log(f\"🕷️ {message}\")\n\n def warn(self, message):\n if self.log_level <= logging.WARNING:\n self.dump_log(f\"⚠️ {message}\")\n\n def error(self, message):\n if self.log_level <= logging.ERROR:\n self.dump_log(f\"🔴 {message}\")\n\n def critical(self, message):\n if self.log_level <= logging.CRITICAL:\n self.dump_log(f\"💥 {message}\")\n\n def dump_log(self, message):\n print(f\"{str(datetime.now())[2:-7]} - {message}\")" } ]
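Taken together, the context snippets above pin down which operation each scheme supports homomorphically: RSA and regular ElGamal multiply, exponential ElGamal, Paillier, Damgard-Jurik, Okamoto-Uchiyama, Benaloh, Naccache-Stern and Elliptic Curve ElGamal add, and Goldwasser-Micali XORs. A minimal sketch of the ElGamal contract follows, assuming the ElGamal class behaves exactly as defined in its snippet; the key_size and the tiny plaintexts are arbitrary choices for a quick check, and exponential decryption brute-forces a discrete logarithm, so only small messages are used.

from lightphe.cryptosystems.ElGamal import ElGamal

# Regular ElGamal is multiplicatively homomorphic.
eg = ElGamal(key_size=128)
ct1, ct2 = eg.encrypt(3), eg.encrypt(5)
assert eg.decrypt(eg.multiply(ct1, ct2)) == 15    # E(3) * E(5) decrypts to 3 * 5

# Exponential ElGamal is additively homomorphic.
exp_eg = ElGamal(exponential=True, key_size=128)
ct3, ct4 = exp_eg.encrypt(3), exp_eg.encrypt(5)
assert exp_eg.decrypt(exp_eg.add(ct3, ct4)) == 8  # E(3) + E(5) decrypts to 3 + 5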
from typing import Union
from lightphe.models.Homomorphic import Homomorphic
from lightphe.models.Algorithm import Algorithm
from lightphe.cryptosystems.RSA import RSA
from lightphe.cryptosystems.ElGamal import ElGamal
from lightphe.cryptosystems.Paillier import Paillier
from lightphe.cryptosystems.DamgardJurik import DamgardJurik
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama
from lightphe.cryptosystems.Benaloh import Benaloh
from lightphe.cryptosystems.NaccacheStern import NaccacheStern
from lightphe.cryptosystems.GoldwasserMicali import GoldwasserMicali
from lightphe.cryptosystems.EllipticCurveElGamal import EllipticCurveElGamal
from lightphe.commons import phe_utils
from lightphe.commons.logger import Logger
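The import block above is what the Ciphertext dispatcher shown below pulls in. As a minimal sketch of the additive schemes among them, assuming the Paillier class behaves as in its snippet (note the constant-multiplication method is spelled multiply_by_contant in the source):

from lightphe.cryptosystems.Paillier import Paillier

cs = Paillier(key_size=512)
c1, c2 = cs.encrypt(17), cs.encrypt(25)

# Additive homomorphism: multiplying ciphertexts mod n^2 adds the plaintexts.
assert cs.decrypt(cs.add(c1, c2)) == 42

# Multiplying a ciphertext by a known plain constant scales the plaintext.
assert cs.decrypt(cs.multiply_by_contant(c1, 3)) == 51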
17,615
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys) elif algorithm_name == Algorithm.Benaloh: cs = Benaloh(keys=keys) elif algorithm_name == Algorithm.NaccacheStern: cs = NaccacheStern(keys=keys)
logger = Logger(module="lightphe/models/Ciphertext.py") # pylint: disable=too-few-public-methods, no-else-return class Ciphertext: def __init__(self, algorithm_name: str, keys: dict, value: Union[int, tuple, list]): self.algorithm_name = algorithm_name self.keys = keys self.value = value if algorithm_name == Algorithm.RSA: cs = RSA(keys=keys) elif algorithm_name == Algorithm.ElGamal: cs = ElGamal(keys=keys) elif algorithm_name == Algorithm.ExponentialElGamal: cs = ElGamal(keys=keys, exponential=True) elif algorithm_name == Algorithm.EllipticCurveElGamal: cs = EllipticCurveElGamal(keys=keys) elif algorithm_name == Algorithm.Paillier: cs = Paillier(keys=keys) elif algorithm_name == Algorithm.DamgardJurik: cs = DamgardJurik(keys=keys) elif algorithm_name == Algorithm.OkamotoUchiyama: cs = OkamotoUchiyama(keys=keys) elif algorithm_name == Algorithm.Benaloh: cs = Benaloh(keys=keys) elif algorithm_name == Algorithm.NaccacheStern: cs = NaccacheStern(keys=keys)
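The constructor above simply dispatches on algorithm_name and rebuilds the matching cryptosystem from the stored keys; its first branch wraps RSA, which the context snippet defines as multiplicatively homomorphic. A minimal sketch of that branch's underlying scheme, assuming the RSA class behaves as shown in its snippet:

from lightphe.cryptosystems.RSA import RSA

rsa = RSA(key_size=512)
c1, c2 = rsa.encrypt(6), rsa.encrypt(7)

# Multiplicative homomorphism: E(6) * E(7) mod n decrypts to 6 * 7.
assert rsa.decrypt(rsa.multiply(c1, c2)) == 42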
elif algorithm_name == Algorithm.GoldwasserMicali:
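That next_line is the gold completion for this record: it opens the Goldwasser-Micali branch of the elif chain in the cropped constructor. By analogy with the earlier branches, the branch body would presumably instantiate the matching cryptosystem; only the elif line is given by the record, so the second line below is an assumption.

elif algorithm_name == Algorithm.GoldwasserMicali:
    cs = GoldwasserMicali(keys=keys)  # assumed by analogy with the other branches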
9
2023-10-28 14:57:59+00:00
24k
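Putting this record together, a ciphertext object would be built from one of the imported cryptosystems and the dispatching constructor shown above. A sketch, assuming Ciphertext is importable from lightphe.models.Ciphertext, which is an inference from the logger's module string rather than something the record states:

from lightphe.models.Algorithm import Algorithm
from lightphe.models.Ciphertext import Ciphertext  # assumed import path
from lightphe.cryptosystems.OkamotoUchiyama import OkamotoUchiyama

cs = OkamotoUchiyama(key_size=512)
ct = Ciphertext(
    algorithm_name=Algorithm.OkamotoUchiyama,
    keys=cs.keys,
    value=cs.encrypt(7),
)
# ct wraps the encrypted value together with the key material the constructor
# uses to rebuild the matching cryptosystem.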
DataCanvasIO/LMS
lms/runtime/prune/llm_pruner/LLMPruner/peft/mapping.py
[ { "identifier": "PeftModel", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModel(PushToHubMixin, torch.nn.Module):\n \"\"\"\n Base model encompassing various Peft methods.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft.\n peft_config ([`PeftConfig`]): The configuration of the Peft model.\n\n\n **Attributes**:\n - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft.\n - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model.\n - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when\n saving the model.\n - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if\n using [`PromptLearningConfig`].\n - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if\n using [`PromptLearningConfig`].\n - **transformer_backbone_name** (`str`) -- The name of the transformer\n backbone in the base model if using [`PromptLearningConfig`].\n - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone\n in the base model if using [`PromptLearningConfig`].\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__()\n self.base_model = model\n self.config = self.base_model.config\n self.modules_to_save = None\n self.peft_config = {}\n self.active_adapter = adapter_name\n self.peft_type = peft_config.peft_type\n self.base_model_torch_dtype = getattr(model, \"dtype\", None)\n if not isinstance(peft_config, PromptLearningConfig):\n self.peft_config[adapter_name] = peft_config\n self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type](\n self.base_model, self.peft_config, adapter_name\n )\n else:\n self.add_adapter(adapter_name, peft_config)\n\n def save_pretrained(self, save_directory, **kwargs):\n r\"\"\"\n This function saves the adapter model and the adapter configuration files to a directory, so that it can be\n reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`]\n method.\n\n Args:\n save_directory (`str`):\n Directory where the adapter model and configuration files will be saved (will be created if it does not\n exist).\n kwargs (additional keyword arguments, *optional*):\n Additional keyword arguments passed along to the `push_to_hub` method.\n \"\"\"\n if os.path.isfile(save_directory):\n raise ValueError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n os.makedirs(save_directory, exist_ok=True)\n\n for adapter_name, peft_config in self.peft_config.items():\n # save only the trainable weights\n output_state_dict = get_peft_model_state_dict(\n self, state_dict=kwargs.get(\"state_dict\", None), adapter_name=adapter_name\n )\n output_dir = os.path.join(save_directory, adapter_name) if adapter_name != \"default\" else save_directory\n os.makedirs(output_dir, exist_ok=True)\n torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME))\n\n # save the config and change the inference mode to `True`\n if peft_config.base_model_name_or_path is None:\n peft_config.base_model_name_or_path = (\n self.base_model.__dict__.get(\"name_or_path\", None)\n if isinstance(peft_config, PromptLearningConfig)\n else self.base_model.model.__dict__.get(\"name_or_path\", None)\n )\n inference_mode = peft_config.inference_mode\n peft_config.inference_mode = True\n peft_config.save_pretrained(output_dir)\n 
peft_config.inference_mode = inference_mode\n\n @classmethod\n def from_pretrained(cls, model, model_id, adapter_name=\"default\", is_trainable=False, **kwargs):\n r\"\"\"\n Instantiate a [`LoraModel`] from a pretrained Lora configuration and weights.\n\n Args:\n model ([`~transformers.PreTrainedModel`]):\n The model to be adapted. The model should be initialized with the\n [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library.\n model_id (`str` or `os.PathLike`):\n The name of the Lora configuration to use. Can be either:\n - A string, the `model id` of a Lora configuration hosted inside a model repo on the Hugging Face\n Hub.\n - A path to a directory containing a Lora configuration file saved using the `save_pretrained`\n method (`./my_lora_config_directory/`).\n \"\"\"\n from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING\n\n # load the config\n config = PEFT_TYPE_TO_CONFIG_MAPPING[\n PeftConfig.from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None)).peft_type\n ].from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None))\n print(\"Config: \", config)\n\n if (getattr(model, \"hf_device_map\", None) is not None) and len(\n set(model.hf_device_map.values()).intersection({\"cpu\", \"disk\"})\n ) > 0:\n remove_hook_from_submodules(model)\n\n if isinstance(config, PromptLearningConfig) and is_trainable:\n raise ValueError(\"Cannot set a prompt learning adapter to trainable when loading pretrained adapter.\")\n else:\n config.inference_mode = not is_trainable\n\n if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys():\n model = cls(model, config, adapter_name)\n else:\n model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name)\n model.load_adapter(model_id, adapter_name, **kwargs)\n return model\n\n def _setup_prompt_encoder(self, adapter_name):\n config = self.peft_config[adapter_name]\n self.prompt_encoder = torch.nn.ModuleDict({})\n self.prompt_tokens = {}\n transformer_backbone = None\n for name, module in self.base_model.named_children():\n for param in module.parameters():\n param.requires_grad = False\n if isinstance(module, PreTrainedModel):\n # Make sure to freeze Tranformers model\n if transformer_backbone is None:\n transformer_backbone = module\n self.transformer_backbone_name = name\n\n if config.num_transformer_submodules is None:\n config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1\n\n for named_param, value in list(transformer_backbone.named_parameters()):\n if value.shape[0] == self.base_model.config.vocab_size:\n self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(\".weight\", \"\"))\n break\n\n if config.peft_type == PeftType.PROMPT_TUNING:\n prompt_encoder = PromptEmbedding(config, self.word_embeddings)\n elif config.peft_type == PeftType.P_TUNING:\n prompt_encoder = PromptEncoder(config)\n elif config.peft_type == PeftType.PREFIX_TUNING:\n prompt_encoder = PrefixEncoder(config)\n else:\n raise ValueError(\"Not supported\")\n self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder}))\n self.prompt_tokens[adapter_name] = torch.arange(\n config.num_virtual_tokens * config.num_transformer_submodules\n ).long()\n\n def get_prompt_embedding_to_save(self, adapter_name):\n \"\"\"\n Returns the prompt embedding to save when saving the model. 
Only applicable when `peft_config.peft_type !=\n PeftType.LORA`.\n \"\"\"\n prompt_tokens = self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(self.device)\n if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING:\n prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens]\n prompt_embeddings = self.prompt_encoder[adapter_name](prompt_tokens)\n return prompt_embeddings[0].detach().cpu()\n\n def get_prompt(self, batch_size):\n \"\"\"\n Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`.\n \"\"\"\n peft_config = self.active_peft_config\n prompt_encoder = self.prompt_encoder[self.active_adapter]\n prompt_tokens = self.prompt_tokens[self.active_adapter].unsqueeze(0).expand(batch_size, -1).to(self.device)\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens]\n if peft_config.inference_mode:\n past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)\n else:\n past_key_values = prompt_encoder(prompt_tokens)\n past_key_values = past_key_values.view(\n batch_size,\n peft_config.num_virtual_tokens,\n peft_config.num_layers * 2,\n peft_config.num_attention_heads,\n peft_config.token_dim // peft_config.num_attention_heads,\n )\n if peft_config.num_transformer_submodules == 2:\n past_key_values = torch.cat([past_key_values, past_key_values], dim=2)\n past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(\n peft_config.num_transformer_submodules * 2\n )\n if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None:\n post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type]\n past_key_values = post_process_fn(past_key_values)\n return past_key_values\n else:\n if peft_config.inference_mode:\n prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1)\n else:\n prompts = prompt_encoder(prompt_tokens)\n return prompts\n\n def print_trainable_parameters(self):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in self.named_parameters():\n num_params = param.numel()\n # if using DS Zero 3 and the weights are initialized empty\n if num_params == 0 and hasattr(param, \"ds_numel\"):\n num_params = param.ds_numel\n\n all_param += num_params\n if param.requires_grad:\n trainable_params += num_params\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.base_model, name)\n\n def forward(self, *args, **kwargs):\n \"\"\"\n Forward pass of the model.\n \"\"\"\n return self.get_base_model()(*args, **kwargs)\n\n @contextmanager\n def disable_adapter(self):\n \"\"\"\n Disables the adapter module.\n \"\"\"\n if isinstance(self.active_peft_config, PromptLearningConfig):\n old_forward = self.forward\n self.forward = self.base_model.forward\n else:\n self.base_model.disable_adapter_layers()\n yield\n if isinstance(self.active_peft_config, PromptLearningConfig):\n self.forward = old_forward\n else:\n self.base_model.enable_adapter_layers()\n\n def get_base_model(self):\n \"\"\"\n Returns the base model.\n \"\"\"\n return self.base_model if 
isinstance(self.active_peft_config, PromptLearningConfig) else self.base_model.model\n\n def add_adapter(self, adapter_name, peft_config):\n if peft_config.peft_type != self.peft_type:\n raise ValueError(\n f\"Cannot combine adapters with different peft types. \"\n f\"Found {self.peft_type} and {peft_config.peft_type}.\"\n )\n self.peft_config[adapter_name] = peft_config\n if isinstance(peft_config, PromptLearningConfig):\n self._setup_prompt_encoder(adapter_name)\n else:\n self.base_model.add_adapter(adapter_name, peft_config)\n if getattr(peft_config, \"modules_to_save\", None) is not None:\n if self.modules_to_save is None:\n self.modules_to_save = set(peft_config.modules_to_save)\n else:\n self.modules_to_save = self.modules_to_save.update(peft_config.modules_to_save)\n _set_trainable(self, adapter_name)\n\n def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs):\n from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING\n\n if adapter_name not in self.peft_config:\n # load the config\n peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[\n PeftConfig.from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None)).peft_type\n ].from_pretrained(model_id, subfolder=kwargs.get(\"subfolder\", None))\n if isinstance(peft_config, PromptLearningConfig) and is_trainable:\n raise ValueError(\"Cannot set a prompt learning adapter to trainable when loading pretrained adapter.\")\n else:\n peft_config.inference_mode = not is_trainable\n self.add_adapter(adapter_name, peft_config)\n\n # load weights if any\n path = os.path.join(model_id, kwargs[\"subfolder\"]) if kwargs.get(\"subfolder\", None) is not None else model_id\n print(\"Load from adapter:\", WEIGHTS_NAME)\n if os.path.exists(os.path.join(path, WEIGHTS_NAME)):\n filename = os.path.join(path, WEIGHTS_NAME)\n else:\n try:\n filename = hf_hub_download(model_id, WEIGHTS_NAME, subfolder=kwargs.get(\"subfolder\", None))\n except: # noqa\n raise ValueError(\n f\"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. 
\"\n f\"Please check that the file {WEIGHTS_NAME} is present at {model_id}.\"\n )\n\n adapters_weights = torch.load(\n filename, map_location=torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n )\n # load the weights into the model\n set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name)\n if (\n (getattr(self, \"hf_device_map\", None) is not None)\n and (len(set(self.hf_device_map.values()).intersection({\"cpu\", \"disk\"})) > 0)\n and len(self.peft_config) == 1\n ):\n device_map = kwargs.get(\"device_map\", \"auto\")\n max_memory = kwargs.get(\"max_memory\", None)\n offload_dir = kwargs.get(\"offload_folder\", None)\n offload_index = kwargs.get(\"offload_index\", None)\n\n dispatch_model_kwargs = {}\n # Safety checker for previous `accelerate` versions\n # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/\n if \"offload_index\" in inspect.signature(dispatch_model).parameters:\n dispatch_model_kwargs[\"offload_index\"] = offload_index\n\n no_split_module_classes = self._no_split_modules\n\n if device_map != \"sequential\":\n max_memory = get_balanced_memory(\n self,\n max_memory=max_memory,\n no_split_module_classes=no_split_module_classes,\n low_zero=(device_map == \"balanced_low_0\"),\n )\n if isinstance(device_map, str):\n device_map = infer_auto_device_map(\n self, max_memory=max_memory, no_split_module_classes=no_split_module_classes\n )\n dispatch_model(\n self,\n device_map=device_map,\n offload_dir=offload_dir,\n **dispatch_model_kwargs,\n )\n hook = AlignDevicesHook(io_same_device=True)\n if isinstance(self.peft_config[adapter_name], PromptLearningConfig):\n remove_hook_from_submodules(self.prompt_encoder)\n add_hook_to_module(self.get_base_model(), hook)\n\n # Set model in evaluation mode to deactivate Dropout modules by default\n self.eval()\n\n def set_adapter(self, adapter_name):\n \"\"\"\n Sets the active adapter.\n \"\"\"\n if adapter_name not in self.peft_config:\n raise ValueError(f\"Adapter {adapter_name} not found.\")\n self.active_adapter = adapter_name\n if not isinstance(self.peft_config[adapter_name], PromptLearningConfig):\n self.base_model.set_adapter(adapter_name)\n _set_adapter(self, adapter_name)\n\n @property\n def active_peft_config(self):\n return self.peft_config[self.active_adapter]" }, { "identifier": "PeftModelForCausalLM", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForCausalLM(PeftModel):\n \"\"\"\n Peft model for causal language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForCausalLM\n >>> from peft import PeftModelForCausalLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"CAUSAL_LM\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 1280,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 20,\n ... \"num_layers\": 36,\n ... \"encoder_hidden_size\": 1280,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForCausalLM.from_pretrained(\"gpt2-large\")\n >>> peft_model = PeftModelForCausalLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs)\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n # concat prompt labels\n if labels is not None:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def generate(self, **kwargs):\n peft_config = self.active_peft_config\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n try:\n if not isinstance(peft_config, PromptLearningConfig):\n outputs = self.base_model.generate(**kwargs)\n else:\n if \"input_ids\" not in kwargs:\n raise ValueError(\"input_ids must be provided for Peft model generation\")\n # For gpt2 models, we construct postion_ids on the fly by using attention mask, and position ids need to match input_shape.\n # for prefix tuning, input shape is determined using `input_ids`. Thus we should not expand 'attention_mask' here\n # for prompt tuning input_ids is not passed but a concatenated input_embeds is passed. 
Thus attention_mask needs to be of same size of num_virtual_tokens + input_ids\n if kwargs.get(\"attention_mask\", None) is not None and peft_config.peft_type in [\n PeftType.PROMPT_TUNING,\n PeftType.P_TUNING,\n ]:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(\n kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(kwargs[\"input_ids\"].device)\n kwargs[\"attention_mask\"] = torch.cat((prefix_attention_mask, kwargs[\"attention_mask\"]), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\n \"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\"\n )\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n outputs = self.base_model.generate(**kwargs)\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if isinstance(peft_config, PromptLearningConfig):\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n prefix_attention_mask = torch.ones(\n model_kwargs[\"input_ids\"].shape[0], peft_config.num_virtual_tokens\n ).to(model_kwargs[\"input_ids\"].device)\n model_kwargs[\"attention_mask\"] = torch.cat(\n (prefix_attention_mask, model_kwargs[\"attention_mask\"]), dim=1\n )\n\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n\n if self.base_model_torch_dtype is not None:\n # handle the case for Bloom where it outputs tuple of tuples\n if isinstance(past_key_values[0], tuple):\n past_key_values = tuple(\n tuple(\n past_key_value.to(self.base_model_torch_dtype)\n for past_key_value in past_key_value_tuple\n )\n for past_key_value_tuple in past_key_values\n )\n else:\n past_key_values = tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values\n )\n\n model_kwargs[\"past_key_values\"] = past_key_values\n else:\n if model_kwargs[\"past_key_values\"] is None:\n inputs_embeds = self.word_embeddings(model_kwargs[\"input_ids\"])\n prompts = self.get_prompt(batch_size=model_kwargs[\"input_ids\"].shape[0])\n prompts = prompts.to(inputs_embeds.dtype)\n model_kwargs[\"inputs_embeds\"] = torch.cat((prompts, inputs_embeds), dim=1)\n model_kwargs[\"input_ids\"] = None\n\n return model_kwargs" }, { "identifier": "PeftModelForSeq2SeqLM", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForSeq2SeqLM(PeftModel):\n \"\"\"\n Peft model for sequence-to-sequence language modeling.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM\n >>> from peft import PeftModelForSeq2SeqLM, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"LORA\",\n ... \"task_type\": \"SEQ_2_SEQ_LM\",\n ... \"inference_mode\": False,\n ... \"r\": 8,\n ... \"target_modules\": [\"q\", \"v\"],\n ... \"lora_alpha\": 32,\n ... 
\"lora_dropout\": 0.1,\n ... \"merge_weights\": False,\n ... \"fan_in_fan_out\": False,\n ... \"enable_lora\": None,\n ... \"bias\": \"none\",\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation\n self.base_model_prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model._prepare_encoder_decoder_kwargs_for_generation\n )\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n decoder_input_ids=None,\n decoder_attention_mask=None,\n decoder_inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n decoder_input_ids=decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n decoder_inputs_embeds=decoder_inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if decoder_attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1)\n\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\")\n kwargs[\"token_type_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"decoder_attention_mask\": decoder_attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n past_key_values = self.get_prompt(batch_size)\n return self.base_model(\n input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs\n )\n else:\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n if decoder_inputs_embeds is None and decoder_input_ids is None:\n decoder_input_ids = shift_tokens_right(\n labels, self.config.pad_token_id, self.config.decoder_start_token_id\n )\n decoder_inputs_embeds = self.word_embeddings(decoder_input_ids)\n\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n kwargs[\"attention_mask\"] = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n # concat prompt labels\n if labels is not None:\n if peft_config.num_transformer_submodules == 1:\n kwargs[\"labels\"] = labels\n elif peft_config.num_transformer_submodules == 2:\n prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device)\n kwargs[\"labels\"] = torch.cat((prefix_labels, labels), dim=1)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts[:, : peft_config.num_virtual_tokens], inputs_embeds), dim=1)\n if peft_config.num_transformer_submodules == 1:\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n elif peft_config.num_transformer_submodules == 2:\n decoder_inputs_embeds = torch.cat(\n (prompts[:, peft_config.num_virtual_tokens :], decoder_inputs_embeds), dim=1\n )\n return self.base_model(\n inputs_embeds=inputs_embeds, decoder_inputs_embeds=decoder_inputs_embeds, **kwargs\n )\n\n def generate(self, **kwargs):\n peft_config = self.active_peft_config\n self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self._prepare_encoder_decoder_kwargs_for_generation\n )\n try:\n if not isinstance(peft_config, PromptLearningConfig):\n outputs = self.base_model.generate(**kwargs)\n else:\n if \"input_ids\" not in kwargs:\n raise ValueError(\"input_ids must be provided for Peft model generation\")\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\n \"Position ids are not supported for parameter efficient tuning. Ignoring position ids.\"\n )\n kwargs[\"position_ids\"] = None\n if kwargs.get(\"token_type_ids\", None) is not None:\n warnings.warn(\n \"Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids\"\n )\n kwargs[\"token_type_ids\"] = None\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n outputs = self.base_model.generate(**kwargs)\n else:\n raise NotImplementedError\n except:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model_prepare_encoder_decoder_kwargs_for_generation\n )\n raise\n else:\n self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation\n self.base_model._prepare_encoder_decoder_kwargs_for_generation = (\n self.base_model_prepare_encoder_decoder_kwargs_for_generation\n )\n return outputs\n\n def prepare_inputs_for_generation(self, *args, **kwargs):\n peft_config = self.active_peft_config\n model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs)\n if model_kwargs[\"past_key_values\"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING:\n batch_size = model_kwargs[\"decoder_input_ids\"].shape[0]\n past_key_values = self.get_prompt(batch_size)\n if self.base_model_torch_dtype is not None:\n # handle the case for Bloom where it outputs tuple of tuples\n if isinstance(past_key_values[0], tuple):\n past_key_values = tuple(\n tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_value_tuple\n )\n for past_key_value_tuple in past_key_values\n )\n else:\n past_key_values = tuple(\n past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values\n )\n model_kwargs[\"past_key_values\"] = past_key_values\n\n return model_kwargs" }, { "identifier": "PeftModelForSequenceClassification", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForSequenceClassification(PeftModel):\n \"\"\"\n Peft model for sequence classification tasks.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n **Attributes**:\n - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.\n - **cls_layer_name** (`str`) -- The name of the classification layer.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSequenceClassification\n >>> from peft import PeftModelForSequenceClassification, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"SEQ_CLS\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 768,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 12,\n ... \"num_layers\": 12,\n ... \"encoder_hidden_size\": 768,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... 
}\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForSequenceClassification.from_pretrained(\"bert-base-cased\")\n >>> peft_model = PeftModelForSequenceClassification(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n if self.modules_to_save is None:\n self.modules_to_save = {\"classifier\", \"score\"}\n else:\n self.modules_to_save.update({\"classifier\", \"score\"})\n\n for name, _ in self.base_model.named_children():\n if any(module_name in name for module_name in self.modules_to_save):\n self.cls_layer_name = name\n break\n\n # to make sure classifier layer is trainable\n _set_trainable(self, adapter_name)\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n peft_config = self.active_peft_config\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)\n else:\n if kwargs.get(\"token_type_ids\", None) is not None:\n kwargs[\"token_type_ids\"] = torch.cat(\n (\n torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device),\n kwargs[\"token_type_ids\"],\n ),\n dim=1,\n ).long()\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def _prefix_tuning_forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n batch_size = input_ids.shape[0]\n past_key_values = self.get_prompt(batch_size)\n fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())\n kwargs.update(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"inputs_embeds\": inputs_embeds,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n \"past_key_values\": past_key_values,\n }\n )\n if \"past_key_values\" in fwd_params:\n return self.base_model(labels=labels, **kwargs)\n else:\n transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)\n fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())\n if \"past_key_values\" not in fwd_params:\n raise ValueError(\"Model does not support past key values which are required for prefix tuning.\")\n outputs = transformer_backbone_name(**kwargs)\n pooled_output = outputs[1] if len(outputs) > 1 else outputs[0]\n if \"dropout\" in [name for name, _ in list(self.base_model.named_children())]:\n pooled_output = self.base_model.dropout(pooled_output)\n logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.base_model.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = MSELoss()\n if self.base_model.num_labels == 1:\n loss = loss_fct(logits.squeeze(), labels.squeeze())\n else:\n loss = loss_fct(logits, labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )" }, { 
"identifier": "PeftModelForTokenClassification", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/peft_model.py", "snippet": "class PeftModelForTokenClassification(PeftModel):\n \"\"\"\n Peft model for token classification tasks.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): Base transformer model.\n peft_config ([`PeftConfig`]): Peft config.\n\n **Attributes**:\n - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model.\n - **cls_layer_name** (`str`) -- The name of the classification layer.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSequenceClassification\n >>> from peft import PeftModelForTokenClassification, get_peft_config\n\n >>> config = {\n ... \"peft_type\": \"PREFIX_TUNING\",\n ... \"task_type\": \"TOKEN_CLS\",\n ... \"inference_mode\": False,\n ... \"num_virtual_tokens\": 20,\n ... \"token_dim\": 768,\n ... \"num_transformer_submodules\": 1,\n ... \"num_attention_heads\": 12,\n ... \"num_layers\": 12,\n ... \"encoder_hidden_size\": 768,\n ... \"prefix_projection\": False,\n ... \"postprocess_past_key_value_function\": None,\n ... }\n\n >>> peft_config = get_peft_config(config)\n >>> model = AutoModelForTokenClassification.from_pretrained(\"bert-base-cased\")\n >>> peft_model = PeftModelForTokenClassification(model, peft_config)\n >>> peft_model.print_trainable_parameters()\n trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117\n ```\n \"\"\"\n\n def __init__(self, model, peft_config: PeftConfig = None, adapter_name=\"default\"):\n super().__init__(model, peft_config, adapter_name)\n if self.modules_to_save is None:\n self.modules_to_save = {\"classifier\", \"score\"}\n else:\n self.modules_to_save.update({\"classifier\", \"score\"})\n\n for name, _ in self.base_model.named_children():\n if any(module_name in name for module_name in self.modules_to_save):\n self.cls_layer_name = name\n break\n\n # to make sure classifier layer is trainable\n _set_trainable(self, adapter_name)\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n peft_config = self.active_peft_config\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n if not isinstance(peft_config, PromptLearningConfig):\n return self.base_model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n labels=labels,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n **kwargs,\n )\n\n batch_size = input_ids.shape[0]\n if attention_mask is not None:\n # concat prompt attention mask\n prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device)\n attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)\n if kwargs.get(\"position_ids\", None) is not None:\n warnings.warn(\"Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.\")\n kwargs[\"position_ids\"] = None\n kwargs.update(\n {\n \"attention_mask\": attention_mask,\n \"labels\": labels,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n }\n )\n\n if peft_config.peft_type == PeftType.PREFIX_TUNING:\n return self._prefix_tuning_forward(input_ids=input_ids, **kwargs)\n else:\n if kwargs.get(\"token_type_ids\", None) is not None:\n kwargs[\"token_type_ids\"] = torch.cat(\n (\n torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device),\n kwargs[\"token_type_ids\"],\n ),\n dim=1,\n ).long()\n if inputs_embeds is None:\n inputs_embeds = self.word_embeddings(input_ids)\n prompts = self.get_prompt(batch_size=batch_size)\n prompts = prompts.to(inputs_embeds.dtype)\n inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)\n return self.base_model(inputs_embeds=inputs_embeds, **kwargs)\n\n def _prefix_tuning_forward(\n self,\n input_ids=None,\n attention_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n **kwargs,\n ):\n batch_size = input_ids.shape[0]\n past_key_values = self.get_prompt(batch_size)\n fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys())\n kwargs.update(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"inputs_embeds\": inputs_embeds,\n \"output_attentions\": output_attentions,\n \"output_hidden_states\": output_hidden_states,\n \"return_dict\": return_dict,\n \"past_key_values\": past_key_values,\n }\n )\n if \"past_key_values\" in fwd_params:\n return self.base_model(labels=labels, **kwargs)\n else:\n transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name)\n fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys())\n if \"past_key_values\" not in fwd_params:\n raise ValueError(\"Model does not support past key values which are required for prefix tuning.\")\n outputs = transformer_backbone_name(**kwargs)\n sequence_output = outputs[0]\n if \"dropout\" in [name for name, _ in list(self.base_model.named_children())]:\n sequence_output = self.base_model.dropout(sequence_output)\n logits = self.base_model.get_submodule(self.cls_layer_name)(sequence_output)\n\n loss = None\n loss = None\n if labels is not None:\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return TokenClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )" }, { "identifier": "LoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/lora.py", "snippet": "class LoraConfig(PeftConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`LoraModel`].\n\n Args:\n r (`int`): Lora attention dimension.\n target_modules (`Union[List[str],str]`): The names of the modules to apply Lora to.\n lora_alpha (`float`): The alpha parameter for Lora scaling.\n lora_dropout (`float`): The dropout probability for Lora layers.\n fan_in_fan_out (`bool`): Set this to True if the layer to replace stores weight like (fan_in, fan_out).\n For example, gpt-2 uses `Conv1D` which stores weights like (fan_in, fan_out) and hence this should be set to `True`.:\n bias (`str`): Bias type for Lora. 
Can be 'none', 'all' or 'lora_only'\n modules_to_save (`List[str]`):List of modules apart from LoRA layers to be set as trainable\n and saved in the final checkpoint.\n \"\"\"\n\n r: int = field(default=8, metadata={\"help\": \"Lora attention dimension\"})\n target_modules: Optional[Union[List[str], str]] = field(\n default=None,\n metadata={\n \"help\": \"List of module names or regex expression of the module names to replace with Lora.\"\n \"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' \"\n },\n )\n lora_alpha: int = field(default=None, metadata={\"help\": \"Lora alpha\"})\n lora_dropout: float = field(default=None, metadata={\"help\": \"Lora dropout\"})\n fan_in_fan_out: bool = field(\n default=False,\n metadata={\"help\": \"Set this to True if the layer to replace stores weight like (fan_in, fan_out)\"},\n )\n bias: str = field(default=\"none\", metadata={\"help\": \"Bias type for Lora. Can be 'none', 'all' or 'lora_only'\"})\n modules_to_save: Optional[List[str]] = field(\n default=None,\n metadata={\n \"help\": \"List of modules apart from LoRA layers to be set as trainable and saved in the final checkpoint. \"\n \"For example, in Sequence Classification or Token Classification tasks, \"\n \"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved.\"\n },\n )\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.LORA" }, { "identifier": "AdaLoraConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/adalora.py", "snippet": "class AdaLoraConfig(LoraConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`~peft.AdaLora`].\n\n Args:\n target_r (`int`): The target average rank of incremental matrix.\n init_r (`int`): The initial rank for each incremental matrix.\n tinit (`int`): The steps of initial fine-tuning warmup.\n tfinal (`int`): The step of final fine-tuning.\n deltaT (`int`): The time internval between two budget allocations.\n beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.\n beta2 (`float`): The hyperparameter of EMA for undertainty quantification.\n orth_reg_weight (`float`): The coefficient of orthogonal regularization.\n total_step (`int`): The total training steps that should be specified before training.\n rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.\n \"\"\"\n\n target_r: int = field(default=8, metadata={\"help\": \"Target Lora matrix dimension.\"})\n init_r: int = field(default=12, metadata={\"help\": \"Intial Lora matrix dimension.\"})\n tinit: int = field(default=0, metadata={\"help\": \"The steps of initial warmup.\"})\n tfinal: int = field(default=0, metadata={\"help\": \"The steps of final warmup.\"})\n deltaT: int = field(default=1, metadata={\"help\": \"Step interval of rank allocation.\"})\n beta1: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n beta2: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n orth_reg_weight: float = field(default=0.5, metadata={\"help\": \"The orthogonal regularization coefficient.\"})\n total_step: Optional[int] = field(default=None, metadata={\"help\": \"The total training steps.\"})\n rank_pattern: Optional[dict] = field(default=None, metadata={\"help\": \"The saved rank pattern.\"})\n\n def __post_init__(self):\n self.peft_type = PeftType.ADALORA" }, { 
"identifier": "PromptEncoderConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/p_tuning.py", "snippet": "class PromptEncoderConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PromptEncoder`].\n\n Args:\n encoder_reparameterization_type (Union[[`PromptEncoderReparameterizationType`], `str`]):\n The type of reparameterization to use.\n encoder_hidden_size (`int`): The hidden size of the prompt encoder.\n encoder_num_layers (`int`): The number of layers of the prompt encoder.\n encoder_dropout (`float`): The dropout probability of the prompt encoder.\n \"\"\"\n\n encoder_reparameterization_type: Union[str, PromptEncoderReparameterizationType] = field(\n default=PromptEncoderReparameterizationType.MLP,\n metadata={\"help\": \"How to reparameterize the prompt encoder\"},\n )\n encoder_hidden_size: int = field(\n default=None,\n metadata={\"help\": \"The hidden size of the prompt encoder\"},\n )\n encoder_num_layers: int = field(\n default=2,\n metadata={\"help\": \"The number of layers of the prompt encoder\"},\n )\n encoder_dropout: float = field(\n default=0.0,\n metadata={\"help\": \"The dropout of the prompt encoder\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.P_TUNING" }, { "identifier": "PrefixTuningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prefix_tuning.py", "snippet": "class PrefixTuningConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PrefixEncoder`].\n\n Args:\n encoder_hidden_size (`int`): The hidden size of the prompt encoder.\n prefix_projection (`bool`): Whether to project the prefix embeddings.\n \"\"\"\n\n encoder_hidden_size: int = field(\n default=None,\n metadata={\"help\": \"The hidden size of the encoder\"},\n )\n prefix_projection: bool = field(\n default=False,\n metadata={\"help\": \"Whether to project the prefix tokens\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.PREFIX_TUNING" }, { "identifier": "PromptTuningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/tuners/prompt_tuning.py", "snippet": "class PromptTuningConfig(PromptLearningConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`PromptEmbedding`].\n\n Args:\n prompt_tuning_init (Union[[`PromptTuningInit`], `str`]): The initialization of the prompt embedding.\n prompt_tuning_init_text (`str`, *optional*):\n The text to initialize the prompt embedding. Only used if `prompt_tuning_init` is `TEXT`.\n tokenizer_name_or_path (`str`, *optional*):\n The name or path of the tokenizer. Only used if `prompt_tuning_init` is `TEXT`.\n \"\"\"\n\n prompt_tuning_init: Union[PromptTuningInit, str] = field(\n default=PromptTuningInit.RANDOM,\n metadata={\"help\": \"How to initialize the prompt tuning parameters\"},\n )\n prompt_tuning_init_text: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The text to use for prompt tuning initialization. Only used if prompt_tuning_init is `TEXT`\"\n },\n )\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The tokenizer to use for prompt tuning initialization. 
Only used if prompt_tuning_init is `TEXT`\"\n },\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.PROMPT_TUNING" }, { "identifier": "PromptLearningConfig", "path": "lms/runtime/prune/llm_pruner/LLMPruner/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" } ]
from .peft_model import (
    PeftModel,
    PeftModelForCausalLM,
    PeftModelForSeq2SeqLM,
    PeftModelForSequenceClassification,
    PeftModelForTokenClassification,
)
from .tuners import AdaLoraConfig, LoraConfig, PrefixTuningConfig, PromptEncoderConfig, PromptTuningConfig
from .utils import PromptLearningConfig
14,919
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance( peft_config, PromptLearningConfig ):
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. MODEL_TYPE_TO_PEFT_MODEL_MAPPING = { "SEQ_CLS": PeftModelForSequenceClassification, "SEQ_2_SEQ_LM": PeftModelForSeq2SeqLM, "CAUSAL_LM": PeftModelForCausalLM, "TOKEN_CLS": PeftModelForTokenClassification, } PEFT_TYPE_TO_CONFIG_MAPPING = { "PROMPT_TUNING": PromptTuningConfig, "PREFIX_TUNING": PrefixTuningConfig, "P_TUNING": PromptEncoderConfig, "LORA": LoraConfig, "ADALORA": AdaLoraConfig, } def get_peft_config(config_dict): """ Returns a Peft config object from a dictionary. Args: config_dict (`Dict[str, Any]`): Dictionary containing the configuration parameters. """ return PEFT_TYPE_TO_CONFIG_MAPPING[config_dict["peft_type"]](**config_dict) def _prepare_prompt_learning_config(peft_config, model_config): if peft_config.num_layers is None: if "num_hidden_layers" in model_config: num_layers = model_config["num_hidden_layers"] elif "num_layers" in model_config: num_layers = model_config["num_layers"] elif "n_layer" in model_config: num_layers = model_config["n_layer"] else: raise ValueError("Please specify `num_layers` in `peft_config`") peft_config.num_layers = num_layers if peft_config.token_dim is None: if "hidden_size" in model_config: token_dim = model_config["hidden_size"] elif "n_embd" in model_config: token_dim = model_config["n_embd"] elif "d_model" in model_config: token_dim = model_config["d_model"] else: raise ValueError("Please specify `token_dim` in `peft_config`") peft_config.token_dim = token_dim if peft_config.num_attention_heads is None: if "num_attention_heads" in model_config: num_attention_heads = model_config["num_attention_heads"] elif "n_head" in model_config: num_attention_heads = model_config["n_head"] elif "num_heads" in model_config: num_attention_heads = model_config["num_heads"] elif "encoder_attention_heads" in model_config: num_attention_heads = model_config["encoder_attention_heads"] else: raise ValueError("Please specify `num_attention_heads` in `peft_config`") peft_config.num_attention_heads = num_attention_heads if getattr(peft_config, "encoder_hidden_size", None) is None: setattr(peft_config, "encoder_hidden_size", token_dim) return peft_config def get_peft_model(model, peft_config): """ Returns a Peft model object from a model and a config. Args: model ([`transformers.PreTrainedModel`]): Model to be wrapped. peft_config ([`PeftConfig`]): Configuration object containing the parameters of the Peft model. """ model_config = model.config.to_dict() if hasattr(model.config, "to_dict") else model.config peft_config.base_model_name_or_path = model.__dict__.get("name_or_path", None) if peft_config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys() and not isinstance( peft_config, PromptLearningConfig ):
return PeftModel(model, peft_config)
0
2023-10-30 10:50:32+00:00
24k
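
The record above pairs cross-file context snippets with a held-out completion target. The short sketch below shows one way such a record could be assembled into a next-line prediction example. It is a minimal illustration only: the JSON Lines storage, the file name `samples.jsonl`, and the exact field semantics are assumptions inferred from the columns shown in this dump, not something the dump itself specifies.

```py
import json

def build_example(record: dict) -> dict:
    """Turn one dumped record into a next-line completion example.

    Assumed reading of the fields: the model is shown the file's imports plus
    the cropped code and must produce the held-out next line, while
    `gold_snippet_index` points at the cross-file context snippet needed to
    complete it (an index into the `context` list).
    """
    prompt = record["import_statement"] + "\n\n" + record["cropped_code"]
    target = record["next_line"]
    gold = record["context"][record["gold_snippet_index"]]
    return {
        "prompt": prompt,
        "target": target,
        "gold_identifier": gold["identifier"],
        "gold_path": gold["path"],
    }

if __name__ == "__main__":
    # Hypothetical file name; the dump does not say how the records are stored.
    with open("samples.jsonl", encoding="utf-8") as f:
        for line in f:
            example = build_example(json.loads(line))
            print(example["gold_identifier"], "->", example["target"])
```
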
chenran-li/RQL-release
stable_baselines3/dqn_ME/dqn_ME.py
[ { "identifier": "ReplayBuffer", "path": "stable_baselines3/common/buffers.py", "snippet": "class ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: Union[th.device, str] = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n # Adjust buffer size\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n # there is a bug if both optimize_memory_usage and handle_timeout_termination are true\n # see https://github.com/DLR-RM/stable-baselines3/issues/934\n if optimize_memory_usage and handle_timeout_termination:\n raise ValueError(\n \"ReplayBuffer does not support optimize_memory_usage = True \"\n \"and handle_timeout_termination = True simultaneously.\"\n )\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)\n\n if optimize_memory_usage:\n # `observations` contains also the next observation\n self.next_observations = None\n else:\n self.next_observations = np.zeros((self.buffer_size, self.n_envs) + self.obs_shape, dtype=observation_space.dtype)\n\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=action_space.dtype)\n\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n total_memory_usage = self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n\n if self.next_observations is not None:\n total_memory_usage += self.next_observations.nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: List[Dict[str, Any]],\n 
) -> None:\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs,) + self.obs_shape)\n next_obs = next_obs.reshape((self.n_envs,) + self.obs_shape)\n\n # Same, for actions\n action = action.reshape((self.n_envs, self.action_dim))\n\n # Copy to avoid modification by reference\n self.observations[self.pos] = np.array(obs).copy()\n\n if self.optimize_memory_usage:\n self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs).copy()\n else:\n self.next_observations[self.pos] = np.array(next_obs).copy()\n\n self.actions[self.pos] = np.array(action).copy()\n self.rewards[self.pos] = np.array(reward).copy()\n self.dones[self.pos] = np.array(done).copy()\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample(self, batch_size: int, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n Custom sampling when using memory efficient variant,\n as we should not sample the element with index `self.pos`\n See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n if not self.optimize_memory_usage:\n return super().sample(batch_size=batch_size, env=env)\n # Do not sample the element with index `self.pos` as the transitions is invalid\n # (we use only one array to store `obs` and `next_obs`)\n if self.full:\n batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size\n else:\n batch_inds = np.random.randint(0, self.pos, size=batch_size)\n return self._get_samples(batch_inds, env=env)\n\n def _get_samples(self, batch_inds: np.ndarray, env: Optional[VecNormalize] = None) -> ReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n if self.optimize_memory_usage:\n next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :], env)\n else:\n next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :], env)\n\n data = (\n self._normalize_obs(self.observations[batch_inds, env_indices, :], env),\n self.actions[batch_inds, env_indices, :],\n next_obs,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),\n self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env),\n )\n return ReplayBufferSamples(*tuple(map(self.to_torch, data)))" }, { "identifier": "OffPolicyAlgorithm", "path": "stable_baselines3/common/off_policy_algorithm.py", "snippet": "class OffPolicyAlgorithm(BaseAlgorithm):\n \"\"\"\n The base for Off-Policy algorithms (ex: SAC/TD3)\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from\n (if registered in Gym, can be str. 
Can be None for loading trained models)\n :param learning_rate: learning rate for the optimizer,\n it can be a function of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param action_noise: the action noise type (None by default), this can help\n for hard exploration problem. Cf common.noise for the different action noise type.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param policy_kwargs: Additional arguments to be passed to the policy on creation\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param device: Device on which the code should run.\n By default, it will try to use a Cuda compatible device and fallback to cpu\n if it is not possible.\n :param support_multi_env: Whether the algorithm supports training\n with multiple environments (as in A2C)\n :param monitor_wrapper: When creating an environment, whether to wrap it\n or not in a Monitor wrapper.\n :param seed: Seed for the pseudo random generators\n :param use_sde: Whether to use State Dependent Exploration (SDE)\n instead of action noise exploration (default: False)\n :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE\n Default: -1 (only sample at the beginning of the rollout)\n :param use_sde_at_warmup: Whether to use gSDE instead of uniform sampling\n during the warm up phase (before learning starts)\n :param sde_support: Whether the model support gSDE or not\n :param supported_action_spaces: The action spaces supported by the algorithm.\n \"\"\"\n\n def __init__(\n self,\n policy: Union[str, Type[BasePolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule],\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 100,\n batch_size: int = 256,\n tau: float = 0.005,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = (1, \"step\"),\n gradient_steps: int = 1,\n action_noise: Optional[ActionNoise] = None,\n replay_buffer_class: Optional[Type[ReplayBuffer]] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n tensorboard_log: Optional[str] = None,\n verbose: int = 0,\n device: Union[th.device, str] = \"auto\",\n support_multi_env: bool = False,\n monitor_wrapper: bool = True,\n seed: Optional[int] = None,\n use_sde: bool = 
False,\n sde_sample_freq: int = -1,\n use_sde_at_warmup: bool = False,\n sde_support: bool = True,\n supported_action_spaces: Optional[Tuple[spaces.Space, ...]] = None,\n ):\n\n super().__init__(\n policy=policy,\n env=env,\n learning_rate=learning_rate,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n support_multi_env=support_multi_env,\n monitor_wrapper=monitor_wrapper,\n seed=seed,\n use_sde=use_sde,\n sde_sample_freq=sde_sample_freq,\n supported_action_spaces=supported_action_spaces,\n )\n self.buffer_size = buffer_size\n self.batch_size = batch_size\n self.learning_starts = learning_starts\n self.tau = tau\n self.gamma = gamma\n self.gradient_steps = gradient_steps\n self.action_noise = action_noise\n self.optimize_memory_usage = optimize_memory_usage\n self.replay_buffer_class = replay_buffer_class\n if replay_buffer_kwargs is None:\n replay_buffer_kwargs = {}\n self.replay_buffer_kwargs = replay_buffer_kwargs\n self._episode_storage = None\n\n # Save train freq parameter, will be converted later to TrainFreq object\n self.train_freq = train_freq\n\n self.actor = None # type: Optional[th.nn.Module]\n self.replay_buffer = None # type: Optional[ReplayBuffer]\n # Update policy keyword arguments\n if sde_support:\n self.policy_kwargs[\"use_sde\"] = self.use_sde\n # For gSDE only\n self.use_sde_at_warmup = use_sde_at_warmup\n\n def _convert_train_freq(self) -> None:\n \"\"\"\n Convert `train_freq` parameter (int or tuple)\n to a TrainFreq object.\n \"\"\"\n if not isinstance(self.train_freq, TrainFreq):\n train_freq = self.train_freq\n\n # The value of the train frequency will be checked later\n if not isinstance(train_freq, tuple):\n train_freq = (train_freq, \"step\")\n\n try:\n train_freq = (train_freq[0], TrainFrequencyUnit(train_freq[1]))\n except ValueError as e:\n raise ValueError(\n f\"The unit of the `train_freq` must be either 'step' or 'episode' not '{train_freq[1]}'!\"\n ) from e\n\n if not isinstance(train_freq[0], int):\n raise ValueError(f\"The frequency of `train_freq` must be an integer and not {train_freq[0]}\")\n\n self.train_freq = TrainFreq(*train_freq)\n\n def _setup_model(self) -> None:\n self._setup_lr_schedule()\n self.set_random_seed(self.seed)\n\n # Use DictReplayBuffer if needed\n if self.replay_buffer_class is None:\n if isinstance(self.observation_space, spaces.Dict):\n self.replay_buffer_class = DictReplayBuffer\n else:\n self.replay_buffer_class = ReplayBuffer\n\n elif self.replay_buffer_class == HerReplayBuffer:\n assert self.env is not None, \"You must pass an environment when using `HerReplayBuffer`\"\n\n # If using offline sampling, we need a classic replay buffer too\n if self.replay_buffer_kwargs.get(\"online_sampling\", True):\n replay_buffer = None\n else:\n replay_buffer = DictReplayBuffer(\n self.buffer_size,\n self.observation_space,\n self.action_space,\n device=self.device,\n optimize_memory_usage=self.optimize_memory_usage,\n )\n\n self.replay_buffer = HerReplayBuffer(\n self.env,\n self.buffer_size,\n device=self.device,\n replay_buffer=replay_buffer,\n **self.replay_buffer_kwargs,\n )\n\n if self.replay_buffer is None:\n self.replay_buffer = self.replay_buffer_class(\n self.buffer_size,\n self.observation_space,\n self.action_space,\n device=self.device,\n n_envs=self.n_envs,\n optimize_memory_usage=self.optimize_memory_usage,\n **self.replay_buffer_kwargs,\n )\n\n self.policy = self.policy_class( # pytype:disable=not-instantiable\n self.observation_space,\n self.action_space,\n 
self.lr_schedule,\n **self.policy_kwargs, # pytype:disable=not-instantiable\n )\n self.policy = self.policy.to(self.device)\n\n # Convert train freq parameter to TrainFreq object\n self._convert_train_freq()\n\n def save_replay_buffer(self, path: Union[str, pathlib.Path, io.BufferedIOBase]) -> None:\n \"\"\"\n Save the replay buffer as a pickle file.\n\n :param path: Path to the file where the replay buffer should be saved.\n if path is a str or pathlib.Path, the path is automatically created if necessary.\n \"\"\"\n assert self.replay_buffer is not None, \"The replay buffer is not defined\"\n save_to_pkl(path, self.replay_buffer, self.verbose)\n\n def load_replay_buffer(\n self,\n path: Union[str, pathlib.Path, io.BufferedIOBase],\n truncate_last_traj: bool = True,\n ) -> None:\n \"\"\"\n Load a replay buffer from a pickle file.\n\n :param path: Path to the pickled replay buffer.\n :param truncate_last_traj: When using ``HerReplayBuffer`` with online sampling:\n If set to ``True``, we assume that the last trajectory in the replay buffer was finished\n (and truncate it).\n If set to ``False``, we assume that we continue the same trajectory (same episode).\n \"\"\"\n self.replay_buffer = load_from_pkl(path, self.verbose)\n assert isinstance(self.replay_buffer, ReplayBuffer), \"The replay buffer must inherit from ReplayBuffer class\"\n\n # Backward compatibility with SB3 < 2.1.0 replay buffer\n # Keep old behavior: do not handle timeout termination separately\n if not hasattr(self.replay_buffer, \"handle_timeout_termination\"): # pragma: no cover\n self.replay_buffer.handle_timeout_termination = False\n self.replay_buffer.timeouts = np.zeros_like(self.replay_buffer.dones)\n\n if isinstance(self.replay_buffer, HerReplayBuffer):\n assert self.env is not None, \"You must pass an environment at load time when using `HerReplayBuffer`\"\n self.replay_buffer.set_env(self.get_env())\n if truncate_last_traj:\n self.replay_buffer.truncate_last_trajectory()\n\n def _setup_learn(\n self,\n total_timesteps: int,\n callback: MaybeCallback = None,\n reset_num_timesteps: bool = True,\n tb_log_name: str = \"run\",\n progress_bar: bool = False,\n ) -> Tuple[int, BaseCallback]:\n \"\"\"\n cf `BaseAlgorithm`.\n \"\"\"\n # Prevent continuity issue by truncating trajectory\n # when using memory efficient replay buffer\n # see https://github.com/DLR-RM/stable-baselines3/issues/46\n\n # Special case when using HerReplayBuffer,\n # the classic replay buffer is inside it when using offline sampling\n if isinstance(self.replay_buffer, HerReplayBuffer):\n replay_buffer = self.replay_buffer.replay_buffer\n else:\n replay_buffer = self.replay_buffer\n\n truncate_last_traj = (\n self.optimize_memory_usage\n and reset_num_timesteps\n and replay_buffer is not None\n and (replay_buffer.full or replay_buffer.pos > 0)\n )\n\n if truncate_last_traj:\n warnings.warn(\n \"The last trajectory in the replay buffer will be truncated, \"\n \"see https://github.com/DLR-RM/stable-baselines3/issues/46.\"\n \"You should use `reset_num_timesteps=False` or `optimize_memory_usage=False`\"\n \"to avoid that issue.\"\n )\n # Go to the previous index\n pos = (replay_buffer.pos - 1) % replay_buffer.buffer_size\n replay_buffer.dones[pos] = True\n\n return super()._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n def learn(\n self: SelfOffPolicyAlgorithm,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n tb_log_name: str = \"run\",\n reset_num_timesteps: 
bool = True,\n progress_bar: bool = False,\n ) -> SelfOffPolicyAlgorithm:\n\n total_timesteps, callback = self._setup_learn(\n total_timesteps,\n callback,\n reset_num_timesteps,\n tb_log_name,\n progress_bar,\n )\n\n callback.on_training_start(locals(), globals())\n\n while self.num_timesteps < total_timesteps:\n rollout = self.collect_rollouts(\n self.env,\n train_freq=self.train_freq,\n action_noise=self.action_noise,\n callback=callback,\n learning_starts=self.learning_starts,\n replay_buffer=self.replay_buffer,\n log_interval=log_interval,\n )\n\n if rollout.continue_training is False:\n break\n\n if self.num_timesteps > 0 and self.num_timesteps > self.learning_starts:\n # If no `gradient_steps` is specified,\n # do as many gradients steps as steps performed during the rollout\n gradient_steps = self.gradient_steps if self.gradient_steps >= 0 else rollout.episode_timesteps\n # Special case when the user passes `gradient_steps=0`\n if gradient_steps > 0:\n self.train(batch_size=self.batch_size, gradient_steps=gradient_steps)\n\n callback.on_training_end()\n\n return self\n\n def train(self, gradient_steps: int, batch_size: int) -> None:\n \"\"\"\n Sample the replay buffer and do the updates\n (gradient descent and update target networks)\n \"\"\"\n raise NotImplementedError()\n\n def _sample_action(\n self,\n learning_starts: int,\n action_noise: Optional[ActionNoise] = None,\n n_envs: int = 1,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Sample an action according to the exploration policy.\n This is either done by sampling the probability distribution of the policy,\n or sampling a random action (from a uniform distribution over the action space)\n or by adding noise to the deterministic output.\n\n :param action_noise: Action noise that will be used for exploration\n Required for deterministic policy (e.g. TD3). 
This can also be used\n in addition to the stochastic policy for SAC.\n :param learning_starts: Number of steps before learning for the warm-up phase.\n :param n_envs:\n :return: action to take in the environment\n and scaled action that will be stored in the replay buffer.\n The two differs when the action space is not normalized (bounds are not [-1, 1]).\n \"\"\"\n # Select action randomly or according to policy\n if self.num_timesteps < learning_starts and not (self.use_sde and self.use_sde_at_warmup):\n # Warmup phase\n unscaled_action = np.array([self.action_space.sample() for _ in range(n_envs)])\n else:\n # Note: when using continuous actions,\n # we assume that the policy uses tanh to scale the action\n # We use non-deterministic action in the case of SAC, for TD3, it does not matter\n unscaled_action, _ = self.predict(self._last_obs, deterministic=False)\n\n # Rescale the action from [low, high] to [-1, 1]\n if isinstance(self.action_space, spaces.Box):\n scaled_action = self.policy.scale_action(unscaled_action)\n\n # Add noise to the action (improve exploration)\n if action_noise is not None:\n scaled_action = np.clip(scaled_action + action_noise(), -1, 1)\n\n # We store the scaled action in the buffer\n buffer_action = scaled_action\n action = self.policy.unscale_action(scaled_action)\n else:\n # Discrete case, no need to normalize or clip\n buffer_action = unscaled_action\n action = buffer_action\n return action, buffer_action\n\n def _dump_logs(self) -> None:\n \"\"\"\n Write log.\n \"\"\"\n time_elapsed = max((time.time_ns() - self.start_time) / 1e9, sys.float_info.epsilon)\n fps = int((self.num_timesteps - self._num_timesteps_at_start) / time_elapsed)\n self.logger.record(\"time/episodes\", self._episode_num, exclude=\"tensorboard\")\n if len(self.ep_info_buffer) > 0 and len(self.ep_info_buffer[0]) > 0:\n self.logger.record(\"rollout/ep_rew_mean\", safe_mean([ep_info[\"r\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"rollout/ep_len_mean\", safe_mean([ep_info[\"l\"] for ep_info in self.ep_info_buffer]))\n self.logger.record(\"time/fps\", fps)\n self.logger.record(\"time/time_elapsed\", int(time_elapsed), exclude=\"tensorboard\")\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n if self.use_sde:\n self.logger.record(\"train/std\", (self.actor.get_std()).mean().item())\n\n if len(self.ep_success_buffer) > 0:\n self.logger.record(\"rollout/success_rate\", safe_mean(self.ep_success_buffer))\n # Pass the number of timesteps for tensorboard\n self.logger.dump(step=self.num_timesteps)\n\n def _on_step(self) -> None:\n \"\"\"\n Method called after each step in the environment.\n It is meant to trigger DQN target network update\n but can be used for other purposes\n \"\"\"\n pass\n\n def _store_transition(\n self,\n replay_buffer: ReplayBuffer,\n buffer_action: np.ndarray,\n new_obs: Union[np.ndarray, Dict[str, np.ndarray]],\n reward: np.ndarray,\n dones: np.ndarray,\n infos: List[Dict[str, Any]],\n ) -> None:\n \"\"\"\n Store transition in the replay buffer.\n We store the normalized action and the unnormalized observation.\n It also handles terminal observations (because VecEnv resets automatically).\n\n :param replay_buffer: Replay buffer object where to store the transition.\n :param buffer_action: normalized action\n :param new_obs: next observation in the current episode\n or first observation of the episode (when dones is True)\n :param reward: reward for the current transition\n :param dones: Termination signal\n 
:param infos: List of additional information about the transition.\n It may contain the terminal observations and information about timeout.\n \"\"\"\n # Store only the unnormalized version\n if self._vec_normalize_env is not None:\n new_obs_ = self._vec_normalize_env.get_original_obs()\n reward_ = self._vec_normalize_env.get_original_reward()\n else:\n # Avoid changing the original ones\n self._last_original_obs, new_obs_, reward_ = self._last_obs, new_obs, reward\n\n # Avoid modification by reference\n next_obs = deepcopy(new_obs_)\n # As the VecEnv resets automatically, new_obs is already the\n # first observation of the next episode\n for i, done in enumerate(dones):\n if done and infos[i].get(\"terminal_observation\") is not None:\n if isinstance(next_obs, dict):\n next_obs_ = infos[i][\"terminal_observation\"]\n # VecNormalize normalizes the terminal observation\n if self._vec_normalize_env is not None:\n next_obs_ = self._vec_normalize_env.unnormalize_obs(next_obs_)\n # Replace next obs for the correct envs\n for key in next_obs.keys():\n next_obs[key][i] = next_obs_[key]\n else:\n next_obs[i] = infos[i][\"terminal_observation\"]\n # VecNormalize normalizes the terminal observation\n if self._vec_normalize_env is not None:\n next_obs[i] = self._vec_normalize_env.unnormalize_obs(next_obs[i, :])\n\n replay_buffer.add(\n self._last_original_obs,\n next_obs,\n buffer_action,\n reward_,\n dones,\n infos,\n )\n\n self._last_obs = new_obs\n # Save the unnormalized observation\n if self._vec_normalize_env is not None:\n self._last_original_obs = new_obs_\n\n def collect_rollouts(\n self,\n env: VecEnv,\n callback: BaseCallback,\n train_freq: TrainFreq,\n replay_buffer: ReplayBuffer,\n action_noise: Optional[ActionNoise] = None,\n learning_starts: int = 0,\n log_interval: Optional[int] = None,\n ) -> RolloutReturn:\n \"\"\"\n Collect experiences and store them into a ``ReplayBuffer``.\n\n :param env: The training environment\n :param callback: Callback that will be called at each step\n (and at the beginning and end of the rollout)\n :param train_freq: How much experience to collect\n by doing rollouts of current policy.\n Either ``TrainFreq(<n>, TrainFrequencyUnit.STEP)``\n or ``TrainFreq(<n>, TrainFrequencyUnit.EPISODE)``\n with ``<n>`` being an integer greater than 0.\n :param action_noise: Action noise that will be used for exploration\n Required for deterministic policy (e.g. TD3). 
This can also be used\n in addition to the stochastic policy for SAC.\n :param learning_starts: Number of steps before learning for the warm-up phase.\n :param replay_buffer:\n :param log_interval: Log data every ``log_interval`` episodes\n :return:\n \"\"\"\n # Switch to eval mode (this affects batch norm / dropout)\n self.policy.set_training_mode(False)\n\n num_collected_steps, num_collected_episodes = 0, 0\n\n assert isinstance(env, VecEnv), \"You must pass a VecEnv\"\n assert train_freq.frequency > 0, \"Should at least collect one step or episode.\"\n\n if env.num_envs > 1:\n assert train_freq.unit == TrainFrequencyUnit.STEP, \"You must use only one env when doing episodic training.\"\n\n # Vectorize action noise if needed\n if action_noise is not None and env.num_envs > 1 and not isinstance(action_noise, VectorizedActionNoise):\n action_noise = VectorizedActionNoise(action_noise, env.num_envs)\n\n if self.use_sde:\n self.actor.reset_noise(env.num_envs)\n\n callback.on_rollout_start()\n continue_training = True\n\n while should_collect_more_steps(train_freq, num_collected_steps, num_collected_episodes):\n if self.use_sde and self.sde_sample_freq > 0 and num_collected_steps % self.sde_sample_freq == 0:\n # Sample a new noise matrix\n self.actor.reset_noise(env.num_envs)\n\n # Select action randomly or according to policy\n actions, buffer_actions = self._sample_action(learning_starts, action_noise, env.num_envs)\n\n # Rescale and perform action\n new_obs, rewards, dones, infos = env.step(actions)\n\n self.num_timesteps += env.num_envs\n num_collected_steps += 1\n\n # Give access to local variables\n callback.update_locals(locals())\n # Only stop training if return value is False, not when it is None.\n if callback.on_step() is False:\n return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training=False)\n\n # Retrieve reward and episode length if using Monitor wrapper\n self._update_info_buffer(infos, dones)\n\n # Store data in replay buffer (normalized action and unnormalized observation)\n self._store_transition(replay_buffer, buffer_actions, new_obs, rewards, dones, infos)\n\n self._update_current_progress_remaining(self.num_timesteps, self._total_timesteps)\n\n # For DQN, check if the target network should be updated\n # and update the exploration schedule\n # For SAC/TD3, the update is dones as the same time as the gradient update\n # see https://github.com/hill-a/stable-baselines/issues/900\n self._on_step()\n\n for idx, done in enumerate(dones):\n if done:\n # Update stats\n num_collected_episodes += 1\n self._episode_num += 1\n\n if action_noise is not None:\n kwargs = dict(indices=[idx]) if env.num_envs > 1 else {}\n action_noise.reset(**kwargs)\n\n # Log training infos\n if log_interval is not None and self._episode_num % log_interval == 0:\n self._dump_logs()\n callback.on_rollout_end()\n\n return RolloutReturn(num_collected_steps * env.num_envs, num_collected_episodes, continue_training)" }, { "identifier": "BasePolicy", "path": "stable_baselines3/common/policies.py", "snippet": "class BasePolicy(BaseModel, ABC):\n \"\"\"The base policy object.\n\n Parameters are mostly the same as `BaseModel`; additions are documented below.\n\n :param args: positional arguments passed through to `BaseModel`.\n :param kwargs: keyword arguments passed through to `BaseModel`.\n :param squash_output: For continuous actions, whether the output is squashed\n or not using a ``tanh()`` function.\n \"\"\"\n\n def __init__(self, *args, squash_output: bool = 
False, **kwargs):\n super().__init__(*args, **kwargs)\n self._squash_output = squash_output\n\n @staticmethod\n def _dummy_schedule(progress_remaining: float) -> float:\n \"\"\"(float) Useful for pickling policy.\"\"\"\n del progress_remaining\n return 0.0\n\n @property\n def squash_output(self) -> bool:\n \"\"\"(bool) Getter for squash_output.\"\"\"\n return self._squash_output\n\n @staticmethod\n def init_weights(module: nn.Module, gain: float = 1) -> None:\n \"\"\"\n Orthogonal initialization (used in PPO and A2C)\n \"\"\"\n if isinstance(module, (nn.Linear, nn.Conv2d)):\n nn.init.orthogonal_(module.weight, gain=gain)\n if module.bias is not None:\n module.bias.data.fill_(0.0)\n\n @abstractmethod\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n \"\"\"\n Get the action according to the policy for a given observation.\n\n By default provides a dummy implementation -- not all BasePolicy classes\n implement this, e.g. if they are a Critic in an Actor-Critic method.\n\n :param observation:\n :param deterministic: Whether to use stochastic or deterministic actions\n :return: Taken action according to the policy\n \"\"\"\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Get the policy action from an observation (and optional hidden state).\n Includes sugar-coating to handle different observations (e.g. normalizing images).\n\n :param observation: the input observation\n :param state: The last hidden states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n this correspond to beginning of episodes,\n where the hidden states of the RNN must be reset.\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next hidden state\n (used in recurrent policies)\n \"\"\"\n # TODO (GH/1): add support for RNN policies\n # if state is None:\n # state = self.initial_state\n # if episode_start is None:\n # episode_start = [False for _ in range(self.n_envs)]\n # Switch to eval mode (this affects batch norm / dropout)\n self.set_training_mode(False)\n\n observation, vectorized_env = self.obs_to_tensor(observation)\n\n with th.no_grad():\n actions = self._predict(observation, deterministic=deterministic)\n # Convert to numpy, and reshape to the original action shape\n actions = actions.cpu().numpy().reshape((-1,) + self.action_space.shape)\n\n if isinstance(self.action_space, spaces.Box):\n if self.squash_output:\n # Rescale to proper domain when using squashing\n actions = self.unscale_action(actions)\n else:\n # Actions could be on arbitrary scale, so clip the actions to avoid\n # out of bound error (e.g. 
if sampling from a Gaussian distribution)\n actions = np.clip(actions, self.action_space.low, self.action_space.high)\n\n # Remove batch dimension if needed\n if not vectorized_env:\n actions = actions.squeeze(axis=0)\n\n return actions, state\n\n def scale_action(self, action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [low, high] to [-1, 1]\n (no need for symmetric action space)\n\n :param action: Action to scale\n :return: Scaled action\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return 2.0 * ((action - low) / (high - low)) - 1.0\n\n def unscale_action(self, scaled_action: np.ndarray) -> np.ndarray:\n \"\"\"\n Rescale the action from [-1, 1] to [low, high]\n (no need for symmetric action space)\n\n :param scaled_action: Action to un-scale\n \"\"\"\n low, high = self.action_space.low, self.action_space.high\n return low + (0.5 * (scaled_action + 1.0) * (high - low))" }, { "identifier": "maybe_transpose", "path": "stable_baselines3/common/preprocessing.py", "snippet": "def maybe_transpose(observation: np.ndarray, observation_space: spaces.Space) -> np.ndarray:\n \"\"\"\n Handle the different cases for images as PyTorch use channel first format.\n\n :param observation:\n :param observation_space:\n :return: channel first observation if observation is an image\n \"\"\"\n # Avoid circular import\n from stable_baselines3.common.vec_env import VecTransposeImage\n\n if is_image_space(observation_space):\n if not (observation.shape == observation_space.shape or observation.shape[1:] == observation_space.shape):\n # Try to re-order the channels\n transpose_obs = VecTransposeImage.transpose_image(observation)\n if transpose_obs.shape == observation_space.shape or transpose_obs.shape[1:] == observation_space.shape:\n observation = transpose_obs\n return observation" }, { "identifier": "GymEnv", "path": "stable_baselines3/common/type_aliases.py", "snippet": "class RolloutBufferSamples(NamedTuple):\nclass DictRolloutBufferSamples(NamedTuple):\nclass ReplayBufferSamples(NamedTuple):\nclass DictReplayBufferSamples(NamedTuple):\nclass RolloutReturn(NamedTuple):\nclass TrainFrequencyUnit(Enum):\nclass TrainFreq(NamedTuple):\nclass PolicyPredictor(Protocol):\n STEP = \"step\"\n EPISODE = \"episode\"\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:" }, { "identifier": "get_linear_fn", "path": "stable_baselines3/common/utils.py", "snippet": "def get_linear_fn(start: float, end: float, end_fraction: float) -> Schedule:\n \"\"\"\n Create a function that interpolates linearly between start and end\n between ``progress_remaining`` = 1 and ``progress_remaining`` = ``end_fraction``.\n This is used in DQN for linearly annealing the exploration fraction\n (epsilon for the epsilon-greedy strategy).\n\n :params start: value to start with if ``progress_remaining`` = 1\n :params end: value to end with if ``progress_remaining`` = 0\n :params end_fraction: fraction of ``progress_remaining``\n where end is reached e.g 0.1 then end is reached after 10%\n of the complete training process.\n :return: Linear schedule function.\n \"\"\"\n\n def func(progress_remaining: float) -> float:\n if (1 - progress_remaining) > end_fraction:\n return end\n else:\n return start + (1 - progress_remaining) * (end - start) / end_fraction\n\n return func" }, { "identifier": 
"get_parameters_by_name", "path": "stable_baselines3/common/utils.py", "snippet": "def get_parameters_by_name(model: th.nn.Module, included_names: Iterable[str]) -> List[th.Tensor]:\n \"\"\"\n Extract parameters from the state dict of ``model``\n if the name contains one of the strings in ``included_names``.\n\n :param model: the model where the parameters come from.\n :param included_names: substrings of names to include.\n :return: List of parameters values (Pytorch tensors)\n that matches the queried names.\n \"\"\"\n return [param for name, param in model.state_dict().items() if any([key in name for key in included_names])]" }, { "identifier": "is_vectorized_observation", "path": "stable_baselines3/common/utils.py", "snippet": "def is_vectorized_observation(observation: Union[int, np.ndarray], observation_space: spaces.Space) -> bool:\n \"\"\"\n For every observation type, detects and validates the shape,\n then returns whether or not the observation is vectorized.\n\n :param observation: the input observation to validate\n :param observation_space: the observation space\n :return: whether the given observation is vectorized or not\n \"\"\"\n\n is_vec_obs_func_dict = {\n spaces.Box: is_vectorized_box_observation,\n spaces.Discrete: is_vectorized_discrete_observation,\n spaces.MultiDiscrete: is_vectorized_multidiscrete_observation,\n spaces.MultiBinary: is_vectorized_multibinary_observation,\n spaces.Dict: is_vectorized_dict_observation,\n }\n\n for space_type, is_vec_obs_func in is_vec_obs_func_dict.items():\n if isinstance(observation_space, space_type):\n return is_vec_obs_func(observation, observation_space)\n else:\n # for-else happens if no break is called\n raise ValueError(f\"Error: Cannot determine if the observation is vectorized with the space type {observation_space}.\")" }, { "identifier": "polyak_update", "path": "stable_baselines3/common/utils.py", "snippet": "def polyak_update(\n params: Iterable[th.Tensor],\n target_params: Iterable[th.Tensor],\n tau: float,\n) -> None:\n \"\"\"\n Perform a Polyak average update on ``target_params`` using ``params``:\n target parameters are slowly updated towards the main parameters.\n ``tau``, the soft update coefficient controls the interpolation:\n ``tau=1`` corresponds to copying the parameters to the target ones whereas nothing happens when ``tau=0``.\n The Polyak update is done in place, with ``no_grad``, and therefore does not create intermediate tensors,\n or a computation graph, reducing memory cost and improving performance. 
We scale the target params\n by ``1-tau`` (in-place), add the new weights, scaled by ``tau`` and store the result of the sum in the target\n params (in place).\n See https://github.com/DLR-RM/stable-baselines3/issues/93\n\n :param params: parameters to use to update the target params\n :param target_params: parameters to update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1)\n \"\"\"\n with th.no_grad():\n # zip does not raise an exception if length of parameters does not match.\n for param, target_param in zip_strict(params, target_params):\n target_param.data.mul_(1 - tau)\n th.add(target_param.data, param.data, alpha=tau, out=target_param.data)" }, { "identifier": "CnnPolicy", "path": "stable_baselines3/dqn_ME/policies_ME.py", "snippet": "class QNetwork(BasePolicy):\nclass DQNPolicy(BasePolicy):\nclass CnnPolicy(DQNPolicy):\nclass MultiInputPolicy(DQNPolicy):\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n features_extractor: nn.Module,\n features_dim: int,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n normalize_images: bool = True,\n ):\n def forward(self, obs: th.Tensor) -> th.Tensor:\n def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n def _build(self, lr_schedule: Schedule) -> None:\n def make_q_net(self) -> QNetwork:\n def forward(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:\n def _predict(self, obs: th.Tensor, deterministic: bool = True) -> th.Tensor:\n def _get_constructor_parameters(self) -> Dict[str, Any]:\n def predict_logprob(self, observation: th.Tensor) -> th.Tensor:\n def set_training_mode(self, mode: bool) -> None:\n def __init__(\n self,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):\n def __init__(\n self,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n lr_schedule: Schedule,\n net_arch: Optional[List[int]] = None,\n activation_fn: Type[nn.Module] = nn.ReLU,\n features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,\n features_extractor_kwargs: Optional[Dict[str, Any]] = None,\n normalize_images: bool = True,\n optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,\n optimizer_kwargs: Optional[Dict[str, Any]] = None,\n ):" }, { "identifier": "DQN", "path": "stable_baselines3/dqn/dqn.py", "snippet": "class DQN(OffPolicyAlgorithm):\n \"\"\"\n Deep Q-Network (DQN)\n\n Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236\n Default hyperparameters are taken from the Nature paper,\n except for the 
optimizer and learning rate that were taken from Stable Baselines defaults.\n\n :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)\n :param env: The environment to learn from (if registered in Gym, can be str)\n :param learning_rate: The learning rate, it can be a function\n of the current progress remaining (from 1 to 0)\n :param buffer_size: size of the replay buffer\n :param learning_starts: how many steps of the model to collect transitions for before learning starts\n :param batch_size: Minibatch size for each gradient update\n :param tau: the soft update coefficient (\"Polyak update\", between 0 and 1) default 1 for hard update\n :param gamma: the discount factor\n :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit\n like ``(5, \"step\")`` or ``(2, \"episode\")``.\n :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)\n Set to ``-1`` means to do as many gradient steps as steps done in the environment\n during the rollout.\n :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).\n If ``None``, it will be automatically selected.\n :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.\n :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n :param target_update_interval: update the target network every ``target_update_interval``\n environment steps.\n :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced\n :param exploration_initial_eps: initial value of random action probability\n :param exploration_final_eps: final value of random action probability\n :param max_grad_norm: The maximum value for the gradient clipping\n :param tensorboard_log: the log location for tensorboard (if None, no logging)\n :param policy_kwargs: additional arguments to be passed to the policy on creation\n :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for\n debug messages\n :param seed: Seed for the pseudo random generators\n :param device: Device (cpu, cuda, ...) 
on which the code should be run.\n Setting it to auto, the code will be run on the GPU if possible.\n :param _init_setup_model: Whether or not to build the network at the creation of the instance\n \"\"\"\n\n policy_aliases: Dict[str, Type[BasePolicy]] = {\n \"MlpPolicy\": MlpPolicy,\n \"CnnPolicy\": CnnPolicy,\n \"MultiInputPolicy\": MultiInputPolicy,\n }\n\n def __init__(\n self,\n policy: Union[str, Type[DQNPolicy]],\n env: Union[GymEnv, str],\n learning_rate: Union[float, Schedule] = 1e-4,\n buffer_size: int = 1_000_000, # 1e6\n learning_starts: int = 50000,\n batch_size: int = 32,\n tau: float = 1.0,\n gamma: float = 0.99,\n train_freq: Union[int, Tuple[int, str]] = 4,\n gradient_steps: int = 1,\n replay_buffer_class: Optional[Type[ReplayBuffer]] = None,\n replay_buffer_kwargs: Optional[Dict[str, Any]] = None,\n optimize_memory_usage: bool = False,\n target_update_interval: int = 10000,\n exploration_fraction: float = 0.1,\n exploration_initial_eps: float = 1.0,\n exploration_final_eps: float = 0.05,\n max_grad_norm: float = 10,\n tensorboard_log: Optional[str] = None,\n policy_kwargs: Optional[Dict[str, Any]] = None,\n verbose: int = 0,\n seed: Optional[int] = None,\n device: Union[th.device, str] = \"auto\",\n _init_setup_model: bool = True,\n ):\n\n super().__init__(\n policy,\n env,\n learning_rate,\n buffer_size,\n learning_starts,\n batch_size,\n tau,\n gamma,\n train_freq,\n gradient_steps,\n action_noise=None, # No action noise\n replay_buffer_class=replay_buffer_class,\n replay_buffer_kwargs=replay_buffer_kwargs,\n policy_kwargs=policy_kwargs,\n tensorboard_log=tensorboard_log,\n verbose=verbose,\n device=device,\n seed=seed,\n sde_support=False,\n optimize_memory_usage=optimize_memory_usage,\n supported_action_spaces=(spaces.Discrete,),\n support_multi_env=True,\n )\n\n self.exploration_initial_eps = exploration_initial_eps\n self.exploration_final_eps = exploration_final_eps\n self.exploration_fraction = exploration_fraction\n self.target_update_interval = target_update_interval\n # For updating the target network with multiple envs:\n self._n_calls = 0\n self.max_grad_norm = max_grad_norm\n # \"epsilon\" for the epsilon-greedy exploration\n self.exploration_rate = 0.0\n # Linear schedule will be defined in `_setup_model()`\n self.exploration_schedule = None\n self.q_net, self.q_net_target = None, None\n\n if _init_setup_model:\n self._setup_model()\n\n def _setup_model(self) -> None:\n super()._setup_model()\n self._create_aliases()\n # Copy running stats, see GH issue #996\n self.batch_norm_stats = get_parameters_by_name(self.q_net, [\"running_\"])\n self.batch_norm_stats_target = get_parameters_by_name(self.q_net_target, [\"running_\"])\n self.exploration_schedule = get_linear_fn(\n self.exploration_initial_eps,\n self.exploration_final_eps,\n self.exploration_fraction,\n )\n # Account for multiple environments\n # each call to step() corresponds to n_envs transitions\n if self.n_envs > 1:\n if self.n_envs > self.target_update_interval:\n warnings.warn(\n \"The number of environments used is greater than the target network \"\n f\"update interval ({self.n_envs} > {self.target_update_interval}), \"\n \"therefore the target network will be updated after each call to env.step() \"\n f\"which corresponds to {self.n_envs} steps.\"\n )\n\n self.target_update_interval = max(self.target_update_interval // self.n_envs, 1)\n\n def _create_aliases(self) -> None:\n self.q_net = self.policy.q_net\n self.q_net_target = self.policy.q_net_target\n\n def _on_step(self) -> None:\n 
\"\"\"\n Update the exploration rate and target network if needed.\n This method is called in ``collect_rollouts()`` after each step in the environment.\n \"\"\"\n self._n_calls += 1\n if self._n_calls % self.target_update_interval == 0:\n polyak_update(self.q_net.parameters(), self.q_net_target.parameters(), self.tau)\n # Copy running stats, see GH issue #996\n polyak_update(self.batch_norm_stats, self.batch_norm_stats_target, 1.0)\n\n self.exploration_rate = self.exploration_schedule(self._current_progress_remaining)\n self.logger.record(\"rollout/exploration_rate\", self.exploration_rate)\n\n def train(self, gradient_steps: int, batch_size: int = 100) -> None:\n # Switch to train mode (this affects batch norm / dropout)\n self.policy.set_training_mode(True)\n # Update learning rate according to schedule\n self._update_learning_rate(self.policy.optimizer)\n\n losses = []\n for _ in range(gradient_steps):\n # Sample replay buffer\n replay_data = self.replay_buffer.sample(batch_size, env=self._vec_normalize_env)\n\n with th.no_grad():\n # Compute the next Q-values using the target network\n next_q_values = self.q_net_target(replay_data.next_observations)\n # Follow greedy policy: use the one with the highest value\n next_q_values, _ = next_q_values.max(dim=1)\n # Avoid potential broadcast issue\n next_q_values = next_q_values.reshape(-1, 1)\n # 1-step TD target\n target_q_values = replay_data.rewards + (1 - replay_data.dones) * self.gamma * next_q_values\n\n # Get current Q-values estimates\n current_q_values = self.q_net(replay_data.observations)\n\n # Retrieve the q-values for the actions from the replay buffer\n current_q_values = th.gather(current_q_values, dim=1, index=replay_data.actions.long())\n\n # Compute Huber loss (less sensitive to outliers)\n loss = F.smooth_l1_loss(current_q_values, target_q_values)\n losses.append(loss.item())\n\n # Optimize the policy\n self.policy.optimizer.zero_grad()\n loss.backward()\n # Clip gradient norm\n th.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.policy.optimizer.step()\n\n # Increase update counter\n self._n_updates += gradient_steps\n\n self.logger.record(\"train/n_updates\", self._n_updates, exclude=\"tensorboard\")\n self.logger.record(\"train/loss\", np.mean(losses))\n\n def predict(\n self,\n observation: Union[np.ndarray, Dict[str, np.ndarray]],\n state: Optional[Tuple[np.ndarray, ...]] = None,\n episode_start: Optional[np.ndarray] = None,\n deterministic: bool = False,\n ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:\n \"\"\"\n Overrides the base_class predict function to include epsilon-greedy exploration.\n\n :param observation: the input observation\n :param state: The last states (can be None, used in recurrent policies)\n :param episode_start: The last masks (can be None, used in recurrent policies)\n :param deterministic: Whether or not to return deterministic actions.\n :return: the model's action and the next state\n (used in recurrent policies)\n \"\"\"\n if not deterministic and np.random.rand() < self.exploration_rate:\n if is_vectorized_observation(maybe_transpose(observation, self.observation_space), self.observation_space):\n if isinstance(observation, dict):\n n_batch = observation[list(observation.keys())[0]].shape[0]\n else:\n n_batch = observation.shape[0]\n action = np.array([self.action_space.sample() for _ in range(n_batch)])\n else:\n action = np.array(self.action_space.sample())\n else:\n action, state = self.policy.predict(observation, state, episode_start, 
deterministic)\n return action, state\n\n def learn(\n self: SelfDQN,\n total_timesteps: int,\n callback: MaybeCallback = None,\n log_interval: int = 4,\n tb_log_name: str = \"DQN\",\n reset_num_timesteps: bool = True,\n progress_bar: bool = False,\n ) -> SelfDQN:\n\n return super().learn(\n total_timesteps=total_timesteps,\n callback=callback,\n log_interval=log_interval,\n tb_log_name=tb_log_name,\n reset_num_timesteps=reset_num_timesteps,\n progress_bar=progress_bar,\n )\n\n def _excluded_save_params(self) -> List[str]:\n return super()._excluded_save_params() + [\"q_net\", \"q_net_target\"]\n\n def _get_torch_save_params(self) -> Tuple[List[str], List[str]]:\n state_dicts = [\"policy\", \"policy.optimizer\"]\n\n return state_dicts, []" } ]
import warnings import numpy as np import torch as th from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union from gym import spaces from torch.nn import functional as F from stable_baselines3.common.buffers import ReplayBuffer from stable_baselines3.common.off_policy_algorithm import OffPolicyAlgorithm from stable_baselines3.common.policies import BasePolicy from stable_baselines3.common.preprocessing import maybe_transpose from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule from stable_baselines3.common.utils import get_linear_fn, get_parameters_by_name, is_vectorized_observation, polyak_update from stable_baselines3.dqn_ME.policies_ME import CnnPolicy, DQNPolicy, MlpPolicy, MultiInputPolicy from stable_baselines3.dqn.dqn import DQN
15,144
SelfDQN_ME = TypeVar("SelfDQN_ME", bound="DQN_ME")


class DQN_ME(DQN):
    """
    Soft Deep Q-Network (i.e. entropy-regularized DQN)

    Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236, https://arxiv.org/abs/1702.08165

    Default hyperparameters are taken from the Nature paper,
    except for the optimizer and learning rate that were taken from Stable Baselines defaults.

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
        If ``None``, it will be automatically selected.
    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param target_update_interval: update the target network every ``target_update_interval``
        environment steps.
    :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
    :param exploration_initial_eps: initial value of random action probability
    :param exploration_final_eps: final value of random action probability
    :param max_grad_norm: The maximum value for the gradient clipping
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
        debug messages
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """

    policy_aliases: Dict[str, Type[BasePolicy]] = {
        "MlpPolicy": MlpPolicy,
        "CnnPolicy": CnnPolicy,
SelfDQN_ME = TypeVar("SelfDQN_ME", bound="DQN_ME")


class DQN_ME(DQN):
    """
    Soft Deep Q-Network (i.e. entropy-regularized DQN)

    Paper: https://arxiv.org/abs/1312.5602, https://www.nature.com/articles/nature14236, https://arxiv.org/abs/1702.08165

    Default hyperparameters are taken from the Nature paper,
    except for the optimizer and learning rate that were taken from Stable Baselines defaults.

    :param policy: The policy model to use (MlpPolicy, CnnPolicy, ...)
    :param env: The environment to learn from (if registered in Gym, can be str)
    :param learning_rate: The learning rate, it can be a function
        of the current progress remaining (from 1 to 0)
    :param buffer_size: size of the replay buffer
    :param learning_starts: how many steps of the model to collect transitions for before learning starts
    :param batch_size: Minibatch size for each gradient update
    :param tau: the soft update coefficient ("Polyak update", between 0 and 1) default 1 for hard update
    :param gamma: the discount factor
    :param train_freq: Update the model every ``train_freq`` steps. Alternatively pass a tuple of frequency and unit
        like ``(5, "step")`` or ``(2, "episode")``.
    :param gradient_steps: How many gradient steps to do after each rollout (see ``train_freq``)
        Set to ``-1`` means to do as many gradient steps as steps done in the environment
        during the rollout.
    :param replay_buffer_class: Replay buffer class to use (for instance ``HerReplayBuffer``).
        If ``None``, it will be automatically selected.
    :param replay_buffer_kwargs: Keyword arguments to pass to the replay buffer on creation.
    :param optimize_memory_usage: Enable a memory efficient variant of the replay buffer
        at a cost of more complexity.
        See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195
    :param target_update_interval: update the target network every ``target_update_interval``
        environment steps.
    :param exploration_fraction: fraction of entire training period over which the exploration rate is reduced
    :param exploration_initial_eps: initial value of random action probability
    :param exploration_final_eps: final value of random action probability
    :param max_grad_norm: The maximum value for the gradient clipping
    :param tensorboard_log: the log location for tensorboard (if None, no logging)
    :param policy_kwargs: additional arguments to be passed to the policy on creation
    :param verbose: Verbosity level: 0 for no output, 1 for info messages (such as device or wrappers used), 2 for
        debug messages
    :param seed: Seed for the pseudo random generators
    :param device: Device (cpu, cuda, ...) on which the code should be run.
        Setting it to auto, the code will be run on the GPU if possible.
    :param _init_setup_model: Whether or not to build the network at the creation of the instance
    """

    policy_aliases: Dict[str, Type[BasePolicy]] = {
        "MlpPolicy": MlpPolicy,
        "CnnPolicy": CnnPolicy,
"MultiInputPolicy": MultiInputPolicy,
9
2023-10-28 01:09:21+00:00
24k
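A minimal usage sketch, not part of the dataset itself: assuming one record like the one above is loaded as a Python dict keyed by the schema's field names, its gold context snippet, import statement, and cropped code can be joined into a next-line completion prompt and the model output compared against the stored next line. The names record, build_prompt, and complete_next_line below are assumptions for illustration only.

# Illustrative only: how a record such as the one above might be consumed.
# `record` is assumed to be one row with the fields shown in this dump;
# `complete_next_line` is a placeholder for any model returning one line of code.
from typing import Any, Callable, Dict

def build_prompt(record: Dict[str, Any]) -> str:
    # Cross-file context: the snippet referenced by the gold snippet index
    # (index 9 for the record above).
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    # In-file context: the file's imports followed by the cropped code,
    # which ends right before the line to be predicted.
    return "\n".join([gold, record["import_statement"], record["cropped_code"]])

def exact_match(record: Dict[str, Any], complete_next_line: Callable[[str], str]) -> bool:
    # Compare the prediction against the stored next line,
    # '"MultiInputPolicy": MultiInputPolicy,' in the record above.
    return complete_next_line(build_prompt(record)).strip() == record["next_line"].strip()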
pytabular-ai/auto-scikit-dl
utils/model.py
[ { "identifier": "MLP", "path": "models/mlp.py", "snippet": "class MLP(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _MLP(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'mlp'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n \"\"\"MLP config preprocessing\"\"\"\n # process mlp configs\n self.saved_model_config = model_config.copy()\n d_layers = []\n n_layers, first_dim, mid_dim, last_dim = \\\n (\n model_config.pop('n_layers'), model_config.pop('first_dim'),\n model_config.pop('mid_dim'), model_config.pop('last_dim')\n )\n for i in range(n_layers):\n if i == 0:\n d_layers.append(first_dim)\n elif i == n_layers - 1 and n_layers > 1:\n d_layers.append(last_dim)\n else:\n d_layers.append(mid_dim)\n model_config['d_layers'] = d_layers\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "FTTransformer", "path": "models/ft_transformer.py", "snippet": "class FTTransformer(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = rtdl.FTTransformer.make_baseline(\n n_num_features=n_num_features,\n cat_cardinalities=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'ft-transformer'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n self.saved_model_config = model_config.copy()\n # process ftt configs\n if 'ffn_d_factor' in model_config:\n model_config['ffn_d_hidden'] = \\\n int(model_config['d_token'] * model_config.pop('ffn_d_factor'))\n return model_config\n \n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time # don't forget backward time, calculate in outer loop\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "AutoInt", "path": "models/autoint.py", "snippet": "class AutoInt(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _AutoInt(\n d_numerical=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'autoint'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args,\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "DCNv2", "path": "models/dcnv2.py", "snippet": "class DCNv2(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _DCNv2(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'dcnv2'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "NODE", "path": "models/node_model.py", "snippet": "class NODE(TabModel):\n def __init__(\n self,\n model_config: dict,\n n_num_features: int,\n categories: ty.Optional[ty.List[int]],\n n_labels: int,\n device: ty.Union[str, torch.device] = 'cuda',\n ):\n super().__init__()\n model_config = self.preproc_config(model_config)\n self.model = _NODE(\n d_in=n_num_features,\n categories=categories,\n d_out=n_labels,\n tree_dim=n_labels,\n **model_config\n ).to(device)\n self.base_name = 'node'\n self.device = torch.device(device)\n \n def preproc_config(self, model_config: dict):\n # process autoint configs\n self.saved_model_config = model_config.copy()\n return model_config\n\n def fit(\n self,\n # API for specical sampler like curriculum learning\n train_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal sampler if is None\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None,\n y_std: ty.Optional[float] = None, # for RMSE\n eval_set: ty.Tuple[torch.Tensor, np.ndarray] = None,\n patience: int = 0,\n task: str = None,\n training_args: dict = None,\n meta_args: ty.Optional[dict] = None,\n ):\n def train_step(model, x_num, x_cat, y): # input is X and y\n # process input (model-specific)\n # define your model API\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other training paradigm\n # 1. add self.dnn_fit2(...) in abstract class for special training process\n # 2. 
(recommended) override self.dnn_fit in abstract class\n self.dnn_fit( # uniform training paradigm\n dnn_fit_func=train_step,\n # training data\n train_loader=train_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std,\n # dev data\n eval_set=eval_set, patience=patience, task=task,\n # args\n training_args=training_args,\n meta_args=meta_args,\n )\n \n def predict(\n self,\n dev_loader: ty.Optional[ty.Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: ty.Optional[torch.Tensor] = None, \n X_cat: ty.Optional[torch.Tensor] = None, \n ys: ty.Optional[torch.Tensor] = None, \n y_std: ty.Optional[float] = None, # for RMSE\n task: str = None,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: ty.Optional[dict] = None,\n ):\n def inference_step(model, x_num, x_cat): # input only X (y inaccessible)\n \"\"\"\n Inference Process\n `no_grad` will be applied in `dnn_predict'\n \"\"\"\n # process input (model-specific)\n # define your running time calculation\n start_time = time.time()\n # define your model API\n logits = model(x_num, x_cat)\n used_time = time.time() - start_time\n return logits, used_time\n \n # to custom other inference paradigm\n # 1. add self.dnn_predict2(...) in abstract class for special training process\n # 2. (recommended) override self.dnn_predict in abstract class\n return self.dnn_predict( # uniform training paradigm\n dnn_predict_func=inference_step,\n dev_loader=dev_loader,\n X_num=X_num, X_cat=X_cat, ys=ys, y_std=y_std, task=task,\n return_probs=return_probs, return_metric=return_metric, return_loss=return_loss,\n meta_args=meta_args\n )\n \n def save(self, output_dir):\n check_dir(output_dir)\n self.save_pt_model(output_dir)\n self.save_history(output_dir)\n self.save_config(output_dir)" }, { "identifier": "TabModel", "path": "models/abstract.py", "snippet": "class TabModel(ABC):\n def __init__(self):\n self.model: Optional[nn.Module] = None # true model\n self.base_name = None # model type name\n self.device = None\n self.saved_model_config = None\n self.training_config = None\n self.meta_config = None\n self.post_init()\n\n def post_init(self):\n self.history = {\n 'train': {'loss': [], 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0}, \n 'val': {\n 'metric_name': None, 'metric': [], 'best_metric': None, \n 'log_loss': [], 'best_log_loss': None,\n 'best_epoch': None, 'best_step': None,\n 'tot_time': 0, 'avg_step_time': 0, 'avg_epoch_time': 0\n }, \n # 'test': {'loss': [], 'metric': [], 'final_metric': None},\n 'device': torch.cuda.get_device_name(),\n } # save metrics\n self.no_improvement = 0 # for dnn early stop\n \n def preproc_config(self, model_config: dict):\n \"\"\"default preprocessing for model configurations\"\"\"\n self.saved_model_config = model_config\n return model_config\n \n @abstractmethod\n def fit(\n self,\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n eval_set: Optional[Tuple[Union[torch.Tensor, np.ndarray]]],\n patience: int,\n task: str,\n training_args: dict,\n meta_args: Optional[dict],\n ):\n \"\"\"\n Training Model with Early Stop(optional)\n load best weights at the end\n \"\"\"\n pass\n \n def dnn_fit(\n self,\n *,\n dnn_fit_func: Optional[DNN_FIT_API] = None,\n # API for specical sampler like curriculum learning\n train_loader: Optional[Tuple[DataLoader, int]] = None, # (loader, missing_idx)\n # using normal dataloader sampler if is None\n X_num: 
Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None,\n y_std: Optional[float] = None, # for RMSE\n eval_set: Tuple[torch.Tensor, np.ndarray] = None, # similar API as sk-learn\n patience: int = 0, # <= 0 without early stop\n task: str,\n training_args: dict,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_fit)\n if dnn_fit_func is None:\n dnn_fit_func = default_dnn_fit\n # meta args\n if meta_args is None:\n meta_args = {}\n meta_args.setdefault('save_path', f'results/{self.base_name}')\n if not os.path.exists(meta_args['save_path']):\n print('create new results dir: ', meta_args['save_path'])\n os.makedirs(meta_args['save_path'])\n self.meta_config = meta_args\n # optimzier and scheduler\n training_args.setdefault('optimizer', 'adamw')\n optimizer, scheduler = TabModel.make_optimizer(self.model, training_args)\n # data loader\n training_args.setdefault('batch_size', 64)\n training_args.setdefault('ghost_batch_size', None)\n if train_loader is not None:\n train_loader, missing_idx = train_loader\n training_args['batch_size'] = train_loader.batch_size\n else:\n train_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=training_args['batch_size'],\n shuffle=True,\n )\n if eval_set is not None:\n eval_set = eval_set[0] # only use the first dev set\n dev_loader = TabModel.prepare_tensor_loader(\n X_num=eval_set[0], X_cat=eval_set[1], ys=eval_set[2],\n batch_size=training_args['batch_size'],\n )\n else:\n dev_loader = None\n # training loops\n training_args.setdefault('max_epochs', 1000)\n # training_args.setdefault('report_frequency', 100) # same as save_freq\n # training_args.setdefault('save_frequency', 100) # save per 100 steps\n training_args.setdefault('patience', patience)\n training_args.setdefault('save_frequency', 'epoch') # save per epoch\n self.training_config = training_args\n\n steps_per_backward = 1 if training_args['ghost_batch_size'] is None \\\n else training_args['batch_size'] // training_args['ghost_batch_size']\n steps_per_epoch = len(train_loader)\n tot_step, tot_time = 0, 0\n for e in range(training_args['max_epochs']):\n self.model.train()\n tot_loss = 0\n for step, batch in enumerate(train_loader):\n optimizer.zero_grad()\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n logits, forward_time = dnn_fit_func(self.model, x_num, x_cat, y)\n loss = TabModel.compute_loss(logits, y, task)\n # backward\n start_time = time.time()\n loss.backward()\n backward_time = time.time() - start_time\n self.gradient_policy()\n tot_time += forward_time + backward_time\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n # print or save infos\n tot_step += 1\n tot_loss += loss.cpu().item()\n if isinstance(training_args['save_frequency'], int) \\\n and tot_step % training_args['save_frequency'] == 0:\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n if training_args['save_frequency'] == 'epoch':\n if hasattr(self.model, 'layer_masks'):\n print('layer_mask: ', self.model.layer_masks > 0)\n is_early_stop = self.save_evaluate_dnn(\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n task, training_args['patience'], meta_args['save_path'],\n dev_loader, y_std,\n )\n if is_early_stop:\n 
self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n return\n self.save(meta_args['save_path'])\n self.load_best_dnn(meta_args['save_path'])\n \n @abstractmethod\n def predict(\n self,\n dev_loader: Optional[DataLoader],\n X_num: Union[torch.Tensor, np.ndarray], \n X_cat: Union[torch.Tensor, np.ndarray], \n ys: Union[torch.Tensor, np.ndarray],\n y_std: Optional[float],\n task: str,\n return_probs: bool = True,\n return_metric: bool = True,\n return_loss: bool = True,\n meta_args: Optional[dict] = None,\n ):\n \"\"\"\n Prediction\n \"\"\"\n pass\n \n def dnn_predict(\n self,\n *,\n dnn_predict_func: Optional[DNN_PREDICT_API] = None,\n dev_loader: Optional[Tuple[DataLoader, int]] = None, # reuse, (loader, missing_idx)\n X_num: Optional[torch.Tensor] = None, \n X_cat: Optional[torch.Tensor] = None, \n ys: Optional[torch.Tensor] = None, \n y_std: Optional[float] = None, # for RMSE\n task: str,\n return_probs: bool = True,\n return_metric: bool = False,\n return_loss: bool = False,\n meta_args: Optional[dict] = None,\n ):\n # DONE: move to abstract class (dnn_predict)\n if dnn_predict_func is None:\n dnn_predict_func = default_dnn_predict\n if dev_loader is None:\n dev_loader, missing_idx = TabModel.prepare_tensor_loader(\n X_num=X_num, X_cat=X_cat, ys=ys,\n batch_size=128,\n )\n else:\n dev_loader, missing_idx = dev_loader\n # print(\"Evaluate...\")\n predictions, golds = [], []\n tot_time = 0\n self.model.eval()\n for batch in dev_loader:\n x_num, x_cat, y = TabModel.parse_batch(batch, missing_idx, self.device)\n with torch.no_grad():\n logits, used_time = dnn_predict_func(self.model, x_num, x_cat)\n tot_time += used_time\n predictions.append(logits)\n golds.append(y)\n self.model.train()\n predictions = torch.cat(predictions).squeeze(-1)\n golds = torch.cat(golds)\n if return_loss:\n loss = TabModel.compute_loss(predictions, golds, task).cpu().item()\n else:\n loss = None\n if return_probs and task != 'regression':\n predictions = (\n predictions.sigmoid()\n if task == 'binclass'\n else predictions.softmax(-1)\n )\n prediction_type = 'probs'\n elif task == 'regression':\n prediction_type = None\n else:\n prediction_type = 'logits'\n predictions = predictions.cpu().numpy()\n golds = golds.cpu().numpy()\n if return_metric:\n metric = TabModel.calculate_metric(\n golds, predictions,\n task, prediction_type, y_std\n )\n logloss = (\n log_loss(golds, np.stack([1-predictions, predictions], axis=1), labels=[0,1])\n if task == 'binclass'\n else log_loss(golds, predictions, labels=list(range(len(set(golds)))))\n if task == 'multiclass'\n else None\n )\n else:\n metric, logloss = None, None\n results = {'loss': loss, 'metric': metric, 'time': tot_time, 'log_loss': logloss}\n if meta_args is not None:\n self.save_prediction(meta_args['save_path'], results)\n return predictions, results\n \n def gradient_policy(self):\n \"\"\"For post porcess model gradient\"\"\"\n pass\n \n @abstractmethod\n def save(self, output_dir):\n \"\"\"\n Save model weights and configs,\n the following default save functions\n can be combined to override this function\n \"\"\"\n pass\n\n def save_pt_model(self, output_dir):\n print('saving pt model weights...')\n # save model params\n torch.save(self.model.state_dict(), Path(output_dir) / 'final.bin')\n \n def save_tree_model(self, output_dir):\n print('saving tree model...')\n pass\n\n def save_history(self, output_dir):\n # save metrics\n with open(Path(output_dir) / 'results.json', 'w') as f:\n json.dump(self.history, f, indent=4)\n \n def 
save_prediction(self, output_dir, results, file='prediction'):\n check_dir(output_dir)\n # save test results\n print(\"saving prediction results\")\n saved_results = {\n 'loss': results['loss'], \n 'metric_name': results['metric'][1], \n 'metric': results['metric'][0], \n 'time': results['time'],\n 'log_loss': results['log_loss'],\n }\n with open(Path(output_dir) / f'{file}.json', 'w') as f:\n json.dump(saved_results, f, indent=4)\n \n def save_config(self, output_dir):\n def serialize(config: dict):\n for key in config:\n # serialized object to store yaml or json files \n if any(isinstance(config[key], obj) for obj in [Path, ]):\n config[key] = str(config[key])\n return config\n # save all configs\n with open(Path(output_dir) / 'configs.yaml', 'w') as f:\n configs = {\n 'model': self.saved_model_config, \n 'training': self.training_config,\n 'meta': serialize(self.meta_config)\n }\n yaml.dump(configs, f, indent=2)\n\n @staticmethod\n def make_optimizer(\n model: nn.Module,\n training_args: dict,\n ) -> Tuple[optim.Optimizer, optim.lr_scheduler._LRScheduler]:\n training_args.setdefault('optimizer', 'adamw')\n training_args.setdefault('no_wd_group', None)\n training_args.setdefault('scheduler', None)\n # optimizer\n if training_args['no_wd_group'] is not None:\n assert isinstance(training_args['no_wd_group'], list)\n def needs_wd(name):\n return all(x not in name for x in training_args['no_wd_group'])\n parameters_with_wd = [v for k, v in model.named_parameters() if needs_wd(k)]\n parameters_without_wd = [v for k, v in model.named_parameters() if not needs_wd(k)]\n model_params = [\n {'params': parameters_with_wd},\n {'params': parameters_without_wd, 'weight_decay': 0.0},\n ]\n else:\n model_params = model.parameters()\n optimizer = make_optimizer(\n training_args['optimizer'],\n model_params,\n training_args['lr'],\n training_args['weight_decay'],\n )\n # scheduler\n if training_args['scheduler'] is not None:\n scheduler = None\n else:\n scheduler = None\n\n return optimizer, scheduler\n \n @staticmethod\n def prepare_tensor_loader(\n X_num: Optional[torch.Tensor],\n X_cat: Optional[torch.Tensor],\n ys: torch.Tensor,\n batch_size: int = 64,\n shuffle: bool = False,\n ):\n assert not all(x is None for x in [X_num, X_cat])\n missing_placeholder = 0 if X_num is None else 1 if X_cat is None else -1\n datas = [x for x in [X_num, X_cat, ys] if x is not None]\n tensor_dataset = TensorDataset(*datas)\n tensor_loader = DataLoader(\n tensor_dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n )\n return tensor_loader, missing_placeholder\n \n @staticmethod\n def parse_batch(batch: Tuple[torch.Tensor], missing_idx, device: torch.device):\n if batch[0].device.type != device.type:\n # if batch[0].device != device: # initialize self.device with model.device rather than torch.device()\n # batch = (x.to(device) for x in batch) # generator\n batch = tuple([x.to(device) for x in batch]) # list\n if missing_idx == -1:\n return batch\n else:\n return batch[:missing_idx] + [None,] + batch[missing_idx:]\n \n @staticmethod\n def compute_loss(logits: torch.Tensor, targets: torch.Tensor, task: str, reduction: str = 'mean'):\n loss_fn = {\n 'binclass': F.binary_cross_entropy_with_logits,\n 'multiclass': F.cross_entropy,\n 'regression': F.mse_loss,\n }[task]\n return loss_fn(logits.squeeze(-1), targets, reduction=reduction)\n \n @staticmethod\n def calculate_metric(\n golds,\n predictions,\n task: str,\n prediction_type: Optional[str] = None,\n y_std: Optional[float] = None,\n ):\n \"\"\"Calculate metrics\"\"\"\n 
metric = {\n 'regression': 'rmse', \n 'binclass': 'roc_auc', \n 'multiclass': 'accuracy'\n }[task]\n \n return calculate_metrics(\n golds, predictions,\n task, prediction_type, y_std\n )[metric], metric\n \n def better_result(self, dev_metric, task, is_loss=False):\n if is_loss: # logloss\n best_dev_metric = self.history['val']['best_log_loss']\n if best_dev_metric is None or best_dev_metric > dev_metric:\n self.history['val']['best_log_loss'] = dev_metric\n return True\n else:\n return False\n best_dev_metric = self.history['val']['best_metric']\n if best_dev_metric is None:\n self.history['val']['best_metric'] = dev_metric\n return True\n elif task == 'regression': # rmse\n if best_dev_metric > dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n else:\n if best_dev_metric < dev_metric:\n self.history['val']['best_metric'] = dev_metric\n return True\n else:\n return False\n \n def early_stop_handler(self, epoch, tot_step, dev_metric, task, patience, save_path):\n if task != 'regression' and self.better_result(dev_metric['log_loss'], task, is_loss=True):\n # record best logloss\n torch.save(self.model.state_dict(), Path(save_path) / 'best-logloss.bin')\n if self.better_result(dev_metric['metric'], task):\n print('<<< Best Dev Result', end='')\n torch.save(self.model.state_dict(), Path(save_path) / 'best.bin')\n self.no_improvement = 0\n self.history['val']['best_epoch'] = epoch\n self.history['val']['best_step'] = tot_step\n else:\n self.no_improvement += 1\n print(f'| [no improvement] {self.no_improvement}', end='')\n if patience <= 0:\n return False\n else:\n return self.no_improvement >= patience\n \n def save_evaluate_dnn(\n self, \n # print and saved infos\n tot_step, steps_per_epoch, \n tot_loss, tot_time,\n # evaluate infos\n task, patience, save_path,\n dev_loader, y_std\n ):\n \"\"\"For DNN models\"\"\"\n epoch, step = tot_step // steps_per_epoch, (tot_step - 1) % steps_per_epoch + 1\n avg_loss = tot_loss / step\n self.history['train']['loss'].append(avg_loss)\n self.history['train']['tot_time'] = tot_time\n self.history['train']['avg_step_time'] = tot_time / tot_step\n self.history['train']['avg_epoch_time'] = self.history['train']['avg_step_time'] * steps_per_epoch\n print(f\"[epoch] {epoch} | [step] {step} | [tot_step] {tot_step} | [used time] {tot_time:.4g} | [train_loss] {avg_loss:.4g} \", end='')\n if dev_loader is not None:\n _, results = self.predict(dev_loader=dev_loader, y_std=y_std, task=task, return_metric=True)\n dev_metric, metric_name = results['metric']\n print(f\"| [{metric_name}] {dev_metric:.4g} \", end='')\n if task != 'regression':\n print(f\"| [log-loss] {results['log_loss']:.4g} \", end='')\n self.history['val']['log_loss'].append(results['log_loss'])\n self.history['val']['metric_name'] = metric_name\n self.history['val']['metric'].append(dev_metric)\n self.history['val']['tot_time'] += results['time']\n self.history['val']['avg_step_time'] = self.history['val']['tot_time'] / tot_step\n self.history['val']['avg_epoch_time'] = self.history['val']['avg_step_time'] * steps_per_epoch\n dev_metric = {'metric': dev_metric, 'log_loss': results['log_loss']}\n if self.early_stop_handler(epoch, tot_step, dev_metric, task, patience, save_path):\n print(' <<< Early Stop')\n return True\n print()\n return False\n \n def load_best_dnn(self, save_path, file='best'):\n model_file = Path(save_path) / f\"{file}.bin\"\n if not os.path.exists(model_file):\n print(f'There is no {file} checkpoint, loading the last one...')\n 
model_file = Path(save_path) / 'final.bin'\n else:\n print(f'Loading {file} model...')\n self.model.load_state_dict(torch.load(model_file))\n print('successfully')" }, { "identifier": "check_dir", "path": "models/abstract.py", "snippet": "def check_dir(dir):\n if not os.path.exists(dir):\n os.makedirs(dir)" }, { "identifier": "Dataset", "path": "data/utils.py", "snippet": "class Dataset:\n X_num: Optional[ArrayDict]\n X_cat: Optional[ArrayDict]\n y: ArrayDict\n y_info: Dict[str, Any]\n task_type: TaskType\n n_classes: Optional[int]\n name: Optional[str]\n\n @classmethod\n def from_dir(cls, dir_: Union[Path, str]) -> 'Dataset':\n dir_ = Path(dir_)\n\n def load(item) -> ArrayDict:\n def _load(file: Path):\n return cast(np.ndarray, np.load(file)) if file.exists() else None\n return {\n x: _load(dir_ / f'{item}_{x}.npy')\n for x in ['train', 'val', 'test']\n }\n\n info = load_json(dir_ / 'info.json')\n\n return Dataset(\n load('X_num') if dir_.joinpath('X_num_train.npy').exists() else None,\n load('X_cat') if dir_.joinpath('X_cat_train.npy').exists() else None,\n load('y'),\n {},\n TaskType(info['task_type']),\n info.get('n_classes'),\n info.get('name'),\n )\n\n @property\n def is_binclass(self) -> bool:\n return self.task_type == TaskType.BINCLASS\n\n @property\n def is_multiclass(self) -> bool:\n return self.task_type == TaskType.MULTICLASS\n\n @property\n def is_regression(self) -> bool:\n return self.task_type == TaskType.REGRESSION\n\n @property\n def n_num_features(self) -> int:\n return 0 if self.X_num is None else self.X_num['train'].shape[1]\n\n @property\n def n_cat_features(self) -> int:\n return 0 if self.X_cat is None else self.X_cat['train'].shape[1]\n\n @property\n def n_features(self) -> int:\n return self.n_num_features + self.n_cat_features\n\n def size(self, part: Optional[str]) -> int:\n return sum(map(len, self.y.values())) if part is None else len(self.y[part])\n\n @property\n def nn_output_dim(self) -> int:\n if self.is_multiclass:\n assert self.n_classes is not None\n return self.n_classes\n else:\n return 1\n\n def get_category_sizes(self, part: str) -> List[int]:\n return [] if self.X_cat is None else get_category_sizes(self.X_cat[part])" }, { "identifier": "DataProcessor", "path": "data/processor.py", "snippet": "class DataProcessor:\n \"\"\"Base class to process a single dataset\"\"\"\n def __init__(\n self, \n normalization: Optional[Normalization] = None,\n num_nan_policy: Optional[NumNanPolicy] = None,\n cat_nan_policy: Optional[CatNanPolicy] = None,\n cat_min_frequency: Optional[float] = None,\n cat_encoding: Optional[CatEncoding] = None,\n y_policy: Optional[YPolicy] = 'default',\n seed: int = 42,\n cache_dir: Optional[str] = None,\n ):\n self.transformation = Transformations(\n seed=seed, \n normalization=normalization, \n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n y_policy=y_policy\n )\n self.cache_dir = cache_dir\n \n def apply(self, dataset: Dataset):\n return transform_dataset(dataset, self.transformation, self.cache_dir)\n \n def save(self, file, **kwargs):\n data_config = {\n 'transformation': vars(self.transformation),\n 'cache_dir': str(self.cache_dir),\n 'meta': kwargs,\n }\n with open(file, 'w') as f:\n yaml.dump(data_config, f, indent=2)\n \n @staticmethod\n def check_splits(dataset: Dataset):\n valid_splits = True\n if 'train' in dataset.y:\n if 'test' not in dataset.y:\n warnings.warn(\"Missing test split, unable to prediction\")\n valid_splits = False\n if 
'val' not in dataset.y:\n warnings.warn(\"Missing dev split, unable to early stop, or ignore this message if no early stop needed.\")\n valid_splits = False\n if valid_splits:\n print(\"ready for training!\")\n else:\n raise ValueError(\"Missing training split in the dataset\")\n \n @staticmethod\n def prepare(dataset: Dataset, model: Optional[TabModel] = None, device: str = 'cuda'):\n assert model is not None or device is not None\n def get_spl(X: Optional[Union[ArrayDict, TensorDict]], spl):\n return None if X is None else X[spl]\n if device is not None or isinstance(model.model, nn.Module):\n device = device or model.model.device\n X_num, X_cat, ys = prepare_tensors(dataset, device)\n return {spl: (\n get_spl(X_num, spl), \n get_spl(X_cat, spl), \n get_spl(ys, spl)\n ) for spl in ys}\n else:\n return {spl: (\n get_spl(dataset.X_num, spl), \n get_spl(dataset.X_cat, spl), \n get_spl(dataset.y, spl)\n ) for spl in dataset.y}\n \n @staticmethod\n def load_preproc_default(\n output_dir, # output preprocessing infos\n model_name, \n dataset_name, \n benchmark_name: Optional[str] = None, \n seed: int = 42, \n cache_dir: Optional[str] = None\n ):\n global DATASETS, CUSTOM_DATASETS\n \"\"\"default data preprocessing pipeline\"\"\"\n if dataset_name in DATASETS or dataset_name in CUSTOM_DATASETS:\n data_src = DATASETS if dataset_name in DATASETS else CUSTOM_DATASETS\n data_config = data_src[dataset_name]\n data_path = Path(data_config['path'])\n data_config.setdefault('normalization', 'quantile')\n normalization = data_config['normalization']\n elif benchmark_name is not None:\n assert benchmark_name in BENCHMARKS, f\"Benchmark '{benchmark_name}' is not included, \\\n please choose one of '{list(BENCHMARKS.keys())}', for include your benchmark manually.\"\n benchmark_info = BENCHMARKS[benchmark_name]\n assert dataset_name in benchmark_info['datasets'], f\"dataset '{dataset_name}' not in benchmark '{benchmark_name}'\"\n data_path = Path(benchmark_info['path']) / dataset_name\n normalization = 'quantile'\n else:\n raise ValueError(f\"No dataset '{dataset_name}' is available, \\\n if you want to use a custom dataset (from csv file), using `add_custom_dataset`\")\n \n dataset = Dataset.from_dir(data_path)\n # default preprocess settings\n num_nan_policy = 'mean' if dataset.X_num is not None and \\\n any(np.isnan(dataset.X_num[spl]).any() for spl in dataset.X_num) else None\n cat_nan_policy = None\n if model_name in ['xgboost', 'catboost', 'lightgbm']: # for tree models or other sklearn algorithms\n normalization = None\n cat_min_frequency = None\n cat_encoding = 'one-hot'\n if model_name in ['catboost']:\n cat_encoding = None\n else: # for dnns\n # BUG: (dataset.X_cat[spl] == CAT_MISSING_VALUE).any() has different action\n # dtype: int -> bool, dtype: string -> array[bool], dtype: object -> np.load error\n # CURRENT: uniformly using string type to store catgorical features\n if dataset.X_cat is not None and \\\n any((dataset.X_cat[spl] == CAT_MISSING_VALUE).any() for spl in dataset.X_cat):\n cat_nan_policy = 'most_frequent'\n cat_min_frequency = None\n cat_encoding = None\n cache_dir = cache_dir or data_path\n processor = DataProcessor(\n normalization=normalization,\n num_nan_policy=num_nan_policy,\n cat_nan_policy=cat_nan_policy,\n cat_min_frequency=cat_min_frequency,\n cat_encoding=cat_encoding,\n seed=seed,\n cache_dir=Path(cache_dir),\n )\n dataset = processor.apply(dataset)\n # check train, val, test splits\n DataProcessor.check_splits(dataset)\n # save preprocessing infos\n if not 
os.path.exists(output_dir):\n os.makedirs(output_dir)\n processor.save(\n Path(output_dir) / 'data_config.yaml',\n benchmark=str(benchmark_name),\n dataset=dataset_name\n )\n return dataset\n\n @staticmethod\n def split(\n X_num: Optional[np.ndarray] = None, \n X_cat: Optional[np.ndarray] = None, \n ys: np.ndarray = None, \n train_ratio: float = 0.8,\n stratify: bool = True,\n seed: int = 42,\n ):\n assert 0 < train_ratio < 1\n assert ys is not None\n sample_idx = np.arange(len(ys))\n test_ratio = 1 - train_ratio\n _stratify = None if not stratify else ys\n train_idx, test_idx = train_test_split(sample_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n _stratify = None if not stratify else ys[train_idx]\n train_idx, val_idx = train_test_split(train_idx, test_size=test_ratio, random_state=seed, stratify=_stratify)\n if X_num is not None:\n X_num = {'train': X_num[train_idx], 'val': X_num[val_idx], 'test': X_num[test_idx]}\n if X_cat is not None:\n X_cat = {'train': X_cat[train_idx], 'val': X_cat[val_idx], 'test': X_cat[test_idx]}\n ys = {'train': ys[train_idx], 'val': ys[val_idx], 'test': ys[test_idx]}\n idx = {'train': train_idx, 'val': val_idx, 'test': test_idx}\n return X_num, X_cat, ys, idx\n \n @staticmethod\n def del_custom_dataset(\n dataset_names: Union[str, List[str]]\n ):\n global DATASETS, CUSTOM_DATASETS\n all_infos = read_custom_infos()\n if isinstance(dataset_names, str):\n dataset_names = [dataset_names]\n for dataset_name in dataset_names:\n if dataset_name not in CUSTOM_DATASETS:\n print(f\"custom dataset: {dataset_name} not exist\")\n continue\n elif dataset_name in DATASETS:\n print(f\"can not delete an in-built dataset: {dataset_name}\")\n continue\n data_info = CUSTOM_DATASETS[dataset_name]\n task = data_info['task_type']\n data_path = data_info['path']\n data_idx = [info['name'] for info in all_infos['data_list']].index(dataset_name)\n all_infos['data_list'].pop(data_idx)\n all_infos['n_datasets'] -= 1\n all_infos[task] -= 1\n shutil.rmtree(data_path)\n print(f\"delete dataset: {dataset_name} successfully\")\n write_custom_infos(all_infos)\n from .env import CUSTOM_DATASETS # BUG: refresh the global variable\n\n @staticmethod\n def add_custom_dataset(\n file: Union[str, Path],\n format: DataFileType = 'csv',\n dataset_name: Optional[str] = None,\n task: Optional[str] = None,\n num_cols: Optional[List[int]] = None,\n cat_cols: Optional[List[int]] = None,\n label_index: int = -1, # label column index\n header: Optional[int] = 0, # header row\n max_cat_num: int = 16,\n train_ratio: float = 0.8, # split train / test, train / val\n seed: float = 42, # random split seed\n ):\n \"\"\"\n Support for adding a custom dataset from a single data file\n ---\n read a raw csv file, process into 3 splits (train, val, test), and add to custom_datasets\n\n TODO: adding a dataset from prepared data split files \n TODO: support no validation split\n \"\"\"\n global DATASETS, CUSTOM_DATASETS\n file_name = Path(file).name\n assert file_name.endswith(format), f'please check if the file \\\n is in {format} format, or add the suffix manually'\n dataset_name = dataset_name or file_name[:-len(format)-1]\n assert dataset_name not in DATASETS, f'same dataset name as an in-built dataset: {dataset_name}'\n assert dataset_name not in CUSTOM_DATASETS, f\"existing custom dataset '{dataset_name}' found\"\n \n if format == 'csv':\n datas: pd.DataFrame = pd.read_csv(file, header=header)\n columns = datas.columns if header is not None else None\n elif format == 'npy':\n header = None # 
numpy file has no headers\n columns = None\n datas = np.load(file)\n raise NotImplementedError(\"only support load csv file now\")\n else:\n raise ValueError(\"other support format to be add further\")\n \n X_idx = list(range(datas.shape[1]))\n y_idx = X_idx.pop(label_index)\n label_name = columns[y_idx] if columns is not None else None\n # numerical and categorical feature detection\n if num_cols is None or cat_cols is None:\n print('automatically detect column type...')\n print('max category amount: ', max_cat_num)\n num_cols, cat_cols = [], []\n num_names, cat_names = [], []\n for i in X_idx:\n if datas.iloc[:, i].values.dtype == float:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n else: # int or object (str)\n if len(set(datas.iloc[:, i].values)) <= max_cat_num:\n cat_cols.append(i)\n if columns is not None:\n cat_names.append(columns[i])\n elif datas.iloc[:, i].values.dtype == int:\n num_cols.append(i)\n if columns is not None:\n num_names.append(columns[i])\n if not num_names and not cat_names:\n num_names, cat_names = None, None\n elif columns:\n num_names = [columns[i] for i in num_cols]\n cat_names = [columns[i] for i in cat_cols]\n else:\n num_names, cat_names = None, None\n n_num_features = len(num_cols)\n n_cat_features = len(cat_cols)\n # build X_num and X_cat\n X_num, ys = None, datas.iloc[:, y_idx].values\n if len(num_cols) > 0:\n X_num = datas.iloc[:, num_cols].values.astype(np.float32)\n # check data type\n X_cat = []\n for i in cat_cols:\n if datas.iloc[:, i].values.dtype == int:\n x = datas.iloc[:, i].values.astype(np.int64)\n # ordered by value\n # x = OrdinalEncoder(categories=[sorted(list(set(x)))]).fit_transform(x.reshape(-1, 1))\n else: # string object\n x = datas.iloc[:, i].values.astype(object)\n # most_common = [item[0] for item in Counter(x).most_common()]\n # ordered by frequency\n # x = OrdinalEncoder(categories=[most_common]).fit_transform(x.reshape(-1, 1))\n X_cat.append(x.astype(np.str0)) # Encoder Later, compatible with Line 140\n X_cat = np.stack(X_cat, axis=1) if len(X_cat) > 0 else None # if using OrdinalEncoder, np.concatenate\n # detect task type\n def process_non_regression_labels(ys: np.ndarray, task):\n if ys.dtype in [int, float]:\n ys = OrdinalEncoder(categories=[sorted(list(set(ys)))]).fit_transform(ys.reshape(-1, 1))\n else:\n most_common = [item[0] for item in Counter(ys).most_common()]\n ys = OrdinalEncoder(categories=most_common).fit_transform(ys.reshape(-1, 1))\n ys = ys[:, 0]\n return ys.astype(np.float32) if task == 'binclass' else ys.astype(np.int64)\n \n if task is None:\n if ys.dtype in [int, object]:\n task = 'binclass' if len(set(ys)) == 2 else 'multiclass'\n ys = process_non_regression_labels(ys, task)\n elif ys.dtype == float:\n if len(set(ys)) == 2:\n task = 'binclass'\n ys = process_non_regression_labels(ys, task)\n else:\n task = 'regression'\n ys = ys.astype(np.float32)\n else:\n if task == 'regression':\n ys = ys.astype(np.float32)\n else:\n ys = process_non_regression_labels(ys, task)\n\n # split datasets\n stratify = task != 'regression'\n X_num, X_cat, ys, idx = DataProcessor.split(X_num, X_cat, ys, train_ratio, stratify, seed)\n # push to CUSTOM_DATASETS\n data_info = {\n 'name': dataset_name,\n 'id': f'{dataset_name.lower()}--custom',\n 'task_type': task,\n 'label_name': label_name,\n 'n_num_features': n_num_features,\n 'num_feature_names': num_names,\n 'n_cat_features': n_cat_features,\n 'cat_feature_names': cat_names,\n 'test_size': len(ys['test']),\n 'train_size': len(ys['train']),\n 
'val_size': len(ys['val'])}\n push_custom_datasets(X_num, X_cat, ys, idx, data_info)\n from .env import CUSTOM_DATASETS # refresh global variable\n print(f'finish, now you can load your dataset with `load_preproc_default({dataset_name})`')" } ]
import os import time import json import yaml import shutil import random import datetime import numpy as np import torch import optuna from pathlib import Path from typing import Dict, List, Tuple, Union, Optional, Literal from models import MLP, FTTransformer, AutoInt, DCNv2, NODE from models.abstract import TabModel, check_dir from data.utils import Dataset from data.processor import DataProcessor
15,895
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda', ) -> TabModel: """Process Model Configs and Call Specific Model APIs""" assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of valid models in {MODEL_CARDS}" if isinstance(model_config, str): model_config = load_config_from_file(model_config)['model'] if MODEL_CARDS[model_name] is None: raise NotImplementedError("Please add corresponding model implementation to `models` module") if sparsity_scheme is not None: assert 'mlp' in model_name return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels, sparsity_scheme=sparsity_scheme) return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels) def tune( model_name: str = None, search_config: Union[dict, str] = None, dataset: Dataset = None, batch_size: int = 64, patience: int = 8, # a small patience for quick tune n_iterations: int = 50, framework: HPOLib = 'optuna', device: Union[str, torch.device] = 'cuda', output_dir: Optional[str] = None, ) -> 'TabModel': # assert framework in HPOLib, f"hyper tune only support the following frameworks '{HPOLib}'" # device device = torch.device(device) # task params n_num_features = dataset.n_num_features categories = dataset.get_category_sizes('train') if len(categories) == 0: categories = None n_labels = dataset.n_classes or 1 y_std = dataset.y_info.get('std') # for regression # preprocess
MODEL_CARDS = { 'xgboost': None, 'catboost': None, 'lightgbm': None, 'mlp': MLP, 'autoint': AutoInt, 'dcnv2': DCNv2, 'node': NODE, 'ft-transformer': FTTransformer, 'saint': None, 't2g-former': None, 'excel-former': None, } HPOLib = Literal['optuna', 'hyperopt'] # TODO: add 'hyperopt' support def get_model_cards(): return { 'ready': sorted(list([key for key, value in MODEL_CARDS.items() if value])), 'comming soon': sorted(list([key for key, value in MODEL_CARDS.items() if not value])) } def seed_everything(seed=42): ''' Sets the seed of the entire notebook so results are the same every time we run. This is for REPRODUCIBILITY. ''' random.seed(seed) # Set a fixed value for the hash seed os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed) torch.manual_seed(seed) if torch.cuda.is_available(): torch.cuda.manual_seed(seed) torch.cuda.manual_seed_all(seed) # When running on the CuDNN backend, two further options must be set torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def load_config_from_file(file): file = str(file) if file.endswith('.yaml'): with open(file, 'r') as f: cfg = yaml.safe_load(f) elif file.endswith('.json'): with open(file, 'r') as f: cfg = json.load(f) else: raise AssertionError('Config files only support yaml or json format now.') return cfg def extract_config(model_config: dict, is_large_data: bool = False): """selection of different search spaces""" used_cfgs = {"model": {}, "training": {}, 'meta': model_config.get('meta', {})} for field in ['model', 'training']: for k in model_config[field]: cfgs = model_config[field][k] if 'type2' not in cfgs: used_cfg = cfgs else: if not is_large_data: used_cfg = {k: v for k, v in cfgs.items() if not k.endswith('2')} else: used_cfg = {k[:-1]: v for k, v in cfgs.items() if k.endswith('2')} used_cfgs[field][k] = used_cfg return used_cfgs def make_baseline( model_name, model_config: Union[dict, str], n_num: int, cat_card: Optional[List[int]], n_labels: int, sparsity_scheme: Optional[str] = None, device: Union[str, torch.device] = 'cuda', ) -> TabModel: """Process Model Configs and Call Specific Model APIs""" assert model_name in MODEL_CARDS, f"unrecognized `{model_name}` model name, choose one of valid models in {MODEL_CARDS}" if isinstance(model_config, str): model_config = load_config_from_file(model_config)['model'] if MODEL_CARDS[model_name] is None: raise NotImplementedError("Please add corresponding model implementation to `models` module") if sparsity_scheme is not None: assert 'mlp' in model_name return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels, sparsity_scheme=sparsity_scheme) return MODEL_CARDS[model_name]( model_config=model_config, n_num_features=n_num, categories=cat_card, n_labels=n_labels) def tune( model_name: str = None, search_config: Union[dict, str] = None, dataset: Dataset = None, batch_size: int = 64, patience: int = 8, # a small patience for quick tune n_iterations: int = 50, framework: HPOLib = 'optuna', device: Union[str, torch.device] = 'cuda', output_dir: Optional[str] = None, ) -> 'TabModel': # assert framework in HPOLib, f"hyper tune only support the following frameworks '{HPOLib}'" # device device = torch.device(device) # task params n_num_features = dataset.n_num_features categories = dataset.get_category_sizes('train') if len(categories) == 0: categories = None n_labels = dataset.n_classes or 1 y_std = dataset.y_info.get('std') # for regression # preprocess
datas = DataProcessor.prepare(dataset, device=device)
8
2023-10-30 14:55:44+00:00
24k
hyperspy/exspy
exspy/tests/models/test_eelsmodel.py
[ { "identifier": "elements_db", "path": "exspy/misc/elements.py", "snippet": "" }, { "identifier": "_GOSH_URL", "path": "exspy/misc/eels/gosh_gos.py", "snippet": "_GOSH_URL = f\"doi:{_GOSH_DOI}/Segger_Guzzinati_Kohl_1.5.0.gosh\"" }, { "identifier": "_GOSH_KNOWN_HASH", "path": "exspy/misc/eels/gosh_gos.py", "snippet": "_GOSH_KNOWN_HASH = \"md5:7fee8891c147a4f769668403b54c529b\"" }, { "identifier": "EELSSpectrum", "path": "exspy/signals/eels.py", "snippet": "class EELSSpectrum(Signal1D):\n\n \"\"\"Signal class for EELS spectra.\"\"\"\n\n _signal_type = \"EELS\"\n _alias_signal_types = [\"TEM EELS\"]\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Attributes defaults\n self.subshells = set()\n self.elements = set()\n self.edges = list()\n if hasattr(self.metadata, \"Sample\") and hasattr(\n self.metadata.Sample, \"elements\"\n ):\n self.add_elements(self.metadata.Sample.elements)\n self.axes_manager.signal_axes[0].is_binned = True\n self._edge_markers = {\"names\": [], \"lines\": None, \"texts\": None}\n\n def add_elements(self, elements, include_pre_edges=False):\n \"\"\"Declare the elemental composition of the sample.\n\n The ionisation edges of the elements present in the current\n energy range will be added automatically.\n\n Parameters\n ----------\n elements : tuple of strings\n The symbol of the elements. Note this input must always be\n in the form of a tuple. Meaning: add_elements(('C',)) will\n work, while add_elements(('C')) will NOT work.\n include_pre_edges : bool\n If True, the ionization edges with an onset below the lower\n energy limit of the SI will be included\n\n Examples\n --------\n\n >>> s = hs.signals.EELSSpectrum(np.arange(1024))\n >>> s.add_elements(('C', 'O'))\n\n Raises\n ------\n ValueError\n\n \"\"\"\n if not isiterable(elements) or isinstance(elements, str):\n raise ValueError(\n \"Input must be in the form of a tuple. 
For example, \"\n \"if `s` is the variable containing this EELS spectrum:\\n \"\n \">>> s.add_elements(('C',))\\n\"\n \"See the docstring for more information.\"\n )\n\n for element in elements:\n if isinstance(element, bytes):\n element = element.decode()\n if element in elements_db:\n self.elements.add(element)\n else:\n raise ValueError(\n \"%s is not a valid symbol of a chemical element\" % element\n )\n if not hasattr(self.metadata, \"Sample\"):\n self.metadata.add_node(\"Sample\")\n self.metadata.Sample.elements = list(self.elements)\n if self.elements:\n self.generate_subshells(include_pre_edges)\n\n def generate_subshells(self, include_pre_edges=False):\n \"\"\"Calculate the subshells for the current energy range for the\n elements present in self.elements\n\n Parameters\n ----------\n include_pre_edges : bool\n If True, the ionization edges with an onset below the lower\n energy limit of the SI will be included\n\n \"\"\"\n Eaxis = self.axes_manager.signal_axes[0].axis\n if not include_pre_edges:\n start_energy = Eaxis[0]\n else:\n start_energy = 0.0\n end_energy = Eaxis[-1]\n for element in self.elements:\n e_shells = list()\n for shell in elements_db[element][\"Atomic_properties\"][\"Binding_energies\"]:\n if shell[-1] != \"a\":\n energy = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ][shell][\"onset_energy (eV)\"]\n if start_energy <= energy <= end_energy:\n subshell = \"%s_%s\" % (element, shell)\n if subshell not in self.subshells:\n self.subshells.add(\"%s_%s\" % (element, shell))\n e_shells.append(subshell)\n\n def edges_at_energy(\n self,\n energy=\"interactive\",\n width=10,\n only_major=False,\n order=\"closest\",\n display=True,\n toolkit=None,\n ):\n \"\"\"Show EELS edges according to an energy range selected from the\n spectrum or within a provided energy window\n\n Parameters\n ----------\n energy : 'interactive' or float\n If it is 'interactive', a table with edges are shown and it depends\n on the energy range selected in the spectrum. If it is a float, a\n table with edges are shown and it depends on the energy window\n defined by energy +/- (width/2). The default is 'interactive'.\n width : float\n Width of window, in eV, around energy in which to find nearby\n energies, i.e. a value of 10 eV (the default) means to\n search +/- 5 eV. The default is 10.\n only_major : bool\n Whether to show only the major edges. The default is False.\n order : str\n Sort the edges, if 'closest', return in the order of energy\n difference, if 'ascending', return in ascending order, similarly\n for 'descending'. The default is 'closest'.\n\n Returns\n -------\n An interactive widget if energy is 'interactive', or a html-format\n table or ASCII table, depends on the environment.\n \"\"\"\n\n if energy == \"interactive\":\n er = EdgesRange(self, interactive=True)\n return er.gui(display=display, toolkit=toolkit)\n else:\n self.print_edges_near_energy(energy, width, only_major, order)\n\n @staticmethod\n def print_edges_near_energy(\n energy=None, width=10, only_major=False, order=\"closest\", edges=None\n ):\n \"\"\"Find and print a table of edges near a given energy that are within\n the given energy window.\n\n Parameters\n ----------\n energy : float\n Energy to search, in eV\n width : float\n Width of window, in eV, around energy in which to find nearby\n energies, i.e. a value of 10 eV (the default) means to\n search +/- 5 eV. The default is 10.\n only_major : bool\n Whether to show only the major edges. 
The default is False.\n order : str\n Sort the edges, if 'closest', return in the order of energy\n difference, if 'ascending', return in ascending order, similarly\n for 'descending'. The default is 'closest'.\n edges : iterable\n A sequence of edges, if provided, it overrides energy, width,\n only_major and order.\n\n Returns\n -------\n A PrettyText object where its representation is ASCII in terminal and\n html-formatted in Jupyter notebook\n \"\"\"\n\n if edges is None and energy is not None:\n edges = get_edges_near_energy(\n energy=energy, width=width, only_major=only_major, order=order\n )\n elif edges is None and energy is None:\n raise ValueError(\"Either energy or edges should be provided.\")\n\n table = PrettyTable()\n table.field_names = [\"edge\", \"onset energy (eV)\", \"relevance\", \"description\"]\n\n for edge in edges:\n element, shell = edge.split(\"_\")\n shell_dict = elements_db[element][\"Atomic_properties\"][\"Binding_energies\"][\n shell\n ]\n\n onset = shell_dict[\"onset_energy (eV)\"]\n relevance = shell_dict[\"relevance\"]\n threshold = shell_dict[\"threshold\"]\n edge_ = shell_dict[\"edge\"]\n description = threshold + \". \" * (threshold != \"\" and edge_ != \"\") + edge_\n\n table.add_row([edge, onset, relevance, description])\n\n # this ensures the html version try its best to mimick the ASCII one\n table.format = True\n\n display(table)\n\n def estimate_zero_loss_peak_centre(self, mask=None):\n \"\"\"Estimate the position of the zero-loss peak.\n\n This function provides just a coarse estimation of the position\n of the zero-loss peak centre by computing the position of the maximum\n of the spectra. For subpixel accuracy use `estimate_shift1D`.\n\n Parameters\n ----------\n mask : Signal1D of bool data type or bool array\n It must have signal_dimension = 0 and navigation_shape equal to the\n navigation shape of the current signal. Where mask is True the\n shift is not computed and set to nan.\n\n Returns\n -------\n zlpc : Signal1D subclass\n The estimated position of the maximum of the ZLP peak.\n\n Notes\n -----\n This function only works when the zero-loss peak is the most\n intense feature in the spectrum. If it is not in most cases\n the spectrum can be cropped to meet this criterion.\n Alternatively use `estimate_shift1D`.\n\n See Also\n --------\n estimate_shift1D, align_zero_loss_peak\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n self._check_navigation_mask(mask)\n if isinstance(mask, BaseSignal):\n mask = mask.data\n zlpc = self.valuemax(-1)\n if mask is not None:\n zlpc.data = np.where(mask, np.nan, zlpc.data)\n zlpc.set_signal_type(\"\")\n title = self.metadata.General.title\n zlpc.metadata.General.title = \"ZLP(%s)\" % title\n return zlpc\n\n def align_zero_loss_peak(\n self,\n calibrate=True,\n also_align=[],\n print_stats=True,\n subpixel=True,\n mask=None,\n signal_range=None,\n show_progressbar=None,\n crop=True,\n **kwargs,\n ):\n \"\"\"Align the zero-loss peak.\n\n This function first aligns the spectra using the result of\n `estimate_zero_loss_peak_centre` which finds the maximum in the\n given energy range, then if subpixel is True,\n proceeds to align with subpixel accuracy using `align1D`. 
The offset\n is automatically correct if `calibrate` is True.\n\n Parameters\n ----------\n calibrate : bool\n If True, set the offset of the spectral axis so that the\n zero-loss peak is at position zero.\n also_align : list of signals\n A list containing other spectra of identical dimensions to\n align using the shifts applied to the current spectrum.\n If `calibrate` is True, the calibration is also applied to\n the spectra in the list.\n print_stats : bool\n If True, print summary statistics of the ZLP maximum before\n the alignment.\n subpixel : bool\n If True, perform the alignment with subpixel accuracy\n using cross-correlation.\n mask : Signal1D of bool data type or bool array.\n It must have signal_dimension = 0 and navigation_shape equal to\n the shape of the current signal. Where mask is True the shift is\n not computed and set to nan.\n signal_range : tuple of integers, tuple of floats. Optional\n Will only search for the ZLP within the signal_range. If given\n in integers, the range will be in index values. If given floats,\n the range will be in spectrum values. Useful if there are features\n in the spectrum which are more intense than the ZLP.\n Default is searching in the whole signal. Note that ROIs can be used\n in place of a tuple.\n %s\n %s\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Examples\n --------\n >>> s_ll = hs.signals.EELSSpectrum(np.zeros(1000))\n >>> s_ll.data[100] = 100\n >>> s_ll.align_zero_loss_peak()\n\n Aligning both the lowloss signal and another signal\n\n >>> s = hs.signals.EELSSpectrum(np.range(1000))\n >>> s_ll.align_zero_loss_peak(also_align=[s])\n\n Aligning within a narrow range of the lowloss signal\n\n >>> s_ll.align_zero_loss_peak(signal_range=(-10.,10.))\n\n\n See Also\n --------\n estimate_zero_loss_peak_centre, align1D, estimate_shift1D.\n\n Notes\n -----\n Any extra keyword arguments are passed to `align1D`. 
For\n more information read its docstring.\n\n \"\"\"\n\n def substract_from_offset(value, signals):\n # Test that axes is uniform\n if not self.axes_manager[-1].is_uniform:\n raise NotImplementedError(\n \"Support for EELS signals with \"\n \"non-uniform signal axes is not yet implemented.\"\n )\n if isinstance(value, da.Array):\n value = value.compute()\n for signal in signals:\n signal.axes_manager[-1].offset -= value\n signal.events.data_changed.trigger(signal)\n\n def estimate_zero_loss_peak_centre(s, mask, signal_range):\n if signal_range:\n zlpc = s.isig[\n signal_range[0] : signal_range[1]\n ].estimate_zero_loss_peak_centre(mask=mask)\n else:\n zlpc = s.estimate_zero_loss_peak_centre(mask=mask)\n return zlpc\n\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n\n mean_ = np.nanmean(zlpc.data)\n\n if print_stats is True:\n print(underline(\"Initial ZLP position statistics\"))\n zlpc.print_summary_statistics()\n\n for signal in also_align + [self]:\n shift_array = -zlpc.data + mean_\n if zlpc._lazy:\n # We must compute right now because otherwise any changes to the\n # axes_manager of the signal later in the workflow may result in\n # a wrong shift_array\n shift_array = shift_array.compute()\n signal.shift1D(shift_array, crop=crop, show_progressbar=show_progressbar)\n\n if calibrate is True:\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n substract_from_offset(np.nanmean(zlpc.data), also_align + [self])\n\n if subpixel is False:\n return\n\n start, end = signal_range or (-3.0, 3.0)\n\n if calibrate is False:\n start += mean_\n end += mean_\n\n start = (\n start\n if start > self.axes_manager[-1].axis[0]\n else self.axes_manager[-1].axis[0]\n )\n end = (\n end\n if end < self.axes_manager[-1].axis[-1]\n else self.axes_manager[-1].axis[-1]\n )\n\n if self.axes_manager.navigation_size > 1:\n self.align1D(\n start,\n end,\n also_align=also_align,\n show_progressbar=show_progressbar,\n mask=mask,\n crop=crop,\n **kwargs,\n )\n if calibrate is True:\n zlpc = estimate_zero_loss_peak_centre(\n self, mask=mask, signal_range=signal_range\n )\n substract_from_offset(np.nanmean(zlpc.data), also_align + [self])\n\n align_zero_loss_peak.__doc__ %= (SHOW_PROGRESSBAR_ARG, CROP_PARAMETER_DOC)\n\n def get_zero_loss_peak_mask(self, zero_loss_peak_mask_width=5.0, signal_mask=None):\n \"\"\"Return boolean array with True value at the position of the zero\n loss peak. 
This mask can be used to restrict operation to the signal\n locations not marked as True (masked).\n\n Parameters\n ----------\n zero_loss_peak_mask_width: float\n Width of the zero loss peak mask.\n %s\n\n Returns\n -------\n bool array\n \"\"\"\n zlpc = self.estimate_zero_loss_peak_centre()\n (signal_axis,) = self.axes_manager[self.axes_manager.signal_axes]\n axis = signal_axis.axis\n mini_value = zlpc.data.mean() - zero_loss_peak_mask_width / 2\n maxi_value = zlpc.data.mean() + zero_loss_peak_mask_width / 2\n mask = np.logical_and(mini_value <= axis, axis <= maxi_value)\n if signal_mask is not None:\n signal_mask = np.logical_or(mask, signal_mask)\n else:\n signal_mask = mask\n return signal_mask\n\n get_zero_loss_peak_mask.__doc__ %= SIGNAL_MASK_ARG\n\n def spikes_diagnosis(\n self,\n signal_mask=None,\n navigation_mask=None,\n zero_loss_peak_mask_width=None,\n **kwargs,\n ):\n if zero_loss_peak_mask_width is not None:\n signal_mask = self.get_zero_loss_peak_mask(\n zero_loss_peak_mask_width, signal_mask\n )\n super().spikes_diagnosis(\n signal_mask=signal_mask, navigation_mask=None, **kwargs\n )\n\n spikes_diagnosis.__doc__ = SPIKES_DIAGNOSIS_DOCSTRING % MASK_ZERO_LOSS_PEAK_WIDTH\n\n def spikes_removal_tool(\n self,\n signal_mask=None,\n navigation_mask=None,\n threshold=\"auto\",\n zero_loss_peak_mask_width=None,\n interactive=True,\n display=True,\n toolkit=None,\n ):\n if zero_loss_peak_mask_width is not None:\n axis = self.axes_manager.signal_axes[0].axis\n # check the zero_loss is in the signal\n if (\n axis[0] - zero_loss_peak_mask_width / 2 > 0\n or axis[-1] + zero_loss_peak_mask_width / 2 < 0\n ):\n raise ValueError(\"The zero loss peaks isn't in the energy range.\")\n signal_mask = self.get_zero_loss_peak_mask(\n zero_loss_peak_mask_width, signal_mask\n )\n super().spikes_removal_tool(\n signal_mask=signal_mask,\n navigation_mask=navigation_mask,\n threshold=threshold,\n interactive=interactive,\n display=display,\n toolkit=toolkit,\n )\n\n spikes_removal_tool.__doc__ = SPIKES_REMOVAL_TOOL_DOCSTRING % (\n SIGNAL_MASK_ARG,\n NAVIGATION_MASK_ARG,\n MASK_ZERO_LOSS_PEAK_WIDTH,\n DISPLAY_DT,\n TOOLKIT_DT,\n )\n\n def estimate_elastic_scattering_intensity(self, threshold, show_progressbar=None):\n \"\"\"Rough estimation of the elastic scattering intensity by\n truncation of a EELS low-loss spectrum.\n\n Parameters\n ----------\n threshold : {Signal1D, float, int}\n Truncation energy to estimate the intensity of the elastic\n scattering. The threshold can be provided as a signal of the same\n dimension as the input spectrum navigation space containing the\n threshold value in the energy units. Alternatively a constant\n threshold can be specified in energy/index units by passing\n float/int.\n %s\n\n Returns\n -------\n I0: Signal1D\n The elastic scattering intensity.\n\n See Also\n --------\n estimate_elastic_scattering_threshold\n\n \"\"\"\n # TODO: Write units tests\n self._check_signal_dimension_equals_one()\n\n if show_progressbar is None:\n show_progressbar = hs.preferences.General.show_progressbar\n\n if isinstance(threshold, numbers.Number):\n I0 = self.isig[:threshold].integrate1D(-1)\n else:\n ax = self.axes_manager.signal_axes[0]\n # I0 = self._get_navigation_signal()\n # I0 = I0.transpose(signal_axes=[])\n threshold = threshold.transpose(signal_axes=[])\n binned = ax.is_binned\n\n def estimating_function(data, threshold=None):\n if np.isnan(threshold):\n return np.nan\n else:\n # the object is just an array, so have to reimplement\n # integrate1D. 
However can make certain assumptions, for\n # example 1D signal and pretty much always binned. Should\n # probably at some point be joint\n ind = ax.value2index(threshold)\n data = data[:ind]\n if binned:\n return data.sum()\n else:\n from scipy.integrate import simps\n\n axis = ax.axis[:ind]\n return simps(y=data, x=axis)\n\n I0 = self.map(\n estimating_function,\n threshold=threshold,\n ragged=False,\n show_progressbar=show_progressbar,\n inplace=False,\n )\n I0.metadata.General.title = self.metadata.General.title + \" elastic intensity\"\n I0.set_signal_type(\"\")\n if self.tmp_parameters.has_item(\"filename\"):\n I0.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_elastic_intensity\"\n )\n I0.tmp_parameters.folder = self.tmp_parameters.folder\n I0.tmp_parameters.extension = self.tmp_parameters.extension\n return I0\n\n estimate_elastic_scattering_intensity.__doc__ %= SHOW_PROGRESSBAR_ARG\n\n def estimate_elastic_scattering_threshold(\n self, window=10.0, tol=None, window_length=5, polynomial_order=3, start=1.0\n ):\n \"\"\"Calculate the first inflexion point of the spectrum derivative\n within a window.\n\n This method assumes that the zero-loss peak is located at position zero\n in all the spectra. Currently it looks for an inflexion point, that can\n be a local maximum or minimum. Therefore, to estimate the elastic\n scattering threshold `start` + `window` must be less than the first\n maximum for all spectra (often the bulk plasmon maximum). If there is\n more than one inflexion point in energy the window it selects the\n smoother one what, often, but not always, is a good choice in this\n case.\n\n Parameters\n ----------\n window : {None, float}\n If None, the search for the local inflexion point is performed\n using the full energy range. A positive float will restrict\n the search to the (0,window] energy window, where window is given\n in the axis units. If no inflexion point is found in this\n spectral range the window value is returned instead.\n tol : {None, float}\n The threshold tolerance for the derivative. If \"auto\" it is\n automatically calculated as the minimum value that guarantees\n finding an inflexion point in all the spectra in given energy\n range.\n window_length : int\n If non zero performs order three Savitzky-Golay smoothing\n to the data to avoid falling in local minima caused by\n the noise. It must be an odd integer.\n polynomial_order : int\n Savitzky-Golay filter polynomial order.\n start : float\n Position from the zero-loss peak centre from where to start\n looking for the inflexion point.\n\n\n Returns\n -------\n\n threshold : Signal1D\n A Signal1D of the same dimension as the input spectrum\n navigation space containing the estimated threshold. Where the\n threshold couldn't be estimated the value is set to nan.\n\n See Also\n --------\n\n estimate_elastic_scattering_intensity,align_zero_loss_peak,\n find_peaks1D_ohaver, fourier_ratio_deconvolution.\n\n Notes\n -----\n\n The main purpose of this method is to be used as input for\n `estimate_elastic_scattering_intensity`. Indeed, for currently\n achievable energy resolutions, there is not such a thing as a elastic\n scattering threshold. 
Therefore, please be aware of the limitations of\n this method when using it.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n # Create threshold with the same shape as the navigation dims.\n threshold = self._get_navigation_signal().transpose(signal_axes=0)\n\n # Progress Bar\n axis = self.axes_manager.signal_axes[0]\n min_index, max_index = axis.value_range_to_indices(start, start + window)\n if max_index < min_index + 10:\n raise ValueError(\"Please select a bigger window\")\n s = self.isig[min_index:max_index].deepcopy()\n if window_length:\n s.smooth_savitzky_golay(\n polynomial_order=polynomial_order,\n window_length=window_length,\n differential_order=1,\n )\n else:\n s = s.derivative(-1)\n if tol is None:\n tol = np.max(abs(s.data).min(axis.index_in_array))\n saxis = s.axes_manager[-1]\n inflexion = (abs(s.data) <= tol).argmax(saxis.index_in_array)\n if isinstance(inflexion, da.Array):\n inflexion = inflexion.compute()\n threshold.data[:] = saxis.index2value(inflexion)\n if isinstance(inflexion, np.ndarray):\n threshold.data[inflexion == 0] = np.nan\n else: # Single spectrum\n if inflexion == 0:\n threshold.data[:] = np.nan\n del s\n if np.isnan(threshold.data).any():\n _logger.warning(\n \"No inflexion point could be found in some positions \"\n \"that have been marked with nans.\"\n )\n # Create spectrum image, stop and return value\n threshold.metadata.General.title = (\n self.metadata.General.title + \" elastic scattering threshold\"\n )\n if self.tmp_parameters.has_item(\"filename\"):\n threshold.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_elastic_scattering_threshold\"\n )\n threshold.tmp_parameters.folder = self.tmp_parameters.folder\n threshold.tmp_parameters.extension = self.tmp_parameters.extension\n threshold.set_signal_type(\"\")\n return threshold\n\n def estimate_thickness(\n self,\n threshold=None,\n zlp=None,\n density=None,\n mean_free_path=None,\n ):\n \"\"\"Estimates the thickness (relative and absolute)\n of a sample using the log-ratio method.\n\n The current EELS spectrum must be a low-loss spectrum containing\n the zero-loss peak. The hyperspectrum must be well calibrated\n and aligned. To obtain the thickness relative to the mean free path\n don't set the `density` and the `mean_free_path`.\n\n Parameters\n ----------\n threshold : {BaseSignal, float}, optional\n If the zero-loss-peak is not provided, use this energy threshold\n to roughly estimate its intensity by truncation.\n If the threshold is constant across the dataset use a float. Otherwise,\n provide a signal of\n the same dimension as the input spectrum navigation space\n containing the threshold value in the energy units.\n zlp : BaseSignal, optional\n If not None the zero-loss peak intensity is calculated from the ZLP\n spectrum supplied by integration.\n mean_free_path : float, optional\n The mean free path of the material in nanometers.\n If not provided, the thickness\n is given relative to the mean free path.\n density : float, optional\n The density of the material in g/cm**3. This is used to estimate the mean\n free path when the mean free path is not known and to perform the\n angular corrections.\n\n Returns\n -------\n s : BaseSignal\n The thickness relative to the MFP. It returns a Signal1D,\n Signal2D or a BaseSignal, depending on the current navigation\n dimensions.\n\n Notes\n -----\n For details see Egerton, R. Electron Energy-Loss Spectroscopy in the Electron\n Microscope. 
Springer-Verlag, 2011.\n \"\"\"\n axis = self.axes_manager.signal_axes[0]\n total_intensity = self.integrate1D(axis.index_in_array).data\n if threshold is None and zlp is None:\n raise ValueError(\n \"Please provide one of the following keywords: \" \"`threshold`, `zlp`\"\n )\n if zlp is not None:\n I0 = zlp.integrate1D(axis.index_in_array).data\n else:\n I0 = self.estimate_elastic_scattering_intensity(\n threshold=threshold,\n ).data\n\n t_over_lambda = np.log(total_intensity / I0)\n\n if density is not None:\n if self._are_microscope_parameters_missing():\n raise RuntimeError(\n \"Some microscope parameters are missing. Please use the \"\n \"`set_microscope_parameters()` method to set them. \"\n \"If you don't know them, don't set the `density` keyword.\"\n )\n else:\n md = self.metadata.Acquisition_instrument.TEM\n t_over_lambda *= iMFP_angular_correction(\n beam_energy=md.beam_energy,\n alpha=md.convergence_angle,\n beta=md.Detector.EELS.collection_angle,\n density=density,\n )\n if mean_free_path is None:\n mean_free_path = iMFP_Iakoubovskii(\n electron_energy=self.metadata.Acquisition_instrument.TEM.beam_energy,\n density=density,\n )\n _logger.info(f\"The estimated iMFP is {mean_free_path} nm\")\n else:\n _logger.warning(\n \"Computing the thickness without taking into account the effect of \"\n \"the limited collection angle, what usually leads to underestimating \"\n \"the thickness. To perform the angular corrections you must provide \"\n \"the density of the material.\"\n )\n\n s = self._get_navigation_signal(data=t_over_lambda)\n if mean_free_path is not None:\n s.data *= mean_free_path\n s.metadata.General.title = self.metadata.General.title + \" thickness (nm)\"\n s.metadata.Signal.quantity = \"thickness (nm)\"\n else:\n _logger.warning(\n \"Computing the relative thickness. To compute the absolute \"\n \"thickness provide the `mean_free_path` and/or the `density`\"\n )\n s.metadata.General.title = (\n self.metadata.General.title + \" $\\\\frac{t}{\\\\lambda}$\"\n )\n s.metadata.Signal.quantity = \"$\\\\frac{t}{\\\\lambda}$\"\n if self.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename = self.tmp_parameters.filename + \"_thickness\"\n s.tmp_parameters.folder = self.tmp_parameters.folder\n s.tmp_parameters.extension = self.tmp_parameters.extension\n s = s.transpose(signal_axes=[])\n s.set_signal_type(\"\")\n return s\n\n def fourier_log_deconvolution(self, zlp, add_zlp=False, crop=False):\n \"\"\"Performs fourier-log deconvolution.\n\n Parameters\n ----------\n zlp : EELSSpectrum\n The corresponding zero-loss peak.\n\n add_zlp : bool\n If True, adds the ZLP to the deconvolved spectrum\n crop : bool\n If True crop the spectrum to leave out the channels that\n have been modified to decay smoothly to zero at the sides\n of the spectrum.\n\n Returns\n -------\n An EELSSpectrum containing the current data deconvolved.\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details see: Egerton, R. Electron Energy-Loss\n Spectroscopy in the Electron Microscope. 
Springer-Verlag, 2011.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes\"\n )\n s = self.deepcopy()\n zlp_size = zlp.axes_manager.signal_axes[0].size\n self_size = self.axes_manager.signal_axes[0].size\n tapped_channels = s.hanning_taper()\n # Conservative new size to solve the wrap-around problem\n size = zlp_size + self_size - 1\n # Calculate optimal FFT padding for performance\n complex_result = zlp.data.dtype.kind == \"c\" or s.data.dtype.kind == \"c\"\n size = optimal_fft_size(size, not complex_result)\n\n axis = self.axes_manager.signal_axes[0]\n\n z = np.fft.rfft(zlp.data, n=size, axis=axis.index_in_array)\n j = np.fft.rfft(s.data, n=size, axis=axis.index_in_array)\n if self._lazy or zlp._lazy:\n j1 = z * da.log(j / z).map_blocks(np.nan_to_num)\n else:\n j1 = z * np.nan_to_num(np.log(j / z))\n sdata = np.fft.irfft(j1, axis=axis.index_in_array)\n\n s.data = sdata[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, self_size)),\n ]\n )\n ]\n if add_zlp is True:\n if self_size >= zlp_size:\n if self._lazy:\n _slices_before = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, zlp_size)),\n ]\n )\n _slices_after = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(zlp_size, None)),\n ]\n )\n s.data = da.stack(\n (s.data[_slices_before] + zlp.data, s.data[_slices_after]),\n axis=axis.index_in_array,\n )\n else:\n s.data[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, zlp_size)),\n ]\n )\n ] += zlp.data\n else:\n s.data += zlp.data[\n s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, self_size)),\n ]\n )\n ]\n\n s.metadata.General.title = (\n s.metadata.General.title + \" after Fourier-log deconvolution\"\n )\n if s.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_after_fourier_log_deconvolution\"\n )\n if crop is True:\n s.crop(axis.index_in_axes_manager, None, int(-tapped_channels))\n return s\n\n def fourier_ratio_deconvolution(\n self,\n ll,\n fwhm=None,\n threshold=None,\n extrapolate_lowloss=True,\n extrapolate_coreloss=True,\n ):\n \"\"\"Performs Fourier-ratio deconvolution.\n\n The core-loss should have the background removed. To reduce the noise\n amplification the result is convolved with a Gaussian function.\n\n Parameters\n ----------\n ll: EELSSpectrum\n The corresponding low-loss (ll) EELSSpectrum.\n fwhm : float or None\n Full-width half-maximum of the Gaussian function by which\n the result of the deconvolution is convolved. It can be\n used to select the final SNR and spectral resolution. If\n None, the FWHM of the zero-loss peak of the low-loss is\n estimated and used.\n threshold : {None, float}\n Truncation energy to estimate the intensity of the\n elastic scattering. If None the threshold is taken as the\n first minimum after the ZLP centre.\n extrapolate_lowloss, extrapolate_coreloss : bool\n If True the signals are extrapolated using a power law,\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details see: Egerton, R. Electron Energy-Loss\n Spectroscopy in the Electron Microscope. 
Springer-Verlag, 2011.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n if not ll.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"The low-loss energy axis is non-uniform. \"\n \"This operation is not yet implemented for non-uniform energy axes\"\n )\n orig_cl_size = self.axes_manager.signal_axes[0].size\n\n if threshold is None:\n threshold = ll.estimate_elastic_scattering_threshold()\n\n if extrapolate_coreloss is True:\n cl = self.power_law_extrapolation(window_size=20, extrapolation_size=100)\n else:\n cl = self.deepcopy()\n\n if extrapolate_lowloss is True:\n ll = ll.power_law_extrapolation(window_size=100, extrapolation_size=100)\n else:\n ll = ll.deepcopy()\n\n ll.hanning_taper()\n cl.hanning_taper()\n\n ll_size = ll.axes_manager.signal_axes[0].size\n cl_size = self.axes_manager.signal_axes[0].size\n # Conservative new size to solve the wrap-around problem\n size = ll_size + cl_size - 1\n # Calculate the optimal FFT size\n size = optimal_fft_size(size)\n\n axis = ll.axes_manager.signal_axes[0]\n if fwhm is None:\n fwhm = float(\n ll.get_current_signal().estimate_peak_width()._get_current_data()\n )\n _logger.info(\"FWHM = %1.2f\" % fwhm)\n\n I0 = ll.estimate_elastic_scattering_intensity(threshold=threshold)\n I0 = I0.data\n if ll.axes_manager.navigation_size > 0:\n I0_shape = list(I0.shape)\n I0_shape.insert(axis.index_in_array, 1)\n I0 = I0.reshape(I0_shape)\n\n from hyperspy.components1d import Gaussian\n\n g = Gaussian()\n g.sigma.value = fwhm / 2.3548\n g.A.value = 1\n g.centre.value = 0\n zl = g.function(\n np.linspace(axis.offset, axis.offset + axis.scale * (size - 1), size)\n )\n z = np.fft.rfft(zl)\n jk = np.fft.rfft(cl.data, n=size, axis=axis.index_in_array)\n jl = np.fft.rfft(ll.data, n=size, axis=axis.index_in_array)\n zshape = [\n 1,\n ] * len(cl.data.shape)\n zshape[axis.index_in_array] = jk.shape[axis.index_in_array]\n cl.data = np.fft.irfft(z.reshape(zshape) * jk / jl, axis=axis.index_in_array)\n cl.data *= I0\n cl.crop(-1, None, int(orig_cl_size))\n cl.metadata.General.title = (\n self.metadata.General.title + \" after Fourier-ratio deconvolution\"\n )\n if cl.tmp_parameters.has_item(\"filename\"):\n cl.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"after_fourier_ratio_deconvolution\"\n )\n return cl\n\n def richardson_lucy_deconvolution(\n self, psf, iterations=15, show_progressbar=None, num_workers=None\n ):\n \"\"\"1D Richardson-Lucy Poissonian deconvolution of\n the spectrum by the given kernel.\n\n Parameters\n ----------\n psf : EELSSpectrum\n It must have the same signal dimension as the current\n spectrum and a spatial dimension of 0 or the same as the\n current spectrum.\n iterations : int\n Number of iterations of the deconvolution. Note that\n increasing the value will increase the noise amplification.\n %s\n %s\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n For details on the algorithm see Gloter, A., A. Douiri,\n M. Tence, and C. Colliex. “Improving Energy Resolution of\n EELS Spectra: An Alternative to the Monochromator Solution.”\n Ultramicroscopy 96, no. 
3–4 (September 2003): 385–400.\n\n \"\"\"\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n if show_progressbar is None:\n show_progressbar = hs.preferences.General.show_progressbar\n self._check_signal_dimension_equals_one()\n psf_size = psf.axes_manager.signal_axes[0].size\n maxval = self.axes_manager.navigation_size\n show_progressbar = show_progressbar and (maxval > 0)\n\n def deconv_function(signal, kernel=None, iterations=15, psf_size=None):\n imax = kernel.argmax()\n result = np.array(signal).copy()\n mimax = psf_size - 1 - imax\n for _ in range(iterations):\n first = np.convolve(kernel, result)[imax : imax + psf_size]\n result *= np.convolve(kernel[::-1], signal / first)[\n mimax : mimax + psf_size\n ]\n return result\n\n ds = self.map(\n deconv_function,\n kernel=psf,\n iterations=iterations,\n psf_size=psf_size,\n show_progressbar=show_progressbar,\n num_workers=num_workers,\n ragged=False,\n inplace=False,\n )\n\n ds.metadata.General.title += (\n \" after Richardson-Lucy deconvolution %i iterations\" % iterations\n )\n if ds.tmp_parameters.has_item(\"filename\"):\n ds.tmp_parameters.filename += \"_after_R-L_deconvolution_%iiter\" % iterations\n return ds\n\n richardson_lucy_deconvolution.__doc__ %= (SHOW_PROGRESSBAR_ARG, NUM_WORKERS_ARG)\n\n def _are_microscope_parameters_missing(self, ignore_parameters=[]):\n \"\"\"\n Check if the EELS parameters necessary to calculate the GOS\n are defined in metadata. If not, in interactive mode\n raises an UI item to fill the values.\n The `ignore_parameters` list can be to ignore parameters.\n \"\"\"\n must_exist = (\n \"Acquisition_instrument.TEM.convergence_angle\",\n \"Acquisition_instrument.TEM.beam_energy\",\n \"Acquisition_instrument.TEM.Detector.EELS.collection_angle\",\n )\n missing_parameters = []\n for item in must_exist:\n exists = self.metadata.has_item(item)\n if exists is False and item.split(\".\")[-1] not in ignore_parameters:\n missing_parameters.append(item)\n if missing_parameters:\n _logger.info(\"Missing parameters {}\".format(missing_parameters))\n return True\n else:\n return False\n\n def set_microscope_parameters(\n self,\n beam_energy=None,\n convergence_angle=None,\n collection_angle=None,\n toolkit=None,\n display=True,\n ):\n if set((beam_energy, convergence_angle, collection_angle)) == {None}:\n tem_par = EELSTEMParametersUI(self)\n return tem_par.gui(toolkit=toolkit, display=display)\n mp = self.metadata\n if beam_energy is not None:\n mp.set_item(\"Acquisition_instrument.TEM.beam_energy\", beam_energy)\n if convergence_angle is not None:\n mp.set_item(\n \"Acquisition_instrument.TEM.convergence_angle\", convergence_angle\n )\n if collection_angle is not None:\n mp.set_item(\n \"Acquisition_instrument.TEM.Detector.EELS.collection_angle\",\n collection_angle,\n )\n\n set_microscope_parameters.__doc__ = \"\"\"\n Set the microscope parameters that are necessary to calculate\n the GOS.\n\n If not all of them are defined, in interactive mode\n raises an UI item to fill the values.\n\n beam_energy: float\n The energy of the electron beam in keV.\n convergence_angle : float\n The microscope convergence semi-angle in mrad.\n collection_angle : float\n The collection semi-angle in mrad.\n {}\n {}\n \"\"\".format(\n TOOLKIT_DT, DISPLAY_DT\n )\n\n def power_law_extrapolation(\n self, window_size=20, extrapolation_size=1024, add_noise=False, fix_neg_r=False\n ):\n \"\"\"\n Extrapolate the spectrum to the right 
using a powerlaw.\n\n Parameters\n ----------\n window_size : int\n The number of channels from the right side of the\n spectrum that are used to estimate the power law\n parameters.\n extrapolation_size : int\n Size of the extrapolation in number of channels\n add_noise : bool\n If True, add poissonian noise to the extrapolated spectrum.\n fix_neg_r : bool\n If True, the negative values for the \"components.PowerLaw\"\n parameter r will be flagged and the extrapolation will be\n done with a constant zero-value.\n\n Returns\n -------\n A new spectrum, with the extrapolation.\n\n \"\"\"\n self._check_signal_dimension_equals_one()\n axis = self.axes_manager.signal_axes[0]\n s = self.deepcopy()\n s.metadata.General.title += \" %i channels extrapolated\" % extrapolation_size\n if s.tmp_parameters.has_item(\"filename\"):\n s.tmp_parameters.filename += (\n \"_%i_channels_extrapolated\" % extrapolation_size\n )\n new_shape = list(self.data.shape)\n new_shape[axis.index_in_array] += extrapolation_size\n if self._lazy:\n left_data = s.data\n right_shape = list(self.data.shape)\n right_shape[axis.index_in_array] = extrapolation_size\n right_chunks = list(self.data.chunks)\n right_chunks[axis.index_in_array] = (extrapolation_size,)\n right_data = da.zeros(\n shape=tuple(right_shape),\n chunks=tuple(right_chunks),\n dtype=self.data.dtype,\n )\n s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array)\n else:\n # just old code\n s.data = np.zeros(new_shape)\n s.data[..., : axis.size] = self.data\n s.get_dimensions_from_data()\n pl = PowerLaw()\n pl._axes_manager = self.axes_manager\n A, r = pl.estimate_parameters(\n s,\n axis.index2value(axis.size - window_size),\n axis.index2value(axis.size - 1),\n out=True,\n )\n if fix_neg_r is True:\n A = np.where(r <= 0, 0, A)\n # If the signal is binned we need to bin the extrapolated power law\n # what, in a first approximation, can be done by multiplying by the\n # axis step size.\n if self.axes_manager[-1].is_binned:\n factor = s.axes_manager[-1].scale\n else:\n factor = 1\n if self._lazy:\n # only need new axes if the navigation dimension is not 0\n if s.axes_manager.navigation_dimension:\n rightslice = (..., None)\n axisslice = (None, slice(axis.size, None))\n else:\n rightslice = (...,)\n axisslice = (slice(axis.size, None),)\n right_chunks[axis.index_in_array] = 1\n x = da.from_array(\n s.axes_manager.signal_axes[0].axis[axisslice],\n chunks=(extrapolation_size,),\n )\n A = A[rightslice]\n r = r[rightslice]\n right_data = factor * A * x ** (-r)\n s.data = da.concatenate([left_data, right_data], axis=axis.index_in_array)\n else:\n s.data[..., axis.size :] = (\n factor\n * A[..., np.newaxis]\n * s.axes_manager.signal_axes[0].axis[np.newaxis, axis.size :]\n ** (-r[..., np.newaxis])\n )\n return s\n\n def kramers_kronig_analysis(\n self, zlp=None, iterations=1, n=None, t=None, delta=0.5, full_output=False\n ):\n r\"\"\"\n Calculate the complex dielectric function from a single scattering\n distribution (SSD) using the Kramers-Kronig relations.\n\n It uses the FFT method as in [1]_. The SSD is an\n EELSSpectrum instance containing SSD low-loss EELS with no zero-loss\n peak. The internal loop is devised to approximately subtract the\n surface plasmon contribution supposing an unoxidized planar surface and\n neglecting coupling between the surfaces. 
This method does not account\n for retardation effects, instrumental broadening and surface plasmon\n excitation in particles.\n\n Note that either refractive index or thickness are required.\n If both are None or if both are provided an exception is raised.\n\n Parameters\n ----------\n zlp : {None, number, Signal1D}\n ZLP intensity. It is optional (can be None) if `t` is None and `n`\n is not None and the thickness estimation is not required. If `t`\n is not None, the ZLP is required to perform the normalization and\n if `t` is not None, the ZLP is required to calculate the thickness.\n If the ZLP is the same for all spectra, the integral of the ZLP\n can be provided as a number. Otherwise, if the ZLP intensity is not\n the same for all spectra, it can be provided as i) a Signal1D\n of the same dimensions as the current signal containing the ZLP\n spectra for each location ii) a BaseSignal of signal dimension 0\n and navigation_dimension equal to the current signal containing the\n integrated ZLP intensity.\n iterations : int\n Number of the iterations for the internal loop to remove the\n surface plasmon contribution. If 1 the surface plasmon contribution\n is not estimated and subtracted (the default is 1).\n n : {None, float}\n The medium refractive index. Used for normalization of the\n SSD to obtain the energy loss function. If given the thickness\n is estimated and returned. It is only required when `t` is None.\n t : {None, number, Signal1D}\n The sample thickness in nm. Used for normalization of the SSD\n to obtain the energy loss function. It is only required when\n `n` is None. If the thickness is the same for all spectra it can be\n given by a number. Otherwise, it can be provided as a BaseSignal\n with signal dimension 0 and navigation_dimension equal to the\n current signal.\n delta : float\n A small number (0.1-0.5 eV) added to the energy axis in\n specific steps of the calculation the surface loss correction to\n improve stability.\n full_output : bool\n If True, return a dictionary that contains the estimated\n thickness if `t` is None and the estimated surface plasmon\n excitation and the spectrum corrected from surface plasmon\n excitations if `iterations` > 1.\n\n Returns\n -------\n eps: DielectricFunction instance\n The complex dielectric function results,\n\n .. math::\n \\epsilon = \\epsilon_1 + i*\\epsilon_2,\n\n contained in an DielectricFunction instance.\n output: Dictionary (optional)\n A dictionary of optional outputs with the following keys\n\n * ``thickness``: the estimated thickness in nm calculated by\n normalization of the SSD (only when ``t`` is None)\n * ``surface plasmon estimation``: the estimated surface plasmon\n excitation (only if ``iterations`` > 1.)\n\n Raises\n ------\n ValueError\n If both `n` and `t` are undefined (None).\n AttributeError\n If the beam_energy or the collection semi-angle are not defined in\n metadata.\n NotImplementedError\n If the signal axis is a non-uniform axis.\n\n Notes\n -----\n This method is based in Egerton's Matlab code [1]_ with a\n minor difference: the wrap-around problem when computing the FFTs is\n workarounded by padding the signal instead of subtracting the\n reflected tail.\n\n .. 
[1] Ray Egerton, \"Electron Energy-Loss Spectroscopy in the Electron\n Microscope\", Springer-Verlag, 2011.\n\n \"\"\"\n if not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"This operation is not yet implemented for non-uniform energy axes.\"\n )\n output = {}\n if iterations == 1:\n # In this case s.data is not modified so there is no need to make\n # a deep copy.\n s = self.isig[0.0:]\n else:\n s = self.isig[0.0:].deepcopy()\n\n sorig = self.isig[0.0:]\n # Avoid singularity at 0\n if s.axes_manager.signal_axes[0].axis[0] == 0:\n s = s.isig[1:]\n sorig = self.isig[1:]\n\n # Constants and units\n me = constants.value(\"electron mass energy equivalent in MeV\") * 1e3 # keV\n\n # Mapped parameters\n self._are_microscope_parameters_missing(ignore_parameters=[\"convergence_angle\"])\n e0 = s.metadata.Acquisition_instrument.TEM.beam_energy\n beta = s.metadata.Acquisition_instrument.TEM.Detector.EELS.collection_angle\n\n axis = s.axes_manager.signal_axes[0]\n eaxis = axis.axis.copy()\n\n if isinstance(zlp, hyperspy.signal.BaseSignal):\n if (\n zlp.axes_manager.navigation_dimension\n == self.axes_manager.navigation_dimension\n ):\n if zlp.axes_manager.signal_dimension == 0:\n i0 = zlp.data\n else:\n i0 = zlp.integrate1D(axis.index_in_axes_manager).data\n else:\n raise ValueError(\n \"The ZLP signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n # The following prevents errors if the signal is a single spectrum\n if len(i0) != 1:\n i0 = i0.reshape(np.insert(i0.shape, axis.index_in_array, 1))\n elif isinstance(zlp, numbers.Number):\n i0 = zlp\n else:\n raise ValueError(\n \"The zero-loss peak input is not valid, it must be\\\n in the BaseSignal class or a Number.\"\n )\n\n if isinstance(t, hyperspy.signal.BaseSignal):\n if (\n t.axes_manager.navigation_dimension\n == self.axes_manager.navigation_dimension\n ) and (t.axes_manager.signal_dimension == 0):\n t = t.data\n t = t.reshape(np.insert(t.shape, axis.index_in_array, 1))\n else:\n raise ValueError(\n \"The thickness signal dimensions are not \"\n \"compatible with the dimensions of the \"\n \"low-loss signal\"\n )\n elif isinstance(t, np.ndarray) and t.shape and t.shape != (1,):\n raise ValueError(\n \"thickness must be a HyperSpy signal or a number,\" \" not a NumPy array.\"\n )\n\n # Slicer to get the signal data from 0 to axis.size\n slicer = s.axes_manager._get_data_slice(\n [\n (axis.index_in_array, slice(None, axis.size)),\n ]\n )\n\n # Kinetic definitions\n ke = e0 * (1 + e0 / 2.0 / me) / (1 + e0 / me) ** 2\n tgt = e0 * (2 * me + e0) / (me + e0)\n rk0 = 2590 * (1 + e0 / me) * np.sqrt(2 * ke / me)\n\n for io in range(iterations):\n # Calculation of the ELF by normalization of the SSD\n # Norm(SSD) = Imag(-1/epsilon) (Energy Loss Function, ELF)\n\n # We start by the \"angular corrections\"\n Im = s.data / (np.log(1 + (beta * tgt / eaxis) ** 2)) / axis.scale\n if n is None and t is None:\n raise ValueError(\n \"The thickness and the refractive index are \"\n \"not defined. Please provide one of them.\"\n )\n elif n is not None and t is not None:\n raise ValueError(\n \"Please provide the refractive index OR the \"\n \"thickness information, not both\"\n )\n elif n is not None:\n # normalize using the refractive index.\n K = (Im / eaxis).sum(\n axis=axis.index_in_array, keepdims=True\n ) * axis.scale\n K = K / (np.pi / 2) / (1 - 1.0 / n**2)\n # K = (K / (np.pi / 2) / (1 - 1. 
/ n ** 2)).reshape(\n # np.insert(K.shape, axis.index_in_array, 1))\n # Calculate the thickness only if possible and required\n if zlp is not None and (full_output is True or iterations > 1):\n te = 332.5 * K * ke / i0\n if full_output is True:\n output[\"thickness\"] = te\n elif t is not None:\n if zlp is None:\n raise ValueError(\n \"The ZLP must be provided when the \"\n \"thickness is used for normalization.\"\n )\n # normalize using the thickness\n K = t * i0 / (332.5 * ke)\n te = t\n Im = Im / K\n\n # Kramers Kronig Transform:\n # We calculate KKT(Im(-1/epsilon))=1+Re(1/epsilon) with FFT\n # Follows: D W Johnson 1975 J. Phys. A: Math. Gen. 8 490\n # Use an optimal FFT size to speed up the calculation, and\n # make it double the closest upper value to workaround the\n # wrap-around problem.\n esize = optimal_fft_size(2 * axis.size)\n q = -2 * np.fft.fft(Im, esize, axis.index_in_array).imag / esize\n\n q[slicer] *= -1\n q = np.fft.fft(q, axis=axis.index_in_array)\n # Final touch, we have Re(1/eps)\n Re = q[slicer].real + 1\n\n # Egerton does this to correct the wrap-around problem, but in our\n # case this is not necessary because we compute the fft on an\n # extended and padded spectrum to avoid this problem.\n # Re=real(q)\n # Tail correction\n # vm=Re[axis.size-1]\n # Re[:(axis.size-1)]=Re[:(axis.size-1)]+1-(0.5*vm*((axis.size-1) /\n # (axis.size*2-arange(0,axis.size-1)))**2)\n # Re[axis.size:]=1+(0.5*vm*((axis.size-1) /\n # (axis.size+arange(0,axis.size)))**2)\n\n # Epsilon appears:\n # We calculate the real and imaginary parts of the CDF\n e1 = Re / (Re**2 + Im**2)\n e2 = Im / (Re**2 + Im**2)\n\n if iterations > 1 and zlp is not None:\n # Surface losses correction:\n # Calculates the surface ELF from a vacuum border effect\n # A simulated surface plasmon is subtracted from the ELF\n Srfelf = 4 * e2 / ((e1 + 1) ** 2 + e2**2) - Im\n adep = tgt / (eaxis + delta) * np.arctan(\n beta * tgt / axis.axis\n ) - beta / 1000.0 / (beta**2 + axis.axis**2.0 / tgt**2)\n Srfint = 2000 * K * adep * Srfelf / rk0 / te * axis.scale\n s.data = sorig.data - Srfint\n _logger.debug(\"Iteration number: %d / %d\", io + 1, iterations)\n if iterations == io + 1 and full_output is True:\n sp = sorig._deepcopy_with_new_data(Srfint)\n sp.metadata.General.title += (\n \" estimated surface plasmon excitation.\"\n )\n output[\"surface plasmon estimation\"] = sp\n del sp\n del Srfint\n\n eps = s._deepcopy_with_new_data(e1 + e2 * 1j)\n del s\n eps.set_signal_type(\"DielectricFunction\")\n eps.metadata.General.title = (\n self.metadata.General.title + \"dielectric function \"\n \"(from Kramers-Kronig analysis)\"\n )\n if eps.tmp_parameters.has_item(\"filename\"):\n eps.tmp_parameters.filename = (\n self.tmp_parameters.filename + \"_CDF_after_Kramers_Kronig_transform\"\n )\n if \"thickness\" in output:\n # As above,prevent errors if the signal is a single spectrum\n if len(te) != 1:\n te = te[self.axes_manager._get_data_slice([(axis.index_in_array, 0)])]\n thickness = eps._get_navigation_signal(data=te)\n thickness.metadata.General.title = (\n self.metadata.General.title + \" thickness \"\n \"(calculated using Kramers-Kronig analysis)\"\n )\n output[\"thickness\"] = thickness\n if full_output is False:\n return eps\n else:\n return eps, output\n\n def create_model(\n self,\n low_loss=None,\n auto_background=True,\n auto_add_edges=True,\n GOS=\"gosh\",\n gos_file_path=None,\n dictionary=None,\n ):\n \"\"\"Create a model for the current EELS data.\n\n Parameters\n ----------\n %s\n\n Returns\n -------\n model : 
:class:`~.models.eelsmodel.EELSModel` instance.\n\n Raises\n ------\n NotImplementedError\n If the signal axis is a non-uniform axis.\n \"\"\"\n from exspy.models.eelsmodel import EELSModel\n\n if low_loss is not None and not self.axes_manager.signal_axes[0].is_uniform:\n raise NotImplementedError(\n \"Multiple scattering is not implemented for spectra with a \"\n \"non-uniform energy axis. To create a model that does not \"\n \"account for multiple-scattering do not set the `ll` keyword.\"\n )\n model = EELSModel(\n self,\n low_loss=low_loss,\n auto_background=auto_background,\n auto_add_edges=auto_add_edges,\n GOS=GOS,\n dictionary=dictionary,\n )\n return model\n\n create_model.__doc__ %= EELSMODEL_PARAMETERS\n\n def plot(self, plot_edges=False, only_edges=(\"Major\", \"Minor\"), **kwargs):\n \"\"\"\n Plot the EELS spectrum. Markers indicating the position of the\n EELS edges can be added.\n\n Parameters\n ----------\n plot_edges : {False, True, list of string or string}\n If True, draws on s.metadata.Sample.elements for edges.\n Alternatively, provide a string of a single edge, or an iterable\n containing a list of valid elements, EELS families or edges. For\n example, an element should be 'Zr', an element edge family should\n be 'Zr_L' or an EELS edge 'Zr_L3'.\n only_edges : tuple of string\n Either 'Major' or 'Minor'. Defaults to both.\n kwargs\n The extra keyword arguments for plot()\n \"\"\"\n\n super().plot(**kwargs)\n\n if plot_edges:\n # edges is a mapping {edge_name:edge_energy}\n edges = self._get_edges_to_plot(plot_edges, only_edges)\n self._plot_edge_labels(edges)\n\n self._plot.signal_plot.events.closed.connect(self._on_signal_plot_closing, [])\n\n def _on_signal_plot_closing(self):\n self._edge_markers = {\"lines\": None, \"texts\": None, \"names\": []}\n\n def _get_offsets_and_segments(self, edges):\n index = np.array([float(v) for v in edges.values()]) # dictionaries\n segments = np.empty((len(index), 2, 2))\n offsets = np.empty((len(index), 2))\n for i, ind in enumerate(index):\n segments[i] = [[ind, 1], [ind, 1.1]]\n offsets[i] = [ind, 1.1]\n return offsets, segments\n\n def _initialise_markers(self):\n self._edge_markers[\"lines\"] = Lines(\n segments=np.empty((0, 2, 2)),\n transform=\"relative\",\n color=\"black\",\n shift=np.array([0.0, 0.19]),\n )\n self._edge_markers[\"texts\"] = Texts(\n offsets=np.empty((0, 2)),\n texts=np.empty((0,)),\n offset_transform=\"relative\",\n rotation=np.pi / 2,\n horizontalalignment=\"left\",\n verticalalignment=\"bottom\",\n facecolor=\"black\",\n shift=0.2,\n )\n for key in [\"lines\", \"texts\"]:\n self.add_marker(self._edge_markers[key], render_figure=False)\n\n def _plot_edge_labels(self, edges):\n \"\"\"\n Plot the EELS edge label (vertical line segment and text box) on\n the signal\n\n Parameters\n ----------\n edges : dictionary\n A dictionary with the labels as keys and their energies as values.\n For example, {'Fe_L2': 721.0, 'O_K': 532.0}\n\n \"\"\"\n # the object is needed to connect replot method when axes_manager\n # indices changed\n _ = EdgesRange(self, interactive=False)\n self._add_edge_labels(edges)\n\n def _get_edges_to_plot(self, plot_edges, only_edges):\n # get the dictionary of the edge to be shown\n extra_element_edge_family = []\n if plot_edges is True:\n try:\n elements = self.metadata.Sample.elements\n except AttributeError:\n raise ValueError(\n \"No elements defined. 
Add them with \"\n \"s.add_elements, or specify elements, edge \"\n \"families or edges directly\"\n )\n else:\n extra_element_edge_family.extend(np.atleast_1d(plot_edges))\n try:\n elements = self.metadata.Sample.elements\n except:\n elements = []\n\n element_edge_family = elements + extra_element_edge_family\n edges_dict = self._get_edges(element_edge_family, only_edges)\n\n return edges_dict\n\n def _get_edges(self, element_edge_family, only_edges):\n # get corresponding information depending on whether it is an element\n # a particular edge or a family of edge\n axis_min = self.axes_manager[-1].low_value\n axis_max = self.axes_manager[-1].high_value\n\n names_and_energies = {}\n shells = [\"K\", \"L\", \"M\", \"N\", \"O\"]\n\n errmsg = \"Edge family '{}' is not supported. Supported edge family \" \"is {}.\"\n for member in element_edge_family:\n try:\n element, ss = member.split(\"_\")\n\n if len(ss) == 1:\n memtype = \"family\"\n if ss not in shells:\n raise AttributeError(errmsg.format(ss, shells))\n if len(ss) == 2:\n memtype = \"edge\"\n if ss[0] not in shells:\n raise AttributeError(errmsg.format(ss[0], shells))\n except ValueError:\n element = member\n ss = \"\"\n memtype = \"element\"\n\n try:\n Binding_energies = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ]\n except KeyError as err:\n raise ValueError(\"'{}' is not a valid element\".format(element)) from err\n\n for edge in Binding_energies.keys():\n relevance = Binding_energies[edge][\"relevance\"]\n energy = Binding_energies[edge][\"onset_energy (eV)\"]\n\n isInRel = relevance in only_edges\n isInRng = axis_min < energy < axis_max\n isSameFamily = ss in edge\n\n if memtype == \"element\":\n flag = isInRel & isInRng\n edge_key = element + \"_\" + edge\n elif memtype == \"edge\":\n flag = isInRng & (edge == ss)\n edge_key = member\n elif memtype == \"family\":\n flag = isInRel & isInRng & isSameFamily\n edge_key = element + \"_\" + edge\n\n if flag:\n names_and_energies[edge_key] = energy\n\n return names_and_energies\n\n def _remove_edge_labels(self, edge_names=None, render_figure=True):\n \"\"\"\n Remove EELS edges markers to the signal\n\n Parameters\n ----------\n edge_names : str, list of str or None\n The string must be the name of edges, e. g. 'Fe_L2'.\n If ``None`` (default), remove all edges.\n render_figure : bool\n If True, render the figure after adding the markers\n \"\"\"\n if edge_names is None:\n edge_names = self._edge_markers[\"names\"]\n if isinstance(edge_names, set):\n # convert to list to find the index\n edge_names = list(edge_names)\n if not isinstance(edge_names, (list, tuple, np.ndarray)):\n edge_names = [edge_names]\n\n ind = np.where(np.isin(self._edge_markers[\"names\"], edge_names))\n\n if self._edge_markers[\"lines\"] is not None:\n self._edge_markers[\"lines\"].remove_items(ind)\n if self._edge_markers[\"texts\"] is not None:\n self._edge_markers[\"texts\"].remove_items(ind)\n if self._edge_markers[\"names\"] is not []:\n self._edge_markers[\"names\"] = np.delete(self._edge_markers[\"names\"], ind)\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _add_edge_labels(self, edges, render_figure=True):\n \"\"\"\n Add EELS edges markers to the signal\n\n Parameters\n ----------\n edge_name : dictionary or set\n If dictionary must be the name of edge as key and energy as values,\n e.g. {'Cr_L2': 584.0}. If list or set, must the name of the edge,\n e.g. 
set('Cr_L2', )\n render_figure : bool\n If True, render the figure after adding the markers\n \"\"\"\n if isinstance(edges, set):\n edges_dict = {}\n for edge in edges:\n element, ss = edge.split(\"_\")\n Binding_energies = elements_db[element][\"Atomic_properties\"][\n \"Binding_energies\"\n ]\n edges_dict[edge] = Binding_energies[ss][\"onset_energy (eV)\"]\n edges = edges_dict\n\n offsets, segments = self._get_offsets_and_segments(edges)\n names = list(edges.keys())\n\n self._edge_markers[\"lines\"].add_items(segments=segments)\n self._edge_markers[\"lines\"].update()\n self._edge_markers[\"texts\"].add_items(offsets=offsets, texts=names)\n self._edge_markers[\"lines\"].update()\n self._edge_markers[\"names\"] = np.append(self._edge_markers[\"names\"], names)\n\n if render_figure:\n self._render_figure(plot=[\"signal_plot\"])\n\n def _get_complementary_edges(self, edges, only_major=False):\n \"\"\"\n Get other edges of the same element present within the energy\n range of the axis\n\n Parameters\n ----------\n edges : iterable\n A sequence of strings contains edges in the format of\n element_subshell for EELS. For example, ['Fe_L2', 'O_K']\n only_major : bool\n Whether to show only the major edges. The default is False.\n\n Returns\n -------\n complmt_edges : list\n A list containing all the complementary edges of the same element\n present within the energy range of the axis\n \"\"\"\n\n emin = self.axes_manager[-1].low_value\n emax = self.axes_manager[-1].high_value\n complmt_edges = []\n\n elements = set()\n for edge in edges:\n element, _ = edge.split(\"_\")\n elements.update([element])\n\n for element in elements:\n ss_info = elements_db[element][\"Atomic_properties\"][\"Binding_energies\"]\n\n for subshell in ss_info:\n sse = ss_info[subshell][\"onset_energy (eV)\"]\n ssr = ss_info[subshell][\"relevance\"]\n\n if only_major:\n if ssr != \"Major\":\n continue\n\n edge = element + \"_\" + subshell\n if (\n (emin <= sse <= emax)\n and (subshell[-1] != \"a\")\n and (edge not in edges)\n ):\n complmt_edges.append(edge)\n\n return complmt_edges\n\n def rebin(self, new_shape=None, scale=None, crop=True, dtype=None, out=None):\n factors = self._validate_rebin_args_and_get_factors(\n new_shape=new_shape, scale=scale\n )\n m = super().rebin(\n new_shape=new_shape, scale=scale, crop=crop, dtype=dtype, out=out\n )\n m = out or m\n time_factor = np.prod(\n [factors[axis.index_in_array] for axis in m.axes_manager.navigation_axes]\n )\n mdeels = m.metadata\n m.get_dimensions_from_data()\n if m.metadata.get_item(\"Acquisition_instrument.TEM.Detector.EELS\"):\n mdeels = m.metadata.Acquisition_instrument.TEM.Detector.EELS\n if \"dwell_time\" in mdeels:\n mdeels.dwell_time *= time_factor\n if \"exposure\" in mdeels:\n mdeels.exposure *= time_factor\n else:\n _logger.info(\n \"No dwell_time could be found in the metadata so \"\n \"this has not been updated.\"\n )\n if out is None:\n return m\n else:\n out.events.data_changed.trigger(obj=out)\n return m\n\n rebin.__doc__ = hyperspy.signal.BaseSignal.rebin.__doc__\n\n def vacuum_mask(\n self, threshold=10.0, start_energy=None, closing=True, opening=False\n ):\n \"\"\"\n Generate mask of the vacuum region\n\n Parameters\n ----------\n threshold: float\n For a given navigation coordinate, mean value in the energy axis\n below which the pixel is considered as vacuum.\n start_energy: float, None\n Minimum energy included in the calculation of the mean intensity.\n If None, consider only the last quarter of the spectrum to\n calculate the mask.\n closing: 
bool\n If True, a morphological closing is applied to the mask.\n opening: bool\n If True, a morphological opening is applied to the mask.\n\n Returns\n -------\n mask: signal\n The mask of the region.\n \"\"\"\n if self.axes_manager.navigation_dimension == 0:\n raise RuntimeError(\n \"Navigation dimenstion must be higher than 0 \"\n \"to estimate a vacuum mask.\"\n )\n signal_axis = self.axes_manager.signal_axes[0]\n if start_energy is None:\n start_energy = 0.75 * signal_axis.high_value\n\n mask = self.isig[start_energy:].mean(-1) <= threshold\n\n from scipy.ndimage import binary_dilation, binary_erosion\n\n if closing:\n mask.data = binary_dilation(mask.data, border_value=0)\n mask.data = binary_erosion(mask.data, border_value=1)\n if opening:\n mask.data = binary_erosion(mask.data, border_value=1)\n mask.data = binary_dilation(mask.data, border_value=0)\n return mask" } ]
import contextlib
import io
import numpy as np
import pooch
import pytest
import hyperspy.api as hs
from unittest import mock
from exspy.misc.elements import elements_db as elements
from hyperspy.decorators import lazifyTestClass
from exspy.misc.eels.gosh_gos import _GOSH_URL, _GOSH_KNOWN_HASH
from exspy.signals import EELSSpectrum
from exspy.models.eelsmodel import EELSModel
from hyperspy.components1d import PowerLaw
from hyperspy.components1d import PowerLaw
19,076
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method): s = EELSSpectrum(np.zeros(200)) s.set_microscope_parameters(100, 10, 10) s.axes_manager[-1].offset = 150 s.add_elements(("B", "C")) self.s = s def test_create_eelsmodel(self): assert isinstance(self.s.create_model(), EELSModel) def test_create_eelsmodel_no_md(self): s = self.s del s.metadata.Acquisition_instrument with pytest.raises(ValueError): s.create_model() def test_auto_add_edges_true(self): m = self.s.create_model(auto_add_edges=True) cnames = [component.name for component in m] assert "B_K" in cnames and "C_K" in cnames def test_gos_hydrogenic(self): m = self.s.create_model(auto_add_edges=True, GOS="hydrogenic") assert m["B_K"].GOS._name == "hydrogenic" m.fit() def test_gos_gosh(self): m = self.s.create_model(auto_add_edges=True, GOS="gosh") assert m["B_K"].GOS._name == "gosh" m.fit() with pytest.raises(ValueError): self.s.create_model(auto_add_edges=True, GOS="not_a_GOS") def test_gos_file(self): gos_file_path = pooch.retrieve( url=_GOSH_URL,
# -*- coding: utf-8 -*- # Copyright 2007-2023 The exSpy developers # # This file is part of exSpy. # # exSpy is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # exSpy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with exSpy. If not, see <https://www.gnu.org/licenses/#GPL>. # Dask does not always work nicely with np.errstate, # see: https://github.com/dask/dask/issues/3245, so # filter out divide-by-zero warnings that only appear # when the test is lazy. When the test is not lazy, # internal use of np.errstate means the warnings never # appear in the first place. @pytest.mark.filterwarnings( "ignore:invalid value encountered in subtract:RuntimeWarning" ) @pytest.mark.filterwarnings("ignore:divide by zero encountered in log:RuntimeWarning") @lazifyTestClass class TestCreateEELSModel: def setup_method(self, method): s = EELSSpectrum(np.zeros(200)) s.set_microscope_parameters(100, 10, 10) s.axes_manager[-1].offset = 150 s.add_elements(("B", "C")) self.s = s def test_create_eelsmodel(self): assert isinstance(self.s.create_model(), EELSModel) def test_create_eelsmodel_no_md(self): s = self.s del s.metadata.Acquisition_instrument with pytest.raises(ValueError): s.create_model() def test_auto_add_edges_true(self): m = self.s.create_model(auto_add_edges=True) cnames = [component.name for component in m] assert "B_K" in cnames and "C_K" in cnames def test_gos_hydrogenic(self): m = self.s.create_model(auto_add_edges=True, GOS="hydrogenic") assert m["B_K"].GOS._name == "hydrogenic" m.fit() def test_gos_gosh(self): m = self.s.create_model(auto_add_edges=True, GOS="gosh") assert m["B_K"].GOS._name == "gosh" m.fit() with pytest.raises(ValueError): self.s.create_model(auto_add_edges=True, GOS="not_a_GOS") def test_gos_file(self): gos_file_path = pooch.retrieve( url=_GOSH_URL,
known_hash=_GOSH_KNOWN_HASH,
2
2023-10-28 20:04:10+00:00
24k
Elfenreigen/UniChest
train.py
[ { "identifier": "utils", "path": "factory/utils.py", "snippet": "class SmoothedValue(object):\nclass MetricLogger(object):\nclass AttrDict(dict):\n def __init__(self, window_size=20, fmt=None):\n def update(self, value, n=1):\n def synchronize_between_processes(self):\n def median(self):\n def avg(self):\n def global_avg(self):\n def max(self):\n def value(self):\n def __str__(self):\n def __init__(self, delimiter=\"\\t\"):\n def update(self, **kwargs):\n def __getattr__(self, attr):\n def __str__(self):\n def global_avg(self):\n def synchronize_between_processes(self):\n def add_meter(self, name, meter):\n def log_every(self, iterable, print_freq, header=None):\n def __init__(self, *args, **kwargs):\ndef compute_acc(logits, label, reduction='mean'):\ndef compute_n_params(model, return_str=True):\ndef setup_for_distributed(is_master):\n def print(*args, **kwargs):\ndef seed_worker(worker_id):\ndef is_dist_avail_and_initialized():\ndef get_world_size():\ndef get_rank():\ndef is_main_process():\ndef save_on_master(*args, **kwargs):\ndef init_distributed_mode(args):\n MB = 1024.0 * 1024.0" }, { "identifier": "create_scheduler", "path": "scheduler/scheduler_factory.py", "snippet": "def create_scheduler(args, optimizer):\n num_epochs = args.epochs\n\n if getattr(args, 'lr_noise', None) is not None:\n lr_noise = getattr(args, 'lr_noise')\n if isinstance(lr_noise, (list, tuple)):\n noise_range = [n * num_epochs for n in lr_noise]\n if len(noise_range) == 1:\n noise_range = noise_range[0]\n else:\n noise_range = lr_noise * num_epochs\n else:\n noise_range = None\n\n lr_scheduler = None\n if args.sched == 'cosine':\n lr_scheduler = CosineLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'tanh':\n lr_scheduler = TanhLRScheduler(\n optimizer,\n t_initial=num_epochs,\n t_mul=getattr(args, 'lr_cycle_mul', 1.),\n lr_min=args.min_lr,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cycle_limit=getattr(args, 'lr_cycle_limit', 1),\n t_in_epochs=True,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n num_epochs = lr_scheduler.get_cycle_length() + args.cooldown_epochs\n elif args.sched == 'step':\n lr_scheduler = StepLRScheduler(\n optimizer,\n decay_t=args.decay_epochs,\n decay_rate=args.decay_rate,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n noise_seed=getattr(args, 'seed', 42),\n )\n elif args.sched == 'plateau':\n mode = 'min' if 'loss' in getattr(args, 'eval_metric', '') else 'max'\n lr_scheduler = PlateauLRScheduler(\n optimizer,\n decay_rate=args.decay_rate,\n patience_t=args.patience_epochs,\n lr_min=args.min_lr,\n mode=mode,\n warmup_lr_init=args.warmup_lr,\n warmup_t=args.warmup_epochs,\n cooldown_t=0,\n noise_range_t=noise_range,\n noise_pct=getattr(args, 'lr_noise_pct', 0.67),\n noise_std=getattr(args, 'lr_noise_std', 1.),\n 
noise_seed=getattr(args, 'seed', 42),\n )\n\n return lr_scheduler, num_epochs" }, { "identifier": "create_optimizer", "path": "optim/optim_factory.py", "snippet": "def create_optimizer(args, model, image_encoder,text_encoder, filter_bias_and_bn=True):\n opt_lower = args.opt.lower()\n weight_decay = args.weight_decay\n if weight_decay and filter_bias_and_bn:\n skip = {}\n if hasattr(model, 'no_weight_decay'):\n skip = model.no_weight_decay()\n parameters = add_weight_decay(model,image_encoder,text_encoder, weight_decay, skip)\n weight_decay = 0.\n else:\n parameters = [filter(lambda p: p.requires_grad, model.parameters()),filter(lambda p: p.requires_grad, image_encoder.parameters()),filter(lambda p: p.requires_grad, text_encoder.parameters())]\n #model.parameters()\n\n # print(parameters)\n if 'fused' in opt_lower:\n assert has_apex and torch.cuda.is_available(), 'APEX and CUDA required for fused optimizers'\n\n opt_args = dict(lr=args.lr, weight_decay=weight_decay)\n if hasattr(args, 'opt_eps') and args.opt_eps is not None:\n opt_args['eps'] = args.opt_eps\n if hasattr(args, 'opt_betas') and args.opt_betas is not None:\n opt_args['betas'] = args.opt_betas\n if hasattr(args, 'opt_args') and args.opt_args is not None:\n opt_args.update(args.opt_args)\n\n opt_split = opt_lower.split('_')\n opt_lower = opt_split[-1]\n if opt_lower == 'sgd' or opt_lower == 'nesterov':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'momentum':\n opt_args.pop('eps', None)\n optimizer = optim.SGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'adam':\n optimizer = optim.Adam(parameters, **opt_args)\n elif opt_lower == 'adamw':\n optimizer = optim.AdamW(parameters, **opt_args)\n elif opt_lower == 'nadam':\n optimizer = Nadam(parameters, **opt_args)\n elif opt_lower == 'radam':\n optimizer = RAdam(parameters, **opt_args)\n elif opt_lower == 'adamp': \n optimizer = AdamP(parameters, wd_ratio=0.01, nesterov=True, **opt_args)\n elif opt_lower == 'sgdp': \n optimizer = SGDP(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'adadelta':\n optimizer = optim.Adadelta(parameters, **opt_args)\n elif opt_lower == 'adafactor':\n if not args.lr:\n opt_args['lr'] = None\n optimizer = Adafactor(parameters, **opt_args)\n elif opt_lower == 'adahessian':\n optimizer = Adahessian(parameters, **opt_args)\n elif opt_lower == 'rmsprop':\n optimizer = optim.RMSprop(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'rmsproptf':\n optimizer = RMSpropTF(parameters, alpha=0.9, momentum=args.momentum, **opt_args)\n elif opt_lower == 'novograd':\n optimizer = NovoGrad(parameters, **opt_args)\n elif opt_lower == 'nvnovograd':\n optimizer = NvNovoGrad(parameters, **opt_args)\n elif opt_lower == 'fusedsgd':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=True, **opt_args)\n elif opt_lower == 'fusedmomentum':\n opt_args.pop('eps', None)\n optimizer = FusedSGD(parameters, momentum=args.momentum, nesterov=False, **opt_args)\n elif opt_lower == 'fusedadam':\n optimizer = FusedAdam(parameters, adam_w_mode=False, **opt_args)\n elif opt_lower == 'fusedadamw':\n optimizer = FusedAdam(parameters, adam_w_mode=True, **opt_args)\n elif opt_lower == 'fusedlamb':\n optimizer = FusedLAMB(parameters, **opt_args)\n elif opt_lower == 'fusednovograd':\n opt_args.setdefault('betas', (0.95, 0.98))\n optimizer = FusedNovoGrad(parameters, 
**opt_args)\n else:\n assert False and \"Invalid optimizer\"\n raise ValueError\n\n if len(opt_split) > 1:\n if opt_split[0] == 'lookahead':\n optimizer = Lookahead(optimizer)\n\n return optimizer" }, { "identifier": "train", "path": "engine/train.py", "snippet": "def train(model, image_encoder, text_encoder, tokenizer, data_loader, optimizer, epoch, warmup_steps, device, scheduler, args, config, writer):\n clip_loss = ClipLoss()\n ce_loss = nn.CrossEntropyLoss(ignore_index=-1)\n \n if args.add_dataset:\n ASL_loss = AsymmetricLossAdd(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n else:\n ASL_loss = AsymmetricLoss(gamma_neg=6, gamma_pos=0, clip=0.05, disable_torch_grad_focal_loss=True)\n\n loss_m = AverageMeter()\n loss_clip_m = AverageMeter()\n loss_ce_m = AverageMeter()\n loss_ce_image_m = AverageMeter()\n loss_ce_text_m = AverageMeter()\n batch_time_m = AverageMeter()\n data_time_m = AverageMeter()\n end = time.time()\n\n model.train() \n image_encoder.train() \n text_encoder.train()\n metric_logger = utils.MetricLogger(delimiter=\" \")\n metric_logger.add_meter('lr', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_ce_image', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n if args.use_entity_features:\n metric_logger.add_meter('loss_ce_text', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.add_meter('loss_clip', utils.SmoothedValue(window_size=50, fmt='{value:.6f}'))\n metric_logger.update(loss=1.0)\n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n header = 'Train Epoch: [{}]'.format(epoch)\n print_freq = 50 \n step_size = 100\n warmup_iterations = warmup_steps*step_size \n scalar_step = epoch*len(data_loader)\n num_batches_per_epoch = data_loader.num_batches\n sample_digits = math.ceil(math.log(data_loader.num_samples + 1, 10))\n\n for i, sample in enumerate(metric_logger.log_every(data_loader, print_freq, header)):\n if args.fourier:\n image = fourier_aug(sample['image'].to(device))\n else:\n image = sample['image'].to(device) \n label = sample['label'].long().to(device)\n\n if args.ignore_index:\n pass\n else:\n label[label==-1]=0\n entity = sample['entity']\n\n if args.add_dataset:\n dataset_label = sample['label_dataset']\n\n data_time_m.update(time.time() - end)\n\n optimizer.zero_grad()\n\n if args.add_dataset:\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 'Tuberculosis',\n 'Other diseases']\n\n else:\n\n text_list = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged 
cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration']\n \n \n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n entity_features = get_text_features(text_encoder,entity,tokenizer,device,max_length=args.max_length)\n\n image_features,image_features_pool = image_encoder(image)\n if args.add_dataset:\n pred_class_image, moe_img = model(image_features,text_features,args)\n else:\n pred_class_image = model(image_features,text_features)\n\n\n if args.bce or args.asl:\n label = label.float()\n\n label_mask = (label != -1).squeeze()\n\n\n\n if args.add_dataset:\n loss_moe_img = moe_cl_loss(moe_img, dataset_label)\n\n if args.asl:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label_image.view(-1,1))\n elif args.bce:\n pred_class_image = pred_class_image[label_mask]\n label_image = label[label_mask] \n loss_ce_image = F.binary_cross_entropy(pred_class_image.view(-1,1),label_image.view(-1,1))\n else:\n if args.asl:\n loss_ce_image = ASL_loss(pred_class_image.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_image = F.binary_cross_entropy_with_logits(pred_class_image.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_image = ce_loss(pred_class_image.view(-1,2),label.view(-1)) \n\n if args.use_entity_features:\n if args.add_dataset:\n pred_class_text, moe_txt = model(entity_features.unsqueeze(1),text_features,args)\n loss_moe_txt = moe_cl_loss(moe_txt, dataset_label)\n else:\n pred_class_text = model(entity_features.unsqueeze(1),text_features)\n\n if args.add_dataset:\n if args.asl:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label_text.view(-1,1))\n \n elif args.bce:\n pred_class_text = pred_class_text[label_mask]\n label_text = label[label_mask] \n loss_ce_text = F.binary_cross_entropy(pred_class_text.view(-1,1),label_text.view(-1,1))\n\n else:\n if args.asl:\n loss_ce_text = ASL_loss(pred_class_text.view(-1,1),label.view(-1,1))\n elif args.bce:\n loss_ce_text = F.binary_cross_entropy_with_logits(pred_class_text.view(-1,1),label.view(-1,1)) \n else:\n loss_ce_text = ce_loss(pred_class_text.view(-1,2),label.view(-1))\n\n loss_ce = loss_ce_image + loss_ce_text\n if args.add_dataset:\n loss_moe = loss_moe_img + loss_moe_txt\n\n else:\n loss_ce = loss_ce_image\n if args.add_dataset:\n loss_moe = loss_moe_img\n\n\n loss_clip = clip_loss(image_features_pool,entity_features)\n if args.add_dataset:\n loss = loss_ce + loss_clip * args.loss_ratio + args.moe_ratio * loss_moe\n else:\n loss = loss_ce + loss_clip * args.loss_ratio\n \n\n loss.backward()\n optimizer.step() \n \n writer.add_scalar('loss/loss', loss, scalar_step)\n writer.add_scalar('loss/loss_ce', loss_ce, scalar_step)\n writer.add_scalar('loss/loss_ce_image', loss_ce_image, scalar_step)\n if args.use_entity_features:\n writer.add_scalar('loss/loss_ce_text', loss_ce_text, scalar_step)\n writer.add_scalar('loss/loss_clip', loss_clip, scalar_step)\n scalar_step += 1\n\n metric_logger.update(loss=loss.item())\n metric_logger.update(loss_ce=loss_ce.item())\n 
metric_logger.update(loss_ce_image=loss_ce_image.item())\n if args.use_entity_features:\n metric_logger.update(loss_ce_text=loss_ce_text.item())\n metric_logger.update(loss_clip=loss_clip.item())\n\n\n if epoch==0 and i%step_size==0 and i<=warmup_iterations: \n scheduler.step(i//step_size) \n metric_logger.update(lr = scheduler._get_lr(epoch)[0])\n\n batch_time_m.update(time.time() - end)\n end = time.time()\n batch_count = i + 1\n if i % 100 == 0:\n batch_size = len(image)\n num_samples = batch_count * batch_size\n samples_per_epoch = data_loader.num_samples\n percent_complete = 100.0 * batch_count / num_batches_per_epoch\n\n # NOTE loss is coarsely sampled, just master node and per log update\n loss_m.update(loss.item(), batch_size)\n loss_clip_m.update(loss_clip.item(), batch_size)\n loss_ce_m.update(loss_ce.item(), batch_size)\n loss_ce_image_m.update(loss_ce_image.item(), batch_size)\n if args.use_entity_features:\n loss_ce_text_m.update(loss_ce_text.item(), batch_size)\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Loss_ce_text: {loss_ce_text_m.val:#.5g} ({loss_ce_text_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n else:\n logging.info(\n f\"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] \"\n f\"Loss: {loss_m.val:#.5g} ({loss_m.avg:#.4g}) \"\n f\"Loss_clip: {loss_clip_m.val:#.5g} ({loss_clip_m.avg:#.4g}) \"\n f\"Loss_ce: {loss_ce_m.val:#.5g} ({loss_ce_m.avg:#.4g}) \"\n f\"Loss_ce_image: {loss_ce_image_m.val:#.5g} ({loss_ce_image_m.avg:#.4g}) \"\n f\"Data (t): {data_time_m.avg:.3f} \"\n f\"Batch (t): {batch_time_m.avg:.3f}, {batch_size/ batch_time_m.val:#g}/s \"\n f\"LR: { scheduler._get_lr(epoch)[0]:5f} \"\n )\n\n # gather the stats from all processes\n metric_logger.synchronize_between_processes()\n print(\"Averaged stats:\", metric_logger.global_avg()) \n return {k: \"{:.6f}\".format(meter.global_avg) for k, meter in metric_logger.meters.items()} #,loss_epoch.mean()" }, { "identifier": "valid_on_cheXpert", "path": "engine/train.py", "snippet": "def valid_on_cheXpert(model,image_encoder,text_encoder,tokenizer,data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = ['atelectasis', 'cardiomegaly', 'consolidation', 'edema', 'pleural effusion']\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n # initialize the ground truth and output tensor\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n \n # \n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n 
val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n \n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class=5)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "valid_on_chestxray14", "path": "engine/train.py", "snippet": "def valid_on_chestxray14(model, image_encoder, text_encoder, tokenizer, data_loader, epoch, device, args, config, writer):\n criterion = nn.CrossEntropyLoss()\n model.eval()\n image_encoder.eval()\n text_encoder.eval()\n text_list = [\"atelectasis\",\"cardiomegaly\",\"pleural effusion\",\"infiltration\",\"lung mass\",\"lung nodule\",\"pneumonia\",\"pneumothorax\",\"consolidation\",\"edema\",\"emphysema\",\"fibrosis\",\"pleural thicken\",\"hernia\"]\n text_features = get_text_features(text_encoder,text_list,tokenizer,device,max_length=args.max_length)\n \n val_scalar_step = epoch*len(data_loader)\n val_losses = []\n\n gt = torch.FloatTensor()\n gt = gt.cuda()\n pred = torch.FloatTensor()\n pred = pred.cuda()\n\n for i, sample in enumerate(data_loader):\n image = sample['image'].to(device,non_blocking=True) \n label = sample['label'].long().to(device)\n if args.bce or args.asl:\n label = label.float()\n\n gt = torch.cat((gt, label), 0)\n with torch.no_grad():\n image_features,image_features_pool = image_encoder(image)\n\n if args.add_dataset:\n pred_class,_ = model(image_features,text_features,args)#b,14,2/1\n val_loss = F.binary_cross_entropy(pred_class.view(-1,1),label.view(-1, 1))\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n pred_class = model(image_features,text_features)#b,14,2/1\n if args.bce or args.asl:\n val_loss = F.binary_cross_entropy_with_logits(pred_class.view(-1,1),label.view(-1, 1))\n pred_class = torch.sigmoid(pred_class)\n pred = torch.cat((pred, pred_class[:,:,0]), 0)\n else:\n val_loss = criterion(pred_class.view(-1,2),label.view(-1))\n pred_class = torch.softmax(pred_class, dim=-1)\n pred = torch.cat((pred, pred_class[:,:,1]), 0)\n\n\n\n val_losses.append(val_loss.item())\n writer.add_scalar('val_loss/loss', val_loss, val_scalar_step)\n val_scalar_step += 1\n metrics = compute_AUCs(gt, pred, n_class = 14)\n AUROC_avg = metrics['mean_auc']\n avg_val_loss = np.array(val_losses).mean()\n return avg_val_loss,AUROC_avg,metrics" }, { "identifier": "CLP_clinical", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n self.mlp_embed = nn.Sequential(\n nn.Linear(embed_dim, embed_dim),\n nn.GELU(),\n nn.Linear(embed_dim, embed_dim)\n )\n self.embed_dim = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n self.init_parameters()\n \n def 
init_parameters(self):\n nn.init.constant_(self.logit_scale, np.log(1 / 0.07))\n for m in self.mlp_embed:\n if isinstance(m, nn.Linear):\n nn.init.normal_(m.weight, std=self.embed_dim ** -0.5)\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n config = BertConfig.from_pretrained(bert_model_name, output_hidden_states=True)#bert-base-uncased\n model = AutoModel.from_pretrained(bert_model_name, config=config)#, return_dict=True)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n #input batch_size,token, return batch_size,dim \n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n last_hidden_state, pooler_output, hidden_states = output[0],output[1],output[2]\n encode_out = self.mlp_embed(pooler_output)\n # encode_out = pooler_output\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "ModelRes", "path": "models/clip_tqn.py", "snippet": "class ModelRes(nn.Module):\n def __init__(self, res_base_model):\n super(ModelRes, self).__init__()\n self.resnet_dict = {\"resnet50\": models.resnet50(pretrained=True)}\n self.resnet = self._get_res_basemodel(res_base_model)\n\n num_ftrs = int(self.resnet.fc.in_features)\n self.res_features = nn.Sequential(*list(self.resnet.children())[:-2])\n\n self.res_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.res_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_res_basemodel(self, res_model_name):\n try:\n res_model = self.resnet_dict[res_model_name]\n print(\"Image feature extractor:\", res_model_name)\n return res_model\n except:\n raise (\"Invalid model name. 
Check the config file and pass one of: resnet18 or resnet50\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n res_fea = self.res_features(img)\n\n res_fea = rearrange(res_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(res_fea,'b n d -> (b n) d')\n x = self.res_l1(h)\n x = F.relu(x)\n x = self.res_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "TQN_Model", "path": "models/clip_tqn.py", "snippet": "class TQN_Model(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n out = self.mlp_head(features) #(batch_size, query_num)\n return out" }, { "identifier": "TQN_Model_Add", "path": "models/clip_tqn.py", "snippet": "class TQN_Model_Add(nn.Module):\n def __init__(self, \n embed_dim: int = 768, \n class_num: int = 1, \n gate_num: int = 3,\n high_dim: int = 32,\n lam: list = [1, 0]\n ):\n super().__init__()\n self.d_model = embed_dim\n self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))\n decoder_layer = TransformerDecoderLayer(self.d_model, 4, 1024,\n 0.1, 'relu',normalize_before=True)\n decoder_layerV1 = TransformerDecoderLayerV1(self.d_model, 4, 1024,\n 0.1, 'relu', True, lam)\n self.decoder_norm = nn.LayerNorm(self.d_model)\n self.decoder = TransformerDecoder(decoder_layer, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n \n self.decoderV1_1 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n self.decoderV1_2 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n 
return_intermediate=False)\n self.decoderV1_3 = TransformerDecoderV1(decoder_layerV1, 4, self.decoder_norm,\n return_intermediate=False)\n\n self.dropout_feas = nn.Dropout(0.1)\n\n self.mlp_head = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_1 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_2 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n )\n self.mlp_head_3 = nn.Sequential( # nn.LayerNorm(768),\n nn.Linear(embed_dim, class_num)\n ) \n \n self.gate_head = nn.Sequential(\n nn.Linear(embed_dim, gate_num)\n )\n self.cl_head = nn.Sequential(\n nn.Linear(gate_num, high_dim)\n )\n\n self.apply(self._init_weights)\n \n @staticmethod\n def _init_weights(module):\n if isinstance(module, nn.Linear):\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.MultiheadAttention):\n module.in_proj_weight.data.normal_(mean=0.0, std=0.02)\n module.out_proj.weight.data.normal_(mean=0.0, std=0.02)\n\n elif isinstance(module, nn.Embedding):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if module.padding_idx is not None:\n module.weight.data[module.padding_idx].zero_()\n \n def forward(self, image_features, text_features, args):\n\n batch_size = image_features.shape[0]\n image_features = image_features.transpose(0,1)\n text_features = text_features.unsqueeze(1).repeat(1, batch_size, 1)\n image_features = self.decoder_norm(image_features)\n text_features = self.decoder_norm(text_features)\n \n image_features_pool = torch.mean(image_features,dim=0).unsqueeze(0)\n features = self.decoderV1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n gate_weight = self.gate_head(image_features_pool.squeeze(0)) \n \n features = self.dropout_feas(features).transpose(0,1) #b,embed_dim\n \n \n if args.finetune:\n features_1 = self.decoderV1_1(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_1 = self.dropout_feas(features_1).transpose(0,1) \n features_2 = self.decoderV1_2(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_2 = self.dropout_feas(features_2).transpose(0,1) \n features_3 = self.decoderV1_3(text_features, image_features, image_features_pool,\n memory_key_padding_mask=None, pos=None, query_pos=None)\n features_3 = self.dropout_feas(features_3).transpose(0,1) \n \n out_1 = torch.sigmoid(self.mlp_head_1(features_1))\n out_2 = torch.sigmoid(self.mlp_head_2(features_2))\n out_3 = torch.sigmoid(self.mlp_head_3(features_3))\n\n\n out = self.mlp_head(features)\n \n gate_weight = torch.softmax(gate_weight, dim=1)\n out = torch.sigmoid(out)\n\n high_dimension = self.cl_head(gate_weight)\n out_bias = gate_weight[:,0].unsqueeze(1).unsqueeze(2) * out_1 + gate_weight[:,1].unsqueeze(1).unsqueeze(2) * out_2 + gate_weight[:,2].unsqueeze(1).unsqueeze(2) * out_3\n\n out = args.main_ratio * out + args.bias_ratio * out_bias\n\n return out, high_dimension" }, { "identifier": "ModelDense", "path": "models/clip_tqn.py", "snippet": "class ModelDense(nn.Module):\n def __init__(self, dense_base_model):\n super(ModelDense, self).__init__()\n \n self.densenet_dict = {\"densenet121\": models.densenet121(pretrained=True)}#,\n # \"densenet161\": models.densenet161(pretrained=True)}\n self.densenet = self._get_dense_basemodel(dense_base_model)\n num_ftrs = int(self.densenet.classifier.in_features)\n 
self.dense_features = self.densenet.features\n self.dense_l1 = nn.Linear(num_ftrs, num_ftrs)\n self.dense_l2 = nn.Linear(num_ftrs, 768)\n\n def _get_dense_basemodel(self, dense_base_model):\n try:\n dense_model = self.densenet_dict[dense_base_model]\n print(\"Image feature extractor:\", dense_base_model)\n return dense_model\n except:\n raise (\"Invalid model name. Check the config file and pass one of: densenet121 or densenet161\")\n\n def forward(self, img):\n batch_size = img.shape[0]\n dense_fea = self.dense_features(img)#N, 1024, 7,7\n dense_fea = rearrange(dense_fea,'b d n1 n2 -> b (n1 n2) d')\n h = rearrange(dense_fea,'b n d -> (b n) d')\n x = self.dense_l1(h)\n x = F.relu(x)\n x = self.dense_l2(x)\n out_emb = rearrange(x,'(b n) d -> b n d',b=batch_size)\n out_pool = torch.mean(out_emb,dim=1)\n return out_emb,out_pool" }, { "identifier": "CLP_clinical2", "path": "models/clip_tqn.py", "snippet": "class CLP_clinical2(nn.Module):\n def __init__(self,\n bert_model_name: str,\n embed_dim: int = 768,\n freeze_layers:Union[Tuple[int, int], int] = None):\n super().__init__()\n self.bert_model = self._get_bert_basemodel(bert_model_name=bert_model_name, freeze_layers=freeze_layers)\n\n\n def _get_bert_basemodel(self, bert_model_name, freeze_layers=None):#12\n try:\n print(bert_model_name)\n model = AutoModel.from_pretrained(bert_model_name)\n print(\"Text feature extractor:\", bert_model_name)\n print(\"bert encoder layers:\",len(model.encoder.layer))\n except:\n raise (\"Invalid model name. Check the config file and pass a BERT model from transformers lybrary\")\n\n if freeze_layers is not None:\n for layer_idx in freeze_layers:\n for param in list(model.encoder.layer[layer_idx].parameters()):\n param.requires_grad = False\n return model\n\n def encode_text(self, text):\n output = self.bert_model(input_ids = text['input_ids'],attention_mask = text['attention_mask'] )\n encode_out = output.last_hidden_state[:,0,:]\n return encode_out\n \n def forward(self,text1,text2):\n text1_features = self.encode_text(text1)\n text2_features = self.encode_text(text2)\n text1_features = F.normalize(text1_features, dim=-1)\n text2_features = F.normalize(text2_features, dim=-1)\n return text1_features, text2_features, self.logit_scale.exp()" }, { "identifier": "BertTokenizer", "path": "models/tokenization_bert.py", "snippet": "class BertTokenizer(PreTrainedTokenizer):\n r\"\"\"\n Construct a BERT tokenizer. Based on WordPiece.\n This tokenizer inherits from :class:`~transformers.PreTrainedTokenizer` which contains most of the main methods.\n Users should refer to this superclass for more information regarding those methods.\n Args:\n vocab_file (:obj:`str`):\n File containing the vocabulary.\n do_lower_case (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to lowercase the input when tokenizing.\n do_basic_tokenize (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to do basic tokenization before WordPiece.\n never_split (:obj:`Iterable`, `optional`):\n Collection of tokens which will never be split during tokenization. Only has an effect when\n :obj:`do_basic_tokenize=True`\n unk_token (:obj:`str`, `optional`, defaults to :obj:`\"[UNK]\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n sep_token (:obj:`str`, `optional`, defaults to :obj:`\"[SEP]\"`):\n The separator token, which is used when building a sequence from multiple sequences, e.g. 
two sequences for\n sequence classification or for a text and a question for question answering. It is also used as the last\n token of a sequence built with special tokens.\n pad_token (:obj:`str`, `optional`, defaults to :obj:`\"[PAD]\"`):\n The token used for padding, for example when batching sequences of different lengths.\n cls_token (:obj:`str`, `optional`, defaults to :obj:`\"[CLS]\"`):\n The classifier token which is used when doing sequence classification (classification of the whole sequence\n instead of per-token classification). It is the first token of the sequence when built with special tokens.\n mask_token (:obj:`str`, `optional`, defaults to :obj:`\"[MASK]\"`):\n The token used for masking values. This is the token used when training this model with masked language\n modeling. This is the token which the model will try to predict.\n tokenize_chinese_chars (:obj:`bool`, `optional`, defaults to :obj:`True`):\n Whether or not to tokenize Chinese characters.\n This should likely be deactivated for Japanese (see this `issue\n <https://github.com/huggingface/transformers/issues/328>`__).\n strip_accents: (:obj:`bool`, `optional`):\n Whether or not to strip all accents. If this option is not specified, then it will be determined by the\n value for :obj:`lowercase` (as in the original BERT).\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n\n def __init__(\n self,\n vocab_file,\n do_lower_case=True,\n do_basic_tokenize=True,\n never_split=None,\n unk_token=\"[UNK]\",\n sep_token=\"[SEP]\",\n pad_token=\"[PAD]\",\n cls_token=\"[CLS]\",\n mask_token=\"[MASK]\",\n tokenize_chinese_chars=True,\n strip_accents=None,\n **kwargs\n ):\n super().__init__(\n do_lower_case=do_lower_case,\n do_basic_tokenize=do_basic_tokenize,\n never_split=never_split,\n unk_token=unk_token,\n sep_token=sep_token,\n pad_token=pad_token,\n cls_token=cls_token,\n mask_token=mask_token,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n **kwargs,\n )\n\n if not os.path.isfile(vocab_file):\n raise ValueError(\n \"Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained \"\n \"model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(vocab_file)\n )\n self.vocab = load_vocab(vocab_file)\n self.ids_to_tokens = collections.OrderedDict([(ids, tok) for tok, ids in self.vocab.items()])\n self.do_basic_tokenize = do_basic_tokenize\n if do_basic_tokenize:\n self.basic_tokenizer = BasicTokenizer(\n do_lower_case=do_lower_case,\n never_split=never_split,\n tokenize_chinese_chars=tokenize_chinese_chars,\n strip_accents=strip_accents,\n )\n self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab, unk_token=self.unk_token)\n\n @property\n def do_lower_case(self):\n return self.basic_tokenizer.do_lower_case\n\n @property\n def vocab_size(self):\n return len(self.vocab)\n\n def get_vocab(self):\n return dict(self.vocab, **self.added_tokens_encoder)\n\n def _tokenize(self, text):\n split_tokens = []\n if self.do_basic_tokenize:\n for token in self.basic_tokenizer.tokenize(text, never_split=self.all_special_tokens):\n\n # If the token is part of the never_split set\n if token in self.basic_tokenizer.never_split:\n split_tokens.append(token)\n else:\n split_tokens += self.wordpiece_tokenizer.tokenize(token)\n else:\n split_tokens = self.wordpiece_tokenizer.tokenize(text)\n return split_tokens\n\n def _convert_token_to_id(self, token):\n \"\"\" Converts a token (str) in an id using the vocab. \"\"\"\n return self.vocab.get(token, self.vocab.get(self.unk_token))\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n return self.ids_to_tokens.get(index, self.unk_token)\n\n def convert_tokens_to_string(self, tokens):\n \"\"\" Converts a sequence of tokens (string) in a single string. \"\"\"\n out_string = \" \".join(tokens).replace(\" ##\", \"\").strip()\n return out_string\n\n def build_inputs_with_special_tokens(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and\n adding special tokens. A BERT sequence has the following format:\n - single sequence: ``[CLS] X ``\n - pair of sequences: ``[CLS] A [SEP] B [SEP]``\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs to which the special tokens will be added.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `input IDs <../glossary.html#input-ids>`__ with the appropriate special tokens.\n \"\"\"\n if token_ids_1 is None:\n return [self.cls_token_id] + token_ids_0\n cls = [self.cls_token_id]\n sep = [self.sep_token_id]\n return cls + token_ids_0 + sep + token_ids_1 + sep\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer ``prepare_for_model`` method.\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (:obj:`bool`, `optional`, defaults to :obj:`False`):\n Whether or not the token list is already formatted with special tokens for the model.\n Returns:\n :obj:`List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n\n if already_has_special_tokens:\n if token_ids_1 is not None:\n raise ValueError(\n \"You should not supply a second sequence if the provided sequence of \"\n \"ids is already formatted with special tokens for the model.\"\n )\n return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))\n\n if token_ids_1 is not None:\n return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]\n return [1] + ([0] * len(token_ids_0)) + [1]\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Create a mask from the two sequences passed to be used in a sequence-pair classification task. A BERT sequence\n pair mask has the following format:\n ::\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n If :obj:`token_ids_1` is :obj:`None`, this method only returns the first portion of the mask (0s).\n Args:\n token_ids_0 (:obj:`List[int]`):\n List of IDs.\n token_ids_1 (:obj:`List[int]`, `optional`):\n Optional second list of IDs for sequence pairs.\n Returns:\n :obj:`List[int]`: List of `token type IDs <../glossary.html#token-type-ids>`_ according to the given\n sequence(s).\n \"\"\"\n sep = [self.sep_token_id]\n cls = [self.cls_token_id]\n if token_ids_1 is None:\n return len(cls + token_ids_0 + sep) * [0]\n return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n index = 0\n if os.path.isdir(save_directory):\n vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n else:\n vocab_file = (filename_prefix + \"-\" if filename_prefix else \"\") + save_directory\n with open(vocab_file, \"w\", encoding=\"utf-8\") as writer:\n for token, token_index in sorted(self.vocab.items(), key=lambda kv: kv[1]):\n if index != token_index:\n logger.warning(\n \"Saving vocabulary to {}: vocabulary indices are not consecutive.\"\n \" Please check that the vocabulary is not corrupted!\".format(vocab_file)\n )\n index = token_index\n writer.write(token + \"\\n\")\n index += 1\n return (vocab_file,)" }, { "identifier": "MIMIC_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class MIMIC_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,1:])#40 class for fine-grained query list\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), 
interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity']#搞错了 还不是list\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Mergetrain_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Mergetrain_Dataset(Dataset):\n def __init__(self, json_path, csv_path, sty_path,image_res,args):\n self.json_info = json.load(open(json_path,'r'))\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,2:])#60 class for fine-grained query list\n self.label_dataset_list = np.asarray(data_info.iloc[:,1])\n\n sty_info = pd.read_csv(sty_path)\n self.sty_dict_info = self.csv_to_dict(sty_info)\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n\n if args.colourjitter:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n\n 
transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.4),\n transforms.RandomGrayscale(),\n\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ])\n\n else:\n self.transform = transforms.Compose([ \n transforms.RandomResizedCrop(image_res,scale=(0.2, 1.0), interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.RandomHorizontalFlip(),\n RandomAugment(2,7,isPIL=True,augs=['Identity','AutoContrast','Equalize','Brightness','Sharpness',\n 'ShearX', 'ShearY', 'TranslateX', 'TranslateY', 'Rotate']), \n transforms.ToTensor(),\n normalize,\n ]) \n\n \n def csv_to_dict(self,sty_info):\n tui_list = sty_info.iloc[:,0]\n sty_list = sty_info.iloc[:,1]\n sty_dict = defaultdict(list)\n for idx in tqdm(range(len(tui_list))):\n tui_idx = tui_list[idx]\n sty_idx = sty_list[idx]\n sty_dict[tui_idx] = sty_idx\n return sty_dict\n \n def __len__(self):\n return len(self.img_path_list)\n \n def __getitem__(self, index):\n\n if self.label_dataset_list[index] == 0:\n img_path = self.img_path_list[index].replace(\"/nvme/zhangruipeng/zhangxiaoman/dataset/MIMIC-CXR-DCM/files\", '/remote-home/share/medical/public/MIMIC-CXR-JPG/MIMIC-CXR/small/files')\n class_label = self.class_list[index] \n\n # index_transit = np.load(\"/remote-home/tianjiedai/KAD/R1_CLIP_LR/A1_DATA/small/index0626.npy\")\n # new_index_json = index_transit[index]\n # entities = self.json_info[new_index_json]['entities']\n # captions = self.json_info[new_index_json]['caption']\n \n entities = self.json_info[index]['entities']\n captions = self.json_info[index]['caption']\n\n\n if len(entities) != 0:\n caption_list = ''\n entity_details = ''\n for entity in entities:\n sub_caption = entity['caption']\n sub_entities = entity['entity']#搞错了 还不是list\n sub_entity_details = ''\n for sub_entity in sub_entities:\n try:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n except:\n sub_entity_details += ' [ENT] ' + sub_entity['Entity'] \n entity_details = entity_details + sub_entity_details + ' [SEP] '\n caption_list = caption_list + sub_caption + ' [SEP] '\n else:\n caption_list = ''\n entity_details = ''\n for sub_caption in captions:\n caption_list = caption_list + sub_caption + ' [SEP] '\n entity_details = caption_list\n \n # img = open_jpg(img_path).convert('RGB') \n # img = Image.open(img_path).convert('RGB') \n # image = self.transform(img)\n # return {\n # \"image\": image,\n # \"label\": class_label,\n # \"caption\": caption_list,\n # \"entity\": entity_details\n # }\n \n else:\n img_path = self.img_path_list[index]\n class_label = self.class_list[index] \n caption_list = ''\n head = ['normal', 'pleural effusion', 'opacity', 'pneumothorax', 'edema', 'atelectasis', 'tube', 'consolidation','enlarged cardiomediastinum','tip', 'pneumonia','line','cardiomegaly', 'fracture','calcification',\n 'device','engorgement', 'nodule', 'wire', 'pacemaker', 'pleural thicken', 'marking', 'scar', 'hyperinflate', 'blunt', 'collapse', 'emphysema', 'aerate', 'mass','infiltration', 'obscure', 'deformity', 'hernia',\n 'drainage', 'distention', 'shift', 'stent', 'lesion', 'hardware', 'dilation', 'aspiration',\n 'fibrosis',\t'No Finding', 'Pleural Other', 'Support Devices', 'Aortic enlargement',\n 'Clavicle fracture', 'Enlarged PA', 'ILD', 'Lung cavity', 'Lung cyst', 'Mediastinal shift',\t\n 'Nodule/Mass', 'Pulmonary fibrosis', 'Rib fracture', 'Other lesion', 'COPD', 'Lung tumor', 
'Tuberculosis',\n 'Other diseases']\n index_positive = np.where(class_label == 1)\n entity = np.array(head)[index_positive]\n entity_details = ''\n for sub_entity in entity:\n entity_details = entity_details + sub_entity + ' [SEP] '\n\n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n label_dataset = self.label_dataset_list[index]\n\n return {\n \"image\": image,\n \"label\": class_label,\n \"label_dataset\": label_dataset,\n \"caption\": caption_list,\n \"entity\": entity_details\n }" }, { "identifier": "Chestxray14_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class Chestxray14_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,3:])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize(image_res, interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ])\n \n def __getitem__(self, index):\n img_path = self.img_path_list[index].replace('/mnt/petrelfs/zhangxiaoman/DATA/Chestxray/ChestXray8/','/remote-home/share/medical/public/ChestXray8/')\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" }, { "identifier": "CheXpert_Dataset", "path": "dataset/dataset_entity.py", "snippet": "class CheXpert_Dataset(Dataset):\n def __init__(self, csv_path,image_res):\n data_info = pd.read_csv(csv_path)\n self.img_path_list = np.asarray(data_info.iloc[:,0])\n self.class_list = np.asarray(data_info.iloc[:,[13,7,11,10,15]])\n\n normalize = transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))\n self.transform = transforms.Compose([ \n transforms.Resize([image_res,image_res], interpolation=transforms.InterpolationMode.BICUBIC),\n transforms.ToTensor(),\n normalize,\n ]) \n \n def __getitem__(self, index):\n img_path = os.path.join('/remote-home/share/tianjiedai/',self.img_path_list[index])\n class_label = self.class_list[index] \n img = Image.open(img_path).convert('RGB') \n image = self.transform(img)\n return {\n \"image\": image,\n \"label\": class_label\n }\n \n def __len__(self):\n return len(self.img_path_list)" } ]
import argparse
import os
import logging
import yaml
import numpy as np
import random
import time
import datetime
import json
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import socket
from pathlib import Path
from functools import partial
from sklearn.metrics import roc_auc_score
from collections import OrderedDict
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
from transformers import AutoModel,BertConfig,AutoTokenizer

from factory import utils
from scheduler import create_scheduler
from optim import create_optimizer
from engine.train import train,valid_on_cheXpert,valid_on_chestxray14
from models.clip_tqn import CLP_clinical,ModelRes,TQN_Model,TQN_Model_Add,ModelDense,CLP_clinical2
from models.tokenization_bert import BertTokenizer
from dataset.dataset_entity import MIMIC_Dataset,Mergetrain_Dataset, Chestxray14_Dataset,CheXpert_Dataset
from io import BytesIO
17,491
    if args.add_dataset == True:
        train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)
    else:
        train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=train_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    train_dataloader.num_samples = len(train_dataset)
    train_dataloader.num_batches = len(train_dataloader)

    val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res'])
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=val_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    val_dataloader.num_samples = len(val_dataset)
    val_dataloader.num_batches = len(val_dataloader)

    test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res'])
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=test_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader.num_samples = len(test_dataset)
    test_dataloader.num_batches = len(test_dataloader)

    test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res'])
    test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader_chexpert = DataLoader(
        test_dataset_chexpert,
        batch_size=config['batch_size'],
        num_workers=4,
        pin_memory=True,
        sampler=test_sampler_chexpert,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader_chexpert.num_samples = len(test_dataset_chexpert)
    test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert)

    if args.image_encoder_name == 'resnet':
        image_encoder = ModelRes(res_base_model='resnet50').cuda()
    elif args.image_encoder_name == 'dense':
        image_encoder = ModelDense(dense_base_model = 'densenet121').cuda()

    if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT':
        tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
        text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda()
    else:
        tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True)
        text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda()

    if args.bert_pretrained:
        checkpoint = torch.load(args.bert_pretrained, map_location='cpu')
        state_dict = checkpoint["state_dict"]
        text_encoder.load_state_dict(state_dict)
        print('Load pretrained bert success from: ',args.bert_pretrained)
        if args.freeze_bert:
            for param in text_encoder.parameters():
                param.requires_grad = False

    if args.add_dataset:
        if 'lam' in config:
            model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda()
        else:
            model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda()
    else:
        if 'lam' in config:
            model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda()
        else:
            model = TQN_Model(class_num = args.class_num).cuda()

    model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False)
    model_without_ddp = model.module

    if args.finetune:
        image_encoder_without_ddp = image_encoder
    else:
        image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False)
        image_encoder_without_ddp = image_encoder.module

    text_encoder_without_ddp = text_encoder

    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model_without_ddp,image_encoder_without_ddp,text_encoder_without_ddp)
    arg_sche = utils.AttrDict(config['schedular'])
# import ruamel.yaml as yaml

def main(args, config):
    torch.cuda.current_device()
    torch.cuda._initialized = True
    print("Total CUDA devices: ", torch.cuda.device_count())
    torch.set_default_tensor_type('torch.FloatTensor')

    utils.init_distributed_mode(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    cudnn.benchmark = True

    start_epoch = 0
    max_epoch = config['schedular']['epochs']
    warmup_steps = config['schedular']['warmup_epochs']

    num_tasks = utils.get_world_size()
    global_rank = utils.get_rank()
    sampler_rank = global_rank
    print('sampler_rank',sampler_rank,'num_tasks',num_tasks)

    #### Dataset ####
    print("Creating dataset")
    if args.add_dataset == True:
        train_dataset = Mergetrain_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)
    else:
        train_dataset = MIMIC_Dataset(config['train_entity_file'], config['train_fg_query_file_v1'], config['mrsty_file'],config['image_res'], args)

    train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=train_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    train_dataloader.num_samples = len(train_dataset)
    train_dataloader.num_batches = len(train_dataloader)

    val_dataset = Chestxray14_Dataset(config['chestxray_valid_file'],config['image_res'])
    val_sampler = torch.utils.data.distributed.DistributedSampler(val_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    val_dataloader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=val_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    val_dataloader.num_samples = len(val_dataset)
    val_dataloader.num_batches = len(val_dataloader)

    test_dataset = Chestxray14_Dataset(config['chestxray_test_file'],config['image_res'])
    test_sampler = torch.utils.data.distributed.DistributedSampler(test_dataset,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader = DataLoader(
        test_dataset,
        batch_size=config['batch_size'],
        num_workers=8,
        pin_memory=True,
        sampler=test_sampler,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader.num_samples = len(test_dataset)
    test_dataloader.num_batches = len(test_dataloader)

    test_dataset_chexpert = CheXpert_Dataset(config['chexpert_valid_file'],config['image_res'])
    test_sampler_chexpert = torch.utils.data.distributed.DistributedSampler(test_dataset_chexpert,num_replicas=num_tasks, rank=sampler_rank, shuffle=True)
    test_dataloader_chexpert = DataLoader(
        test_dataset_chexpert,
        batch_size=config['batch_size'],
        num_workers=4,
        pin_memory=True,
        sampler=test_sampler_chexpert,
        collate_fn=None,
        worker_init_fn=utils.seed_worker,
        drop_last=True,
    )
    test_dataloader_chexpert.num_samples = len(test_dataset_chexpert)
    test_dataloader_chexpert.num_batches = len(test_dataloader_chexpert)

    if args.image_encoder_name == 'resnet':
        image_encoder = ModelRes(res_base_model='resnet50').cuda()
    elif args.image_encoder_name == 'dense':
        image_encoder = ModelDense(dense_base_model = 'densenet121').cuda()

    if args.bert_model_name == 'emilyalsentzer/Bio_ClinicalBERT':
        tokenizer = BertTokenizer.from_pretrained(args.bert_model_name)
        text_encoder = CLP_clinical2(bert_model_name=args.bert_model_name).cuda()
    else:
        tokenizer = AutoTokenizer.from_pretrained(args.bert_model_name,do_lower_case=True, local_files_only=True)
        text_encoder = CLP_clinical(bert_model_name=args.bert_model_name).cuda()

    if args.bert_pretrained:
        checkpoint = torch.load(args.bert_pretrained, map_location='cpu')
        state_dict = checkpoint["state_dict"]
        text_encoder.load_state_dict(state_dict)
        print('Load pretrained bert success from: ',args.bert_pretrained)
        if args.freeze_bert:
            for param in text_encoder.parameters():
                param.requires_grad = False

    if args.add_dataset:
        if 'lam' in config:
            model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim, lam = config['lam']).cuda()
        else:
            model = TQN_Model_Add(class_num = args.class_num, gate_num = args.gate_num, high_dim = args.high_dim).cuda()
    else:
        if 'lam' in config:
            model = TQN_Model(class_num = args.class_num, lam = config['lam']).cuda()
        else:
            model = TQN_Model(class_num = args.class_num).cuda()

    model = torch.nn.parallel.DistributedDataParallel(model, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False)
    model_without_ddp = model.module

    if args.finetune:
        image_encoder_without_ddp = image_encoder
    else:
        image_encoder = torch.nn.parallel.DistributedDataParallel(image_encoder, device_ids = [args.gpu], find_unused_parameters=True, broadcast_buffers=False)
        image_encoder_without_ddp = image_encoder.module

    text_encoder_without_ddp = text_encoder

    arg_opt = utils.AttrDict(config['optimizer'])
    optimizer = create_optimizer(arg_opt, model_without_ddp,image_encoder_without_ddp,text_encoder_without_ddp)
    arg_sche = utils.AttrDict(config['schedular'])
lr_scheduler, _ = create_scheduler(arg_sche, optimizer)
1
2023-10-30 00:24:16+00:00
24k
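The first record above (the medical-imaging training script) wraps the model in torch.nn.parallel.DistributedDataParallel and then builds the optimizer and LR scheduler against the unwrapped .module handles. Below is a minimal, self-contained sketch of that wiring pattern only; the tiny Linear stand-in, the gloo/CPU backend, and the AdamW/cosine hyperparameters are illustrative assumptions, not the repo's create_optimizer/create_scheduler configuration. Launch with: torchrun --nproc_per_node=1 ddp_sketch.py

import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

def main():
    # torchrun supplies MASTER_ADDR/MASTER_PORT/RANK/WORLD_SIZE for this call.
    dist.init_process_group(backend="gloo")  # the record uses per-GPU workers; gloo/CPU keeps the sketch portable

    model = torch.nn.Linear(16, 4)  # stand-in for the record's TQN_Model
    model = DDP(model, find_unused_parameters=True, broadcast_buffers=False)
    model_without_ddp = model.module  # unwrapped module, used when building the optimizer

    # The record builds its optimizer/scheduler from the *_without_ddp handles via
    # create_optimizer/create_scheduler; plain AdamW + cosine annealing stands in for that here.
    optimizer = torch.optim.AdamW(model_without_ddp.parameters(), lr=1e-4, weight_decay=0.05)
    lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=100)

    loss = model(torch.randn(8, 16)).sum()
    loss.backward()
    optimizer.step()
    lr_scheduler.step()
    if dist.get_rank() == 0:
        print("one step done, lr =", lr_scheduler.get_last_lr())
    dist.destroy_process_group()

if __name__ == "__main__":
    main()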
ifrit98/storage-subnet
neurons/miner.py
[ { "identifier": "hash_data", "path": "storage/shared/ecc.py", "snippet": "def hash_data(data):\n \"\"\"\n Compute a SHA3-256 hash of the input data and return its integer representation.\n\n The function handles both byte-like and non-byte-like inputs by converting non-byte inputs to\n strings and then encoding to bytes before hashing.\n\n Parameters:\n - data (bytes | bytearray | object): Data to be hashed.\n\n Returns:\n - int: Integer representation of the SHA3-256 hash of the input data.\n\n Raises:\n - TypeError: If the hashing operation encounters an incompatible data type.\n \"\"\"\n if not isinstance(data, (bytes, bytearray)):\n data_str = str(data)\n data = data_str.encode()\n h = hashlib.sha3_256(data).hexdigest()\n return int(h, 16)" }, { "identifier": "setup_CRS", "path": "storage/shared/ecc.py", "snippet": "def setup_CRS(curve=\"P-256\"):\n \"\"\"\n Generate a pair of random points to serve as a Common Reference String (CRS) for elliptic curve operations.\n\n The CRS is essential for various cryptographic protocols that rely on a shared reference\n between parties, typically for the purpose of ensuring consistent cryptographic operations.\n\n Parameters:\n - curve (str, optional): Name of the elliptic curve to use; defaults to \"P-256\".\n\n Returns:\n - tuple(ECC.EccPoint, ECC.EccPoint): A 2-tuple of ECC.EccPoint instances representing the base points (g, h).\n\n Raises:\n - ValueError: If the specified elliptic curve name is not recognized.\n \"\"\"\n curve_obj = ECC.generate(curve=curve)\n g = curve_obj.pointQ # Base point\n h = ECC.generate(curve=curve).pointQ # Another random point\n return g, h" }, { "identifier": "ECCommitment", "path": "storage/shared/ecc.py", "snippet": "class ECCommitment:\n \"\"\"\n Elliptic Curve based commitment scheme allowing one to commit to a chosen value while keeping it hidden to others.\n\n Attributes:\n g (ECC.EccPoint): The base point of the elliptic curve used as part of the commitment.\n h (ECC.EccPoint): Another random point on the elliptic curve used as part of the commitment.\n\n Methods:\n commit(m): Accepts a message, hashes it, and produces a commitment to the hashed message.\n open(c, m_val, r): Accepts a commitment, a hashed message, and a random value to verify the commitment.\n\n The `commit` method will print the commitment process, and the `open` method will print the verification process.\n \"\"\"\n\n def __init__(self, g, h, verbose=False):\n self.g = g # Base point of the curve\n self.h = h # Another random point on the curve\n self.verbose = verbose\n\n def commit(self, m): # AKA Seal.\n \"\"\"\n Create a cryptographic commitment to a message.\n\n The message is hashed, and the hash is used along with a random number to form the commitment\n using the public parameters g and h. 
The commitment can be verified with the `open` method.\n\n Parameters:\n - m (bytes | bytearray | object): The message to commit to.\n\n Returns:\n - tuple: A 3-tuple (commitment, hashed message value, random number used in the commitment).\n\n Side Effects:\n - This method will print the commitment details to the console.\n\n Raises:\n - Exception: If the commitment calculation fails.\n \"\"\"\n m_val = hash_data(m) # Compute hash of the data\n r = random.randint(1, 2**256)\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"Committing: Data = {m}\\nHashed Value = {m_val}\\nRandom Value = {r}\\nComputed Commitment = {c}\\n\"\n )\n return c, m_val, r\n\n def open(self, c, m_val, r):\n \"\"\"\n Verify a commitment using the original message hash and randomness.\n\n This method recomputes the commitment using the public parameters and compares it with\n the provided commitment to check its validity.\n\n Parameters:\n - c (ECC.EccPoint): The commitment point to verify.\n - m_val (int): The integer value of the hashed message used in the commitment.\n - r (int): The random number used in the commitment.\n\n Returns:\n - bool: True if the verification succeeds (commitment is valid), False otherwise.\n\n Side Effects:\n - This method will print the verification details to the console.\n\n Raises:\n - Exception: If the verification calculation fails.\n \"\"\"\n c1 = self.g.__mul__(m_val)\n c2 = self.h.__mul__(r)\n computed_c = c1.__add__(c2)\n if self.verbose:\n print(\n f\"\\nOpening: Hashed Value = {m_val}\\nRandom Value = {r}\\nRecomputed Commitment = {computed_c}\\nOriginal Commitment = {c}\"\n )\n return computed_c == c" }, { "identifier": "ecc_point_to_hex", "path": "storage/shared/ecc.py", "snippet": "def ecc_point_to_hex(point):\n \"\"\"\n Convert an elliptic curve point to a hexadecimal string.\n\n This encoding is typically used for compact representation or for preparing the data\n to be transmitted over protocols that may not support binary data.\n\n Parameters:\n - point (ECC.EccPoint): An ECC point to convert.\n\n Returns:\n - str: Hexadecimal string representing the elliptic curve point.\n\n Raises:\n - AttributeError: If the input is not a valid ECC point with accessible x and y coordinates.\n \"\"\"\n point_str = \"{},{}\".format(point.x, point.y)\n return binascii.hexlify(point_str.encode()).decode()" }, { "identifier": "hex_to_ecc_point", "path": "storage/shared/ecc.py", "snippet": "def hex_to_ecc_point(hex_str, curve):\n \"\"\"\n Convert a hexadecimal string back into an elliptic curve point.\n\n This function is typically used to deserialize an ECC point that has been transmitted or stored as a hex string.\n\n Parameters:\n - hex_str (str): The hex string representing an elliptic curve point.\n - curve (str): The name of the elliptic curve the point belongs to.\n\n Returns:\n - ECC.EccPoint: The elliptic curve point represented by the hex string.\n\n Raises:\n - ValueError: If the hex string is not properly formatted or does not represent a valid point on the specified curve.\n \"\"\"\n point_str = binascii.unhexlify(hex_str).decode()\n x, y = map(int, point_str.split(\",\"))\n return ECC.EccPoint(x, y, curve=curve)" }, { "identifier": "MerkleTree", "path": "storage/shared/merkle.py", "snippet": "class MerkleTree(object):\n \"\"\"\n Represents a Merkle Tree, a data structure used for efficiently summarizing and verifying the\n integrity of large sets of data. 
The Merkle Tree is a binary tree where each leaf node is the hash\n of a data block and every non-leaf node is the hash of its children nodes.\n\n Attributes:\n hash_function (callable): The hash function used for generating hashes of the blocks\n and non-leaf nodes in the Merkle Tree.\n leaves (list): A list where each element is a bytearray representing the hashed value of a leaf.\n levels (list of lists): A list of lists where each sublist represents a level of the tree, starting\n from the leaves up to the root.\n is_ready (bool): Indicates whether the tree has been fully constructed and is ready to provide\n the Merkle root and proofs.\n\n Methods:\n add_leaf(values, do_hash=False): Adds one or multiple leaves to the tree. If `do_hash` is True,\n it will hash the values before adding them as leaves.\n get_leaf(index): Retrieves the hexadecimal string representation of a leaf at the given index.\n get_leaf_count(): Returns the total number of leaves in the tree.\n get_tree_ready_state(): Checks if the tree has been fully constructed.\n make_tree(): Constructs the Merkle Tree from the current leaves. This method must be called\n after all leaves are added and before retrieving the Merkle root or proofs.\n get_merkle_root(): Retrieves the Merkle root as a hexadecimal string if the tree is ready.\n get_proof(index): Generates a proof of inclusion for the leaf at the given index. This proof\n consists of a list of sibling hashes that, when combined with the target leaf,\n can reproduce the Merkle root.\n update_leaf(index, new_value): Updates the value of the leaf at the given index with `new_value`\n and recalculates the hashes up the tree to reflect this change.\n serialize(): Converts the Merkle Tree into a JSON-formatted string for storage or transmission.\n deserialize(json_data, hash_type=\"sha3_256\"): Reconstructs the Merkle Tree from a JSON string,\n using the specified hash function.\n\n Raises:\n Exception: If the `hash_type` provided during initialization is not supported or recognized.\n\n Example:\n # Create a Merkle tree using the SHA3-256 hash function\n merkle_tree = MerkleTree(hash_type='sha3_256')\n\n # Add data blocks (as leaves) to the tree\n merkle_tree.add_leaf(['block1', 'block2', 'block3'], do_hash=True)\n\n # Construct the tree\n merkle_tree.make_tree()\n\n # Retrieve the Merkle root\n root = merkle_tree.get_merkle_root()\n\n # Get proof of inclusion for the first data block\n proof = merkle_tree.get_proof(0)\n\n # Update the value of the first leaf and reconstruct the tree\n merkle_tree.update_leaf(0, 'new_block1_hashed_value')\n merkle_tree.make_tree()\n\n # Serialize the tree for storage\n serialized_tree = merkle_tree.serialize()\n\n # Deserialize the tree for later use\n deserialized_tree = MerkleTree.deserialize(serialized_tree, hash_type='sha3_256')\n\n Note:\n The hash_function attribute is determined by the hash_type parameter provided at initialization.\n Only hash types supported by the `hashlib` library can be used. 
Attempting to use an unsupported\n hash type will result in an exception.\n \"\"\"\n\n def __init__(self, hash_type=\"sha3_256\"):\n hash_type = hash_type.lower()\n if hash_type in [\"sha3_256\"]:\n self.hash_function = getattr(hashlib, hash_type)\n else:\n raise Exception(\"`hash_type` {} nor supported\".format(hash_type))\n\n self.reset_tree()\n\n def __eq__(self, other):\n if not isinstance(other, MerkleTree):\n return False\n return self.serialize() == other.serialize()\n\n def _to_hex(self, x):\n try: # python3\n return x.hex()\n except: # python2\n return binascii.hexlify(x)\n\n def reset_tree(self):\n self.leaves = list()\n self.levels = None\n self.is_ready = False\n\n def add_leaf(self, values, do_hash=False):\n self.is_ready = False\n # check if single leaf\n if not isinstance(values, tuple) and not isinstance(values, list):\n values = [values]\n for v in values:\n if do_hash:\n v = v.encode(\"utf-8\")\n v = self.hash_function(v).hexdigest()\n v = bytearray.fromhex(v)\n self.leaves.append(v)\n\n def get_leaf(self, index):\n return self._to_hex(self.leaves[index])\n\n def get_leaf_count(self):\n return len(self.leaves)\n\n def get_tree_ready_state(self):\n return self.is_ready\n\n def _calculate_next_level(self):\n solo_leave = None\n N = len(self.levels[0]) # number of leaves on the level\n if N % 2 == 1: # if odd number of leaves on the level\n solo_leave = self.levels[0][-1]\n N -= 1\n\n new_level = []\n for l, r in zip(self.levels[0][0:N:2], self.levels[0][1:N:2]):\n new_level.append(self.hash_function(l + r).digest())\n if solo_leave is not None:\n new_level.append(solo_leave)\n self.levels = [\n new_level,\n ] + self.levels # prepend new level\n\n def make_tree(self):\n \"\"\"\n Constructs the Merkle Tree from the leaves that have been added.\n\n This must be called after adding all the leaves and before calling\n get_merkle_root or get_proof to ensure the tree is constructed.\n \"\"\"\n self.is_ready = False\n if self.get_leaf_count() > 0:\n self.levels = [\n self.leaves,\n ]\n while len(self.levels[0]) > 1:\n self._calculate_next_level()\n self.is_ready = True\n\n def get_merkle_root(self):\n if self.is_ready:\n if self.levels is not None:\n return self._to_hex(self.levels[0][0])\n else:\n return None\n else:\n return None\n\n def get_proof(self, index):\n \"\"\"\n Generates the proof for the existence of a leaf at the specified index within the Merkle Tree.\n\n A Merkle proof is a collection of sibling hashes on the path from a leaf to the root of the tree.\n This proof can be used to independently verify that a leaf is indeed part of the Merkle tree without\n needing the entire tree. Each element of the proof shows the direction ('left' or 'right') and the\n corresponding hash that pairs with the path to the root.\n\n Parameters:\n index (int): The index of the target leaf for which to generate the Merkle proof. The index must\n correspond to the position of the leaf in the original list of leaves when the tree\n was constructed.\n\n Returns:\n list of dicts: A list where each dictionary contains a single key-value pair. The key is either\n 'left' or 'right', indicating the side of the sibling hash, and the value is a\n string representing the hexadecimal hash value of the sibling. 
If the tree is not\n ready or the index is out of bounds, None is returned.\n\n Raises:\n IndexError: If the index provided is not within the range of the leaves in the tree.\n ValueError: If the tree has not been constructed by calling `make_tree` method, or the index\n is not an integer.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree` and has been populated with leaves and made ready\n proof = merkle_tree.get_proof(2)\n print(proof) # Outputs something like [{'left': 'abcd...'}, {'right': 'ef01...'}]\n\n Note:\n The Merkle proof is only valid if the tree is in the ready state (`is_ready` attribute is True),\n which occurs after the `make_tree` method has been called. If the tree is not ready or the index\n is not valid, the method will return None.\n \"\"\"\n if self.levels is None:\n return None\n elif not self.is_ready or index > len(self.leaves) - 1 or index < 0:\n return None\n else:\n proof = []\n for x in range(len(self.levels) - 1, 0, -1):\n level_len = len(self.levels[x])\n if (index == level_len - 1) and (\n level_len % 2 == 1\n ): # skip if this is an odd end node\n index = int(index / 2.0)\n continue\n is_right_node = index % 2\n sibling_index = index - 1 if is_right_node else index + 1\n sibling_pos = \"left\" if is_right_node else \"right\"\n sibling_value = self._to_hex(self.levels[x][sibling_index])\n proof.append({sibling_pos: sibling_value})\n index = int(index / 2.0)\n return proof\n\n def update_leaf(self, index, new_value):\n \"\"\"\n Updates the value of a leaf at a given index in the Merkle Tree and recalculates the hashes along\n the path from the updated leaf to the root of the tree to reflect the change.\n\n This method allows the Merkle Tree to maintain integrity by ensuring that any updates to the leaf\n nodes are propagated upwards, resulting in a new Merkle root that represents the current state of\n the leaves.\n\n Parameters:\n index (int): The index of the leaf to update. The index is zero-based and must be less than\n the number of leaves in the tree.\n new_value (str): The new value in hexadecimal format to which the leaf should be updated. This\n value should be a valid hexadecimal string that represents the hashed data\n if hashing was applied to the leaves upon tree construction.\n\n Returns:\n None\n\n Raises:\n ValueError: If the tree is not ready for updates (i.e., `is_ready` is False), if the index is\n not an integer, if the new_value is not a hexadecimal string, or if the index is\n out of bounds (less than 0 or greater than or equal to the number of leaves).\n IndexError: If the index is out of the range of current leaves.\n\n Example:\n # Assuming `merkle_tree` is an instance of `MerkleTree`, populated with leaves and made ready.\n merkle_tree.update_leaf(0, 'a1b2c3d4e5f67890')\n # The leaf at index 0 is updated, and changes are propagated to the root.\n\n Note:\n The tree must have been constructed and be in a ready state before calling this method. 
If the\n tree has not been made by calling the `make_tree` method, or the index is invalid, this method\n will not perform an update and will return None.\n \"\"\"\n if not self.is_ready:\n return None\n new_value = bytearray.fromhex(new_value)\n self.levels[-1][index] = new_value\n for x in range(len(self.levels) - 1, 0, -1):\n parent_index = index // 2\n left_child = self.levels[x][parent_index * 2]\n try:\n right_child = self.levels[x][parent_index * 2 + 1]\n except IndexError:\n right_child = bytearray()\n self.levels[x - 1][parent_index] = self.hash_function(\n left_child + right_child\n ).digest()\n index = parent_index\n\n def serialize(self):\n \"\"\"\n Serializes the MerkleTree object into a JSON string.\n \"\"\"\n # Convert the bytearray leaves and levels to hex strings for serialization\n leaves = [self._to_hex(leaf) for leaf in self.leaves]\n levels = None\n if self.levels is not None:\n levels = []\n for level in self.levels:\n levels.append([self._to_hex(item) for item in level])\n\n # Construct a dictionary with the MerkleTree properties\n merkle_tree_data = {\n \"leaves\": leaves,\n \"levels\": levels,\n \"is_ready\": self.is_ready,\n }\n\n # Convert the dictionary to a JSON string\n return json.dumps(merkle_tree_data)\n\n @classmethod\n def deserialize(cls, json_data, hash_type=\"sha3_256\"):\n \"\"\"\n Deserializes the JSON string into a MerkleTree object.\n \"\"\"\n # Convert the JSON string back to a dictionary\n merkle_tree_data = json.loads(json_data)\n\n # Create a new MerkleTree object\n m_tree = cls(hash_type)\n\n # Convert the hex strings back to bytearrays and set the leaves and levels\n m_tree.leaves = [bytearray.fromhex(leaf) for leaf in merkle_tree_data[\"leaves\"]]\n if merkle_tree_data[\"levels\"] is not None:\n m_tree.levels = []\n for level in merkle_tree_data[\"levels\"]:\n m_tree.levels.append([bytearray.fromhex(item) for item in level])\n m_tree.is_ready = merkle_tree_data[\"is_ready\"]\n\n return m_tree" }, { "identifier": "b64_encode", "path": "storage/shared/utils.py", "snippet": "def b64_encode(data: Union[bytes, str, List[str], List[bytes], dict]) -> str:\n \"\"\"\n Encodes the given data into a base64 string. If the data is a list or dictionary of bytes, it converts\n the bytes into hexadecimal strings before encoding.\n\n Args:\n data (list or dict): The data to be base64 encoded. Can be a list of bytes or a dictionary with bytes values.\n\n Returns:\n str: The base64 encoded string of the input data.\n\n Raises:\n TypeError: If the input is not a list, dict, or bytes.\n \"\"\"\n if isinstance(data, bytes):\n data = data.hex()\n if isinstance(data, list) and len(data) and isinstance(data[0], bytes):\n data = [d.hex() for d in data]\n if isinstance(data, dict) and isinstance(data[list(data.keys())[0]], bytes):\n data = {k: v.hex() for k, v in data.items()}\n return base64.b64encode(json.dumps(data).encode()).decode(\"utf-8\")" }, { "identifier": "b64_decode", "path": "storage/shared/utils.py", "snippet": "def b64_decode(data: bytes, decode_hex: bool = False, encrypted: bool = False):\n \"\"\"\n Decodes a base64 string into a list or dictionary. If decode_hex is True, it converts any hexadecimal strings\n within the data back into bytes.\n\n Args:\n data (bytes or str): The base64 encoded data to be decoded.\n decode_hex (bool): A flag to indicate whether to decode hex strings into bytes. Defaults to False.\n\n Returns:\n list or dict: The decoded data. 
Returns a list if the original encoded data was a list, and a dict if it was a dict.\n\n Raises:\n ValueError: If the input is not properly base64 encoded or if hex decoding fails.\n \"\"\"\n data = data.decode(\"utf-8\") if isinstance(data, bytes) else data\n decoded_data = json.loads(\n base64.b64decode(data) if encrypted else base64.b64decode(data).decode(\"utf-8\")\n )\n if decode_hex:\n try:\n decoded_data = (\n [bytes.fromhex(d) for d in decoded_data]\n if isinstance(decoded_data, list)\n else {k: bytes.fromhex(v) for k, v in decoded_data.items()}\n )\n except:\n pass\n return decoded_data" }, { "identifier": "chunk_data", "path": "storage/shared/utils.py", "snippet": "def chunk_data(data: bytes, chunksize: int) -> List[bytes]:\n \"\"\"\n Generator function that chunks the given data into pieces of a specified size.\n\n Args:\n data (bytes): The binary data to be chunked.\n chunksize (int): The size of each chunk in bytes.\n\n Yields:\n bytes: A chunk of the data with the size equal to 'chunksize' or the remaining size of data.\n\n Raises:\n ValueError: If 'chunksize' is less than or equal to 0.\n \"\"\"\n for i in range(0, len(data), chunksize):\n yield data[i : i + chunksize]" }, { "identifier": "safe_key_search", "path": "storage/shared/utils.py", "snippet": "async def safe_key_search(database: aioredis.Redis, pattern: str) -> List[str]:\n \"\"\"\n Safely search for keys in the database that doesn't block.\n `scan_iter` uses cursor under the hood.\n \"\"\"\n return [key for key in await database.scan_iter(pattern)]" }, { "identifier": "run", "path": "storage/miner/run.py", "snippet": "def run(self):\n \"\"\"\n Initiates and manages the main loop for the miner on the Bittensor network.\n\n This function performs the following primary tasks:\n 1. Check for registration on the Bittensor network.\n 2. Attaches the miner's forward, blacklist, and priority functions to its axon.\n 3. Starts the miner's axon, making it active on the network.\n 4. Regularly updates the metagraph with the latest network state.\n 5. Optionally sets weights on the network, defining how much trust to assign to other nodes.\n 6. Handles graceful shutdown on keyboard interrupts and logs unforeseen errors.\n\n The miner continues its operations until `should_exit` is set to True or an external interruption occurs.\n During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its\n knowledge of the network (metagraph), and sets its weights. 
This process ensures the miner remains active\n and up-to-date with the network's latest state.\n\n Note:\n - The function leverages the global configurations set during the initialization of the miner.\n - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests.\n\n Raises:\n KeyboardInterrupt: If the miner is stopped by a manual interruption.\n Exception: For unforeseen errors during the miner's operation, which are logged for diagnosis.\n \"\"\"\n block_handler_substrate = SubstrateInterface(\n ss58_format=bt.__ss58_format__,\n use_remote_preset=True,\n url=self.subtensor.chain_endpoint,\n type_registry=bt.__type_registry__,\n )\n\n netuid = self.config.netuid\n\n # --- Check for registration.\n if not self.subtensor.is_hotkey_registered(\n netuid=netuid,\n hotkey_ss58=self.wallet.hotkey.ss58_address,\n ):\n bt.logging.error(\n f\"Wallet: {self.wallet} is not registered on netuid {netuid}\"\n f\"Please register the hotkey using `btcli subnets register` before trying again\"\n )\n exit()\n\n tempo = block_handler_substrate.query(\n module=\"SubtensorModule\", storage_function=\"Tempo\", params=[netuid]\n ).value\n\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n should_retry = False\n\n def handler(obj, update_nr, subscription_id):\n current_block = obj[\"header\"][\"number\"]\n block_hash = block_handler_substrate.get_block_hash(current_block)\n bt.logging.debug(f\"New block #{current_block}\")\n\n bt.logging.debug(\n f\"Blocks since epoch: {(current_block + netuid + 1) % (tempo + 1)}\"\n )\n\n nonlocal last_extrinsic_hash\n nonlocal checked_extrinsics_count\n nonlocal should_retry\n\n if last_extrinsic_hash != None:\n try:\n receipt = block_handler_substrate.retrieve_extrinsic_by_hash(\n block_hash, last_extrinsic_hash\n )\n bt.logging.debug(\n f\"Last set-weights call: {'Success' if receipt.is_success else format('Failure, reason: %s', receipt.error_message['name'] if receipt.error_message != None else 'nil')}\"\n )\n\n should_retry = False\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n except Exception as e:\n checked_extrinsics_count += 1\n bt.logging.debug(f\"An error occurred, extrinsic not found in block.\")\n finally:\n if checked_extrinsics_count >= 20:\n should_retry = True\n last_extrinsic_hash = None\n checked_extrinsics_count = 0\n\n if ((current_block + netuid + 1) % (tempo + 1) == 0) or should_retry:\n bt.logging.info(\n f\"New epoch started, setting weights at block {current_block}\"\n )\n with self.subtensor.substrate as substrate:\n call = substrate.compose_call(\n call_module=\"SubtensorModule\",\n call_function=\"set_weights\",\n call_params={\n \"dests\": [self.my_subnet_uid],\n \"weights\": [65535],\n \"netuid\": netuid,\n \"version_key\": 1,\n },\n )\n\n # Period dictates how long the extrinsic will stay as part of waiting pool\n extrinsic = substrate.create_signed_extrinsic(\n call=call, keypair=self.wallet.hotkey, era={\"period\": 1000}\n )\n\n dry_run = runtime_call(\n substrate=substrate,\n api=\"TaggedTransactionQueue\",\n method=\"validate_transaction\",\n params=[\"InBlock\", extrinsic, block_hash],\n block_hash=block_hash,\n )\n bt.logging.debug(dry_run)\n\n response = substrate.submit_extrinsic(\n extrinsic,\n wait_for_inclusion=False,\n wait_for_finalization=False,\n )\n\n result_data = substrate.rpc_request(\"author_pendingExtrinsics\", [])\n for extrinsic_data in result_data[\"result\"]:\n extrinsic = substrate.runtime_config.create_scale_object(\n \"Extrinsic\", 
metadata=substrate.metadata\n )\n extrinsic.decode(\n ScaleBytes(extrinsic_data),\n check_remaining=substrate.config.get(\"strict_scale_decode\"),\n )\n\n if extrinsic.value[\"extrinsic_hash\"] == response.extrinsic_hash:\n bt.logging.debug(\n \"Weights transaction is in the pending transaction pool\"\n )\n\n last_extrinsic_hash = response.extrinsic_hash\n should_retry = False\n\n # --- Update the miner storage information periodically.\n if not should_retry:\n update_storage_stats(self)\n bt.logging.debug(\"Storage statistics updated...\")\n\n if self.should_exit:\n return True\n\n block_handler_substrate.subscribe_block_headers(handler)" }, { "identifier": "set_weights", "path": "storage/miner/set_weights.py", "snippet": "def set_weights_for_miner(\n subtensor: \"bt.subtensor\",\n netuid: int,\n uid: int,\n wallet: \"bt.wallet\",\n metagraph: \"bt.metagraph\",\n wandb_on: bool = False,\n tempo: int = 360,\n wait_for_inclusion: bool = False,\n wait_for_finalization: bool = False,\n) -> bool:" }, { "identifier": "compute_subsequent_commitment", "path": "storage/miner/utils.py", "snippet": "def compute_subsequent_commitment(data, previous_seed, new_seed, verbose=False):\n \"\"\"\n Computes a new commitment based on provided data and a change from an old seed to a new seed.\n This function is typically used in cryptographic operations to update commitments without\n altering the underlying data.\n\n Parameters:\n - data: The original data for which the commitment is being updated.\n - previous_seed: The seed used in the previous commitment.\n - new_seed: The seed to be used for the new commitment.\n - verbose (bool): If True, additional debug information will be printed. Defaults to False.\n\n Returns:\n - A tuple containing the new commitment and the proof of the old commitment.\n\n If verbose is set to True, debug information about the types and contents of the parameters\n will be printed to aid in debugging.\n \"\"\"\n if verbose:\n bt.logging.debug(\"IN COMPUTE SUBESEQUENT COMMITMENT\")\n bt.logging.debug(\"type of data :\", type(data))\n bt.logging.debug(\"type of prev_seed:\", type(previous_seed))\n bt.logging.debug(\"type of new_seed :\", type(new_seed))\n proof = hash_data(data + previous_seed)\n return hash_data(str(proof).encode(\"utf-8\") + new_seed), proof" }, { "identifier": "save_data_to_filesystem", "path": "storage/miner/utils.py", "snippet": "def save_data_to_filesystem(data, directory, filename):\n \"\"\"\n Saves data to the filesystem at the specified directory and filename. 
If the directory does\n not exist, it is created.\n\n Parameters:\n - data: The data to be saved.\n - directory (str): The directory path where the data should be saved.\n - filename (str): The name of the file to save the data in.\n\n Returns:\n - file_path (str): The full path to the saved file.\n\n This function is useful for persisting data to the disk.\n \"\"\"\n # Ensure the directory exists\n directory = os.path.expanduser(directory)\n os.makedirs(directory, exist_ok=True)\n file_path = os.path.join(directory, filename)\n with open(file_path, \"wb\") as file:\n file.write(data)\n return file_path" }, { "identifier": "load_from_filesystem", "path": "storage/miner/utils.py", "snippet": "def load_from_filesystem(filepath):\n \"\"\"\n Loads data from a file in the filesystem.\n\n Parameters:\n - filepath (str): The path to the file from which data is to be loaded.\n\n Returns:\n - data: The data read from the file.\n\n This function is a straightforward utility for reading binary data from a file.\n \"\"\"\n with open(os.path.expanduser(filepath), \"rb\") as file:\n data = file.read()\n return data" }, { "identifier": "commit_data_with_seed", "path": "storage/miner/utils.py", "snippet": "def commit_data_with_seed(committer, data_chunks, n_chunks, seed):\n \"\"\"\n Commits chunks of data with a seed using a Merkle tree structure to create a proof of\n integrity for each chunk. This function is used in environments where the integrity\n and order of data need to be verifiable.\n\n Parameters:\n - committer: The committing object, which should have a commit method.\n - data_chunks (list): A list of data chunks to be committed.\n - n_chunks (int): The number of chunks expected to be committed.\n - seed: A seed value that is combined with data chunks before commitment.\n\n Returns:\n - randomness (list): A list of randomness values associated with each data chunk's commitment.\n - chunks (list): The list of original data chunks that were committed.\n - points (list): A list of commitment points in hex format.\n - merkle_tree (MerkleTree): A Merkle tree constructed from the commitment points.\n\n This function handles the conversion of commitment points to hex format and adds them to the\n Merkle tree. 
The completed tree represents the combined commitments.\n \"\"\"\n merkle_tree = MerkleTree()\n\n # Commit each chunk of data\n randomness, chunks, points = [None] * n_chunks, [None] * n_chunks, [None] * n_chunks\n for index, chunk in enumerate(data_chunks):\n c, m_val, r = committer.commit(chunk + str(seed).encode())\n c_hex = ecc_point_to_hex(c)\n randomness[index] = r\n chunks[index] = chunk\n points[index] = c_hex\n merkle_tree.add_leaf(c_hex)\n\n # Create the tree from the leaves\n merkle_tree.make_tree()\n return randomness, chunks, points, merkle_tree" }, { "identifier": "init_wandb", "path": "storage/miner/utils.py", "snippet": "def init_wandb(self, reinit=False):\n \"\"\"Starts a new wandb run.\"\"\"\n tags = [\n self.wallet.hotkey.ss58_address,\n storage.__version__,\n str(storage.__spec_version__),\n f\"netuid_{self.metagraph.netuid}\",\n ]\n\n if self.config.mock:\n tags.append(\"mock\")\n\n wandb_config = {\n key: copy.deepcopy(self.config.get(key, None))\n for key in (\"neuron\", \"reward\", \"netuid\", \"wandb\")\n }\n\n if wandb_config[\"neuron\"] is not None:\n wandb_config[\"neuron\"].pop(\"full_path\", None)\n\n self.wandb = wandb.init(\n anonymous=\"allow\",\n reinit=reinit,\n project=self.config.wandb.project_name,\n entity=self.config.wandb.entity,\n config=wandb_config,\n mode=\"offline\" if self.config.wandb.offline else \"online\",\n dir=self.config.neuron.full_path\n if self.config.neuron is not None\n else \"wandb_logs\",\n tags=tags,\n notes=self.config.wandb.notes,\n )\n bt.logging.success(\n prefix=\"Started a new wandb run\",\n sufix=f\"<blue> {self.wandb.name} </blue>\",\n )" }, { "identifier": "get_directory_size", "path": "storage/miner/utils.py", "snippet": "def get_directory_size(path):\n \"\"\"\n Calculates the total size of files in a specified directory.\n\n This function traverses the directory at the given path, including all subdirectories, and sums up the size\n of each file to calculate the total directory size.\n\n Args:\n path (str): The file path of the directory whose size is to be calculated.\n\n Returns:\n int: The total size of the directory in bytes (B).\n\n Usage:\n directory_size_gb = get_directory_size('/path/to/directory')\n \"\"\"\n total_size = 0\n path = os.path.expanduser(path)\n for dirpath, dirnames, filenames in os.walk(path):\n for f in filenames:\n fp = os.path.join(dirpath, f)\n if not os.path.islink(fp):\n total_size += os.path.getsize(fp)\n return total_size" }, { "identifier": "get_free_disk_space", "path": "storage/miner/utils.py", "snippet": "def get_free_disk_space(path=\".\"):\n \"\"\"\n Retrieves the free disk space for the drive containing the specified path.\n\n This function provides the free disk space of the drive on which the specified path resides.\n It's useful for understanding the storage capacity and usage of the system where the miner is running.\n\n Args:\n path (str): A file path on the drive whose free disk space is to be fetched. 
Typically, you can\n provide the root path ('/') to get the stats for the primary drive.\n\n Returns:\n int: The free space on the disk in bytes (B).\n\n Usage:\n free_disk_space_gb = get_free_disk_space('/')\n \"\"\"\n stats = get_disk_space_stats(path)\n free = stats.get(\"free_bytes\", 0)\n return free" }, { "identifier": "update_storage_stats", "path": "storage/miner/utils.py", "snippet": "def update_storage_stats(self):\n \"\"\"\n Updates the miner's storage statistics.\n\n This function updates the miner's storage statistics, including the free disk space, current storage usage,\n and percent disk usage. It's useful for understanding the storage capacity and usage of the system where\n the miner is running.\n \"\"\"\n\n self.free_memory = get_free_disk_space()\n bt.logging.info(f\"Free memory: {self.free_memory} bytes\")\n self.current_storage_usage = get_directory_size(self.config.database.directory)\n bt.logging.info(f\"Miner storage usage: {self.current_storage_usage} bytes\")\n self.percent_disk_usage = self.current_storage_usage / self.free_memory\n bt.logging.info(f\"Miner % disk usage : {100 * self.percent_disk_usage:.3f}%\")" }, { "identifier": "config", "path": "storage/miner/config.py", "snippet": "def config(cls):\n parser = argparse.ArgumentParser()\n bt.subtensor.add_args(parser)\n bt.logging.add_args(parser)\n bt.wallet.add_args(parser)\n bt.axon.add_args(parser)\n cls.add_args(parser)\n return bt.config(parser)" }, { "identifier": "check_config", "path": "storage/miner/config.py", "snippet": "def check_config(cls, config: \"bt.Config\"):\n r\"\"\"Checks/validates the config namespace object.\"\"\"\n bt.logging.check_config(config)\n\n if config.mock:\n config.wallet._mock = True\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n full_path = os.path.expanduser(\n \"{}/{}/{}/netuid{}/{}\".format(\n config.logging.logging_dir,\n config.wallet.name,\n config.wallet.hotkey,\n config.netuid,\n config.miner.name,\n )\n )\n log_path = os.path.join(full_path, \"logs\", timestamp)\n\n config.miner.log_path = os.path.expanduser(log_path)\n config.miner.full_path = os.path.expanduser(full_path)\n\n if not os.path.exists(config.miner.full_path):\n os.makedirs(config.miner.full_path, exist_ok=True)\n if not os.path.exists(config.miner.log_path):\n os.makedirs(config.miner.log_path, exist_ok=True)\n\n if not config.miner.dont_save_events:\n # Add custom event logger for the events.\n logger.level(\"EVENTS\", no=38, icon=\"📝\")\n logger.add(\n config.miner.full_path + \"/\" + \"EVENTS.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"EVENTS\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"INFO.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"INFO\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"DEBUG.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"DEBUG\",\n format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\",\n )\n\n logger.add(\n config.miner.full_path + \"/\" + \"TRACE.log\",\n rotation=config.miner.events_retention_size,\n serialize=True,\n enqueue=True,\n backtrace=False,\n diagnose=False,\n level=\"TRACE\",\n format=\"{time:YYYY-MM-DD at 
HH:mm:ss} | {level} | {message}\",\n )" }, { "identifier": "add_args", "path": "storage/miner/config.py", "snippet": "def add_args(cls, parser):\n parser.add_argument(\"--netuid\", type=int, default=21, help=\"The chain subnet uid.\")\n parser.add_argument(\"--test\", default=False, action=\"store_true\")\n parser.add_argument(\n \"--miner.name\",\n type=str,\n help=\"Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name. \",\n default=\"core_storage_miner\",\n )\n parser.add_argument(\n \"--miner.device\",\n type=str,\n help=\"Device to run the validator on.\",\n default=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n )\n parser.add_argument(\"--miner.verbose\", default=False, action=\"store_true\")\n\n parser.add_argument(\n \"--database.host\", default=\"localhost\", help=\"The host of the redis database.\"\n )\n parser.add_argument(\n \"--database.port\",\n type=int,\n default=6379,\n help=\"The port of the redis database.\",\n )\n parser.add_argument(\n \"--database.index\",\n type=int,\n default=0,\n help=\"The index of the redis database.\",\n )\n parser.add_argument(\n \"--database.directory\",\n default=\"~/.data\",\n help=\"The directory to store data in.\",\n )\n\n # Run config.\n parser.add_argument(\n \"--miner.set_weights_wait_for_inclusion\",\n action=\"store_true\",\n help=\"Wether to wait for the set_weights extrinsic to enter a block\",\n default=False,\n )\n parser.add_argument(\n \"--miner.set_weights_wait_for_finalization\",\n action=\"store_true\",\n help=\"Wether to wait for the set_weights extrinsic to be finalized on the chain\",\n default=False,\n )\n parser.add_argument(\n \"--miner.seconds_to_wait_to_log_presence_message\",\n type=int,\n help=\"How many seconds to wait before logging a presence message.\",\n default=4,\n )\n\n # Blacklist.\n parser.add_argument(\n \"--miner.blacklist.blacklist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Blacklist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.whitelist\",\n type=str,\n required=False,\n nargs=\"*\",\n help=\"Whitelist certain hotkeys\",\n default=[],\n )\n parser.add_argument(\n \"--miner.blacklist.force_validator_permit\",\n action=\"store_true\",\n help=\"Only allow requests from validators\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.allow_non_registered\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.blacklist.minimum_stake_requirement\",\n type=float,\n help=\"Minimum stake requirement\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.blacklist.min_request_period\",\n type=int,\n help=\"Time period (in minute) to serve a maximum of 50 requests for each hotkey\",\n default=5,\n )\n\n # Priority.\n parser.add_argument(\n \"--miner.priority.default\",\n type=float,\n help=\"Default priority of non-registered requests\",\n default=0.0,\n )\n parser.add_argument(\n \"--miner.priority.time_stake_multiplicate\",\n type=int,\n help=\"Time (in minute) it takes to make the stake twice more important in the priority queue\",\n default=10,\n )\n parser.add_argument(\n \"--miner.priority.len_request_timestamps\",\n type=int,\n help=\"Number of historic request timestamps to record\",\n default=50,\n )\n # Switches.\n parser.add_argument(\n \"--miner.no_set_weights\",\n action=\"store_true\",\n help=\"If True, the miner does not set weights.\",\n default=False,\n )\n parser.add_argument(\n 
\"--miner.no_serve\",\n action=\"store_true\",\n help=\"If True, the miner doesnt serve the axon.\",\n default=False,\n )\n parser.add_argument(\n \"--miner.no_start_axon\",\n action=\"store_true\",\n help=\"If True, the miner doesnt start the axon.\",\n default=False,\n )\n\n # Mocks.\n parser.add_argument(\n \"--miner.mock_subtensor\",\n action=\"store_true\",\n help=\"If True, the miner will allow non-registered hotkeys to mine.\",\n default=False,\n )\n\n # Wandb args\n parser.add_argument(\n \"--wandb.off\", action=\"store_true\", help=\"Turn off wandb.\", default=False\n )\n parser.add_argument(\n \"--wandb.project_name\",\n type=str,\n help=\"The name of the project where you are sending the new run.\",\n default=\"philanthropic-thunder\",\n )\n parser.add_argument(\n \"--wandb.entity\",\n type=str,\n help=\"An entity is a username or team name where youre sending runs.\",\n default=\"philanthrope\",\n )\n parser.add_argument(\n \"--wandb.offline\",\n action=\"store_true\",\n help=\"Runs wandb in offline mode.\",\n default=False,\n )\n parser.add_argument(\n \"--wandb.weights_step_length\",\n type=int,\n help=\"How many steps before we log the weights.\",\n default=10,\n )\n parser.add_argument(\n \"--wandb.run_step_length\",\n type=int,\n help=\"How many steps before we rollover to a new run.\",\n default=1500,\n )\n parser.add_argument(\n \"--wandb.notes\",\n type=str,\n help=\"Notes to add to the wandb run.\",\n default=\"\",\n )" }, { "identifier": "store_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def store_chunk_metadata(r, chunk_hash, filepath, hotkey, size, seed):\n \"\"\"\n Stores the metadata of a chunk in a Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): Miner hotkey associated with the chunk.\n size (int): The size of the chunk.\n seed (str): The seed associated with the chunk.\n\n This function stores the filepath, size (as a string), and seed for the given chunk hash.\n \"\"\"\n # Ensure that all data are in the correct format\n metadata = {\n \"filepath\": filepath,\n \"hotkey\": hotkey,\n \"size\": str(size), # Convert size to string\n \"seed\": seed, # Store seed directly\n }\n\n # Use hmset (or hset which is its modern equivalent) to store the hash\n for key, value in metadata.items():\n await r.hset(chunk_hash, key, value)" }, { "identifier": "update_seed_info", "path": "storage/miner/database.py", "snippet": "async def update_seed_info(r, chunk_hash, hotkey, seed):\n \"\"\"\n Updates the seed information for a specific chunk in the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n hotkey (str): The caller hotkey value to be updated.\n seed (str): The new seed value to be updated.\n\n This function updates the seed information for the specified chunk hash.\n \"\"\"\n # Update the existing seed information\n await r.hset(chunk_hash, \"seed\", seed)\n await r.hset(chunk_hash, \"hotkey\", hotkey)" }, { "identifier": "get_chunk_metadata", "path": "storage/miner/database.py", "snippet": "async def get_chunk_metadata(r, chunk_hash):\n \"\"\"\n Retrieves the metadata for a specific chunk from the Redis database.\n\n Args:\n r (redis.Redis): The Redis connection instance.\n chunk_hash (str): The unique hash identifying the chunk.\n\n Returns:\n dict: A dictionary containing the chunk's metadata, including filepath, size, and seed.\n Size is converted to an 
integer, and seed is decoded from bytes to a string.\n \"\"\"\n metadata = await r.hgetall(chunk_hash)\n if metadata:\n metadata[b\"size\"] = int(metadata[b\"size\"])\n metadata[b\"seed\"] = metadata[b\"seed\"].decode(\"utf-8\")\n return metadata" } ]
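The context block above documents a Pedersen-style elliptic-curve commitment scheme (setup_CRS, ECCommitment) together with hex serialization helpers from storage/shared/ecc.py. The short usage sketch below only exercises those documented calls; the committed payload bytes are arbitrary.

from storage.shared.ecc import (
    setup_CRS,
    ECCommitment,
    ecc_point_to_hex,
    hex_to_ecc_point,
)

g, h = setup_CRS(curve="P-256")   # common reference string: two independent curve points
committer = ECCommitment(g, h)

# Seal some bytes: returns the commitment point plus the (hashed value, randomness) needed to open it later.
commitment, m_val, r = committer.commit(b"some chunk of data")
assert committer.open(commitment, m_val, r)

# Round-trip the point through the hex encoding used for transport/storage.
as_hex = ecc_point_to_hex(commitment)
assert hex_to_ecc_point(as_hex, "P-256") == commitment

A verifier holding only (commitment, m_val, r) and the shared (g, h) can re-run open() to check integrity without the raw data itself, which is the property the docstrings above describe.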
import os import sys import copy import json import time import torch import typing import base64 import asyncio import aioredis import argparse import threading import traceback import bittensor as bt import storage from collections import defaultdict from Crypto.Random import get_random_bytes from typing import Dict from pprint import pprint, pformat from storage.shared.ecc import ( hash_data, setup_CRS, ECCommitment, ecc_point_to_hex, hex_to_ecc_point, ) from storage.shared.merkle import ( MerkleTree, ) from storage.shared.utils import b64_encode, b64_decode, chunk_data, safe_key_search from storage.miner import ( run, set_weights, ) from storage.miner.utils import ( compute_subsequent_commitment, save_data_to_filesystem, load_from_filesystem, commit_data_with_seed, init_wandb, get_directory_size, get_free_disk_space, update_storage_stats, ) from storage.miner.config import ( config, check_config, add_args, ) from storage.miner.database import ( store_chunk_metadata, update_seed_info, get_chunk_metadata, )
14,993
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. 
Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()")
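The store() docstring above describes the flow: base64-decode the payload, commit to it with the CRS and a seed, persist it, and record metadata. The sketch below hand-rolls the chunk-then-commit-then-Merkle bookkeeping using the helpers shown in the context (chunk_data, ECCommitment, MerkleTree); the payload, chunk size, and seed are made up, and unlike commit_data_with_seed the leaves here are re-hashed via do_hash=True, so treat it as an illustration of the idea rather than the repo's exact code path.

import base64

from storage.shared.ecc import setup_CRS, ECCommitment, ecc_point_to_hex
from storage.shared.merkle import MerkleTree
from storage.shared.utils import chunk_data

# Stand-in for synapse.encrypted_data after base64 decoding.
encrypted_byte_data = base64.b64decode(base64.b64encode(b"example payload " * 64))
seed = b"fake-seed"

g, h = setup_CRS()
committer = ECCommitment(g, h)

merkle_tree = MerkleTree()
randomness, points = [], []
for chunk in chunk_data(encrypted_byte_data, chunksize=256):
    c, m_val, r = committer.commit(chunk + seed)      # bind each chunk to the current seed
    randomness.append(r)                              # needed later to open the commitment
    points.append(ecc_point_to_hex(c))                # hex-encoded commitment point
    merkle_tree.add_leaf(points[-1], do_hash=True)    # leaf = hash of the commitment point

merkle_tree.make_tree()
print("merkle root:", merkle_tree.get_merkle_root())
print("proof for chunk 0:", merkle_tree.get_proof(0))

Per the docstrings above, a challenge would then be answered by opening the committed chunk and supplying the Merkle proof for its leaf against the stored root.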
# The MIT License (MIT) # Copyright © 2023 Yuma Rao # Copyright © 2023 philanthrope # Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated # documentation files (the “Software”), to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all copies or substantial portions of # the Software. # THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO # THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # import this repo class miner: @classmethod def check_config(cls, config: "bt.Config"): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ check_config(cls, config) @classmethod def add_args(cls, parser): """ Adds neuron-specific arguments to the argument parser. Args: parser (argparse.ArgumentParser): Parser to add arguments to. This class method enriches the argument parser with options specific to the neuron's configuration. """ add_args(cls, parser) @classmethod def config(cls): """ Retrieves the configuration for the neuron. Returns: bt.Config: The configuration object for the neuron. This class method returns the neuron's configuration, which is used throughout the neuron's lifecycle for various functionalities and operations. """ return config(cls) subtensor: "bt.subtensor" wallet: "bt.wallet" metagraph: "bt.metagraph" def __init__(self): self.config = miner.config() self.check_config(self.config) bt.logging(config=self.config, logging_dir=self.config.miner.full_path) bt.logging.info(f"{self.config}") bt.logging.info("miner.__init__()") # Init device. bt.logging.debug("loading device") self.device = torch.device(self.config.miner.device) bt.logging.debug(str(self.device)) # Init subtensor bt.logging.debug("loading subtensor") self.subtensor = bt.subtensor(config=self.config) bt.logging.debug(str(self.subtensor)) self.current_block = self.subtensor.get_current_block() # Init wallet. bt.logging.debug("loading wallet") self.wallet = bt.wallet(config=self.config) self.wallet.create_if_non_existent() if not self.config.wallet._mock: if not self.subtensor.is_hotkey_registered_on_subnet( hotkey_ss58=self.wallet.hotkey.ss58_address, netuid=self.config.netuid ): raise Exception( f"Wallet not currently registered on netuid {self.config.netuid}, please first register wallet before running" ) bt.logging.debug(f"wallet: {str(self.wallet)}") # Init metagraph. bt.logging.debug("loading metagraph") self.metagraph = bt.metagraph( netuid=self.config.netuid, network=self.subtensor.network, sync=False ) # Make sure not to sync without passing subtensor self.metagraph.sync(subtensor=self.subtensor) # Sync metagraph with subtensor. 
bt.logging.debug(str(self.metagraph)) # Setup database self.database = aioredis.StrictRedis( host=self.config.database.host, port=self.config.database.port, db=self.config.database.index, socket_keepalive=True, socket_connect_timeout=300, ) self.my_subnet_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.ss58_address ) bt.logging.info(f"Running miner on uid: {self.my_subnet_uid}") # Init wandb. if not self.config.wandb.off: bt.logging.debug("loading wandb") init_wandb(self) # The axon handles request processing, allowing validators to send this process requests. self.axon = bt.axon(wallet=self.wallet, config=self.config) bt.logging.info(f"Axon {self.axon}") # Attach determiners which functions are called when servicing a request. bt.logging.info(f"Attaching forward functions to axon.") self.axon.attach( forward_fn=self.store, blacklist_fn=self.store_blacklist_fn, priority_fn=self.store_priority_fn, ).attach( forward_fn=self.challenge, blacklist_fn=self.challenge_blacklist_fn, priority_fn=self.challenge_priority_fn, ).attach( forward_fn=self.retrieve, blacklist_fn=self.retrieve_blacklist_fn, priority_fn=self.retrieve_priority_fn, ) # Serve passes the axon information to the network + netuid we are hosting on. # This will auto-update if the axon port of external ip have changed. bt.logging.info( f"Serving axon {self.axon} on network: {self.subtensor.chain_endpoint} with netuid: {self.config.netuid}" ) self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor) # Start starts the miner's axon, making it active on the network. bt.logging.info(f"Starting axon server on port: {self.config.axon.port}") self.axon.start() # Init the event loop. self.loop = asyncio.get_event_loop() # Instantiate runners self.should_exit: bool = False self.is_running: bool = False self.thread: threading.Thread = None self.lock = asyncio.Lock() self.request_timestamps: Dict = {} self.step = 0 # Init the miner's storage request tracker self.request_count = 0 self.start_request_count_timer() self.requests_per_hour = [] self.average_requests_per_hour = 0 # Init the miner's storage usage tracker update_storage_stats(self) def start_request_count_timer(self): """ Initializes and starts a timer for tracking the number of requests received by the miner in an hour. This method sets up a one-hour timer that, upon expiration, calls the `reset_request_count` method to log the number of requests received and reset the count for the next hour. The timer is set to run in a separate thread to avoid blocking the main execution. Usage: Should be called during the initialization of the miner to start tracking requests per hour. """ self.request_count_timer = threading.Timer(3600, self.reset_request_count) self.request_count_timer.start() def reset_request_count(self): """ Logs the number of requests received in the last hour and resets the count. This method is automatically called when the one-hour timer set by `start_request_count_timer` expires. It logs the count of requests received in the last hour and then resets the count. Additionally, it restarts the timer for the next hour. Usage: This method is intended to be called automatically by a timer and typically should not be called directly. 
""" bt.logging.info( f"Number of requests received in the last hour: {self.request_count}" ) self.requests_per_hour.append(self.request_count) bt.logging.info(f"Requests per hour: {self.requests_per_hour}") self.average_requests_per_hour = sum(self.requests_per_hour) / len( self.requests_per_hour ) bt.logging.info(f"Average requests per hour: {self.average_requests_per_hour}") self.request_count = 0 self.start_request_count_timer() @property async def total_storage(self): """ Calculates the total size of data stored by the miner. This method fetches all data keys from the Redis database and sums up the size of each data object. It provides an estimate of the total amount of data currently held by the miner. Returns: int: Total size of data (in bytes) stored by the miner. Example: >>> miner.total_storage() 102400 # Example output indicating 102,400 bytes of data stored """ # Fetch all keys from Redis all_keys = await safe_key_search(self.database, "*") # Filter out keys that contain a period (temporary, remove later) filtered_keys = [key for key in all_keys if b"." not in key] # Get the size of each data object and sum them up total_size = sum( [ await get_chunk_metadata(self.database, key).get(b"size", 0) for key in filtered_keys ] ) return total_size def store_blacklist_fn( self, synapse: storage.protocol.Store ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def store_priority_fn(self, synapse: storage.protocol.Store) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. 
""" caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def challenge_blacklist_fn( self, synapse: storage.protocol.Challenge ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def challenge_priority_fn(self, synapse: storage.protocol.Challenge) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority def retrieve_blacklist_fn( self, synapse: storage.protocol.Retrieve ) -> typing.Tuple[bool, str]: """ Determines whether a given synapse should be blacklisted based on the recognition of the hotkey in the metagraph. This function is used to filter out requests from entities that are not part of the network's current state. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey. Returns: - (bool, str): A tuple where the first element is a boolean indicating whether the synapse's hotkey is blacklisted, and the second element is a string message explaining the reason. 
If the hotkey is not recognized in the metagraph, the synapse is blacklisted, and the function returns (True, "Unrecognized hotkey"). Otherwise, it returns (False, "Hotkey recognized!"), allowing the synapse to interact with the network. Usage: This method is internally used by the network to ensure that only recognized entities can participate in communication or transactions. """ if synapse.dendrite.hotkey not in self.metagraph.hotkeys: # Ignore requests from unrecognized entities. bt.logging.trace( f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}" ) return True, "Unrecognized hotkey" bt.logging.trace( f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}" ) return False, "Hotkey recognized!" def retrieve_priority_fn(self, synapse: storage.protocol.Retrieve) -> float: """ Assigns a priority to a given synapse based on the stake of the calling entity in the metagraph. This function is crucial for prioritizing network requests and ensuring that higher-stake entities are given precedence in processing. Parameters: - synapse (bt.Synapse): The synapse object which contains the dendrite information including the hotkey of the caller. Returns: - float: The priority value assigned to the synapse, derived from the stake of the calling hotkey in the metagraph. The priority is determined by the stake associated with the caller's UID in the metagraph. A higher stake results in a higher priority. Usage: This method is used within the network's request handling mechanism to allocate resources and processing time based on the stake-based priority of each request. """ caller_uid = self.metagraph.hotkeys.index( synapse.dendrite.hotkey ) # Get the caller index. prirority = float( self.metagraph.S[caller_uid] ) # Return the stake as the priority. bt.logging.trace( f"Prioritizing {synapse.dendrite.hotkey} with value: ", prirority ) return prirority async def store(self, synapse: storage.protocol.Store) -> storage.protocol.Store: """ Processes the storage request from a synapse by securely storing the provided data and returning a proof of storage. The data is committed using elliptic curve cryptography, stored on the filesystem, and the metadata is recorded in a Redis database. A cryptographic proof of the commitment, along with a digital signature from the server's hotkey, is returned in the synapse for verification by the requester. Args: synapse (storage.protocol.Store): An object containing the data to be stored, encoded in base64 format, along with associated metadata like the cryptographic curve parameters, a seed for the commitment, and the expected commitment group elements. Returns: storage.protocol.Store: The synapse is returned with additional fields populated, including the randomness used in the commitment, the commitment point itself, a signature from this storage server's hotkey, and a commitment hash that can be used for chained proofs. The method performs the following operations: 1. Decodes the base64-encoded data into raw bytes. 2. Commits to the data using the provided elliptic curve parameters and the seed to generate a commitment point. 3. Stores the raw byte data in the filesystem using a hash of the data as the filename. 4. Records metadata about the stored data in the Redis database, including the file path, previous seed, and data size. 5. Updates the synapse object with the commitment details and a digital signature. 
This process ensures the integrity and non-repudiation of the data storage, allowing clients to verify that their data has been stored correctly without the need to retrieve the full data set. Example usage: Assuming an initialized 'committer' object and 'synapse' with necessary data: >>> updated_synapse = self.store(synapse) """ bt.logging.info(f"received store request: {synapse.encrypted_data[:24]}") self.request_count += 1 # Decode the data from base64 to raw bytes encrypted_byte_data = base64.b64decode(synapse.encrypted_data) bt.logging.trace(f"store b64decrypted data: {encrypted_byte_data[:24]}") # Store the data with the hash as the key in the filesystem bt.logging.trace(f"entering hash_data()")
data_hash = hash_data(encrypted_byte_data)
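The store handler in the record above follows a fixed sequence: decode the base64 payload, hash it, write the raw bytes to the filesystem keyed by that hash, and record metadata in Redis for later challenges. The following is a minimal, self-contained sketch of that flow under simplifying assumptions: sha256 stands in for hash_data(), a plain dict stands in for the Redis metadata store, and the elliptic-curve commitment and signature steps are omitted.

# Hedged sketch of the store flow described in the docstring above.
# Assumptions: sha256 stands in for hash_data(), a plain dict stands in
# for the miner's Redis database, and the ECC commitment step is omitted.
import base64
import hashlib
import os
import tempfile

metadata_store = {}  # stand-in for the miner's Redis metadata store

def store_sketch(encrypted_b64: bytes, prev_seed: bytes, data_dir: str) -> str:
    # 1. Decode the base64-encoded payload into raw bytes
    encrypted_bytes = base64.b64decode(encrypted_b64)
    # 2. Hash the raw bytes; the hash doubles as the filesystem key
    data_hash = hashlib.sha256(encrypted_bytes).hexdigest()
    # 3. Write the raw bytes to disk under the hash
    filepath = os.path.join(data_dir, data_hash)
    with open(filepath, "wb") as f:
        f.write(encrypted_bytes)
    # 4. Record metadata (path, previous seed, size) for later challenges
    metadata_store[data_hash] = {
        "filepath": filepath,
        "prev_seed": prev_seed,
        "size": len(encrypted_bytes),
    }
    return data_hash

if __name__ == "__main__":
    with tempfile.TemporaryDirectory() as d:
        key = store_sketch(base64.b64encode(b"hello"), b"seed0", d)
        print(key, metadata_store[key]["size"])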
0
2023-10-26 18:54:47+00:00
24k
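The miner record above repeats one blacklist/priority pattern across its store, challenge, and retrieve handlers: reject callers whose hotkey is absent from the metagraph, and rank recognized callers by their stake. Below is a compact sketch of that pattern; the Metagraph class is a hypothetical stand-in for bt.metagraph, keeping only the two attributes the pattern needs (hotkeys and S).

# Hedged sketch of the shared blacklist/priority pattern from the miner above.
# Metagraph here is a hypothetical stand-in for bt.metagraph.
from dataclasses import dataclass
from typing import List, Tuple

@dataclass
class Metagraph:
    hotkeys: List[str]   # registered hotkeys, indexed by uid
    S: List[float]       # stake per uid

def blacklist_fn(metagraph: Metagraph, caller_hotkey: str) -> Tuple[bool, str]:
    # Unrecognized callers are blacklisted outright
    if caller_hotkey not in metagraph.hotkeys:
        return True, "Unrecognized hotkey"
    return False, "Hotkey recognized!"

def priority_fn(metagraph: Metagraph, caller_hotkey: str) -> float:
    # Recognized callers are prioritized by stake: higher stake, higher priority
    caller_uid = metagraph.hotkeys.index(caller_hotkey)
    return float(metagraph.S[caller_uid])

if __name__ == "__main__":
    mg = Metagraph(hotkeys=["hk-a", "hk-b"], S=[10.0, 250.0])
    print(blacklist_fn(mg, "hk-unknown"))  # (True, 'Unrecognized hotkey')
    print(priority_fn(mg, "hk-b"))         # 250.0

Stake-weighted priority means requests from higher-stake validators are serviced ahead of low-stake callers when the axon is under load, which is the behavior the handler docstrings describe.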
cpacker/MemGPT
memgpt/agent.py
[ { "identifier": "AgentState", "path": "memgpt/data_types.py", "snippet": "class AgentState:\n def __init__(\n self,\n name: str,\n user_id: uuid.UUID,\n persona: str, # the filename where the persona was originally sourced from\n human: str, # the filename where the human was originally sourced from\n llm_config: LLMConfig,\n embedding_config: EmbeddingConfig,\n preset: str,\n # (in-context) state contains:\n # persona: str # the current persona text\n # human: str # the current human text\n # system: str, # system prompt (not required if initializing with a preset)\n # functions: dict, # schema definitions ONLY (function code linked at runtime)\n # messages: List[dict], # in-context messages\n id: Optional[uuid.UUID] = None,\n state: Optional[dict] = None,\n created_at: Optional[str] = None,\n ):\n if id is None:\n self.id = uuid.uuid4()\n else:\n self.id = id\n assert isinstance(self.id, uuid.UUID), f\"UUID {self.id} must be a UUID type\"\n assert isinstance(user_id, uuid.UUID), f\"UUID {user_id} must be a UUID type\"\n\n # TODO(swooders) we need to handle the case where name is None here\n # in AgentConfig we autogenerate a name, not sure what the correct thing w/ DBs is, what about NounAdjective combos? Like giphy does? BoredGiraffe etc\n self.name = name\n self.user_id = user_id\n self.preset = preset\n self.persona = persona\n self.human = human\n\n self.llm_config = llm_config\n self.embedding_config = embedding_config\n\n self.created_at = created_at if created_at is not None else datetime.now()\n\n # state\n self.state = {} if not state else state" }, { "identifier": "Message", "path": "memgpt/data_types.py", "snippet": "class Message(Record):\n \"\"\"Representation of a message sent.\n\n Messages can be:\n - agent->user (role=='agent')\n - user->agent and system->agent (role=='user')\n - or function/tool call returns (role=='function'/'tool').\n \"\"\"\n\n def __init__(\n self,\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n role: str,\n text: str,\n model: Optional[str] = None, # model used to make function call\n name: Optional[str] = None, # optional participant name\n created_at: Optional[str] = None,\n tool_calls: Optional[List[ToolCall]] = None, # list of tool calls requested\n tool_call_id: Optional[str] = None,\n embedding: Optional[np.ndarray] = None,\n id: Optional[uuid.UUID] = None,\n ):\n super().__init__(id)\n self.user_id = user_id\n self.agent_id = agent_id\n self.text = text\n self.model = model # model name (e.g. gpt-4)\n self.created_at = datetime.now().astimezone() if created_at is None else created_at\n\n # openai info\n assert role in [\"system\", \"assistant\", \"user\", \"tool\"]\n self.role = role # role (agent/user/function)\n self.name = name\n\n # tool (i.e. 
function) call info (optional)\n\n # if role == \"assistant\", this MAY be specified\n # if role != \"assistant\", this must be null\n assert tool_calls is None or isinstance(tool_calls, list)\n self.tool_calls = tool_calls\n\n # if role == \"tool\", then this must be specified\n # if role != \"tool\", this must be null\n if role == \"tool\":\n assert tool_call_id is not None\n else:\n assert tool_call_id is None\n self.tool_call_id = tool_call_id\n\n # embedding (optional)\n self.embedding = embedding\n\n # def __repr__(self):\n # pass\n\n @staticmethod\n def dict_to_message(\n user_id: uuid.UUID,\n agent_id: uuid.UUID,\n openai_message_dict: dict,\n model: Optional[str] = None, # model used to make function call\n allow_functions_style: bool = False, # allow deprecated functions style?\n ):\n \"\"\"Convert a ChatCompletion message object into a Message object (synced to DB)\"\"\"\n\n # If we're going from deprecated function form\n if openai_message_dict[\"role\"] == \"function\":\n if not allow_functions_style:\n raise DeprecationWarning(openai_message_dict)\n assert \"tool_call_id\" in openai_message_dict, openai_message_dict\n\n # Convert from 'function' response to a 'tool' response\n # NOTE: this does not conventionally include a tool_call_id, it's on the caster to provide it\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=\"tool\", # NOTE\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=openai_message_dict[\"tool_calls\"] if \"tool_calls\" in openai_message_dict else None,\n tool_call_id=openai_message_dict[\"tool_call_id\"] if \"tool_call_id\" in openai_message_dict else None,\n )\n\n elif \"function_call\" in openai_message_dict and openai_message_dict[\"function_call\"] is not None:\n if not allow_functions_style:\n raise DeprecationWarning(openai_message_dict)\n assert openai_message_dict[\"role\"] == \"assistant\", openai_message_dict\n assert \"tool_call_id\" in openai_message_dict, openai_message_dict\n\n # Convert a function_call (from an assistant message) into a tool_call\n # NOTE: this does not conventionally include a tool_call_id (ToolCall.id), it's on the caster to provide it\n tool_calls = [\n ToolCall(\n id=openai_message_dict[\"tool_call_id\"], # NOTE: unconventional source, not to spec\n tool_call_type=\"function\",\n function={\n \"name\": openai_message_dict[\"function_call\"][\"name\"],\n \"arguments\": openai_message_dict[\"function_call\"][\"arguments\"],\n },\n )\n ]\n\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=openai_message_dict[\"role\"],\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=tool_calls,\n tool_call_id=None, # NOTE: None, since this field is only non-null for role=='tool'\n )\n\n else:\n # Basic sanity check\n if openai_message_dict[\"role\"] == \"tool\":\n assert \"tool_call_id\" in openai_message_dict and openai_message_dict[\"tool_call_id\"] is not None, openai_message_dict\n else:\n if \"tool_call_id\" in openai_message_dict:\n assert openai_message_dict[\"tool_call_id\"] is None, openai_message_dict\n\n if \"tool_calls\" in openai_message_dict and openai_message_dict[\"tool_calls\"] is not None:\n assert openai_message_dict[\"role\"] == \"assistant\", 
openai_message_dict\n\n tool_calls = [\n ToolCall(id=tool_call[\"id\"], tool_call_type=tool_call[\"type\"], function=tool_call[\"function\"])\n for tool_call in openai_message_dict[\"tool_calls\"]\n ]\n else:\n tool_calls = None\n\n # If we're going from tool-call style\n return Message(\n user_id=user_id,\n agent_id=agent_id,\n model=model,\n # standard fields expected in an OpenAI ChatCompletion message object\n role=openai_message_dict[\"role\"],\n text=openai_message_dict[\"content\"],\n name=openai_message_dict[\"name\"] if \"name\" in openai_message_dict else None,\n tool_calls=tool_calls,\n tool_call_id=openai_message_dict[\"tool_call_id\"] if \"tool_call_id\" in openai_message_dict else None,\n )\n\n def to_openai_dict(self):\n \"\"\"Go from Message class to ChatCompletion message object\"\"\"\n\n # TODO change to pydantic casting, eg `return SystemMessageModel(self)`\n\n if self.role == \"system\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional field, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n\n elif self.role == \"user\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional field, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n\n elif self.role == \"assistant\":\n assert all([v is not None for v in [self.text, self.role]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n }\n # Optional fields, do not include if null\n if self.name is not None:\n openai_message[\"name\"] = self.name\n if self.tool_calls is not None:\n openai_message[\"tool_calls\"] = [tool_call.to_dict() for tool_call in self.tool_calls]\n\n elif self.role == \"tool\":\n assert all([v is not None for v in [self.text, self.role, self.tool_call_id]]), vars(self)\n openai_message = {\n \"content\": self.text,\n \"role\": self.role,\n \"tool_call_id\": self.tool_call_id,\n }\n\n else:\n raise ValueError(self.role)\n\n return openai_message" }, { "identifier": "chat_completion_response", "path": "memgpt/models/chat_completion_response.py", "snippet": "class FunctionCall(BaseModel):\nclass ToolCall(BaseModel):\nclass LogProbToken(BaseModel):\nclass MessageContentLogProb(BaseModel):\nclass Message(BaseModel):\nclass Choice(BaseModel):\nclass UsageStatistics(BaseModel):\nclass ChatCompletionResponse(BaseModel):" }, { "identifier": "AgentInterface", "path": "memgpt/interface.py", "snippet": "class AgentInterface(ABC):\r\n \"\"\"Interfaces handle MemGPT-related events (observer pattern)\"\"\"\r\n\r\n @abstractmethod\r\n def user_message(self, msg):\r\n \"\"\"MemGPT receives a user message\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def internal_monologue(self, msg):\r\n \"\"\"MemGPT generates some internal monologue\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def assistant_message(self, msg):\r\n \"\"\"MemGPT uses send_message\"\"\"\r\n raise NotImplementedError\r\n\r\n @abstractmethod\r\n def function_message(self, msg):\r\n \"\"\"MemGPT calls a function\"\"\"\r\n raise NotImplementedError\r" }, { "identifier": "PersistenceManager", "path": "memgpt/persistence_manager.py", "snippet": "class PersistenceManager(ABC):\r\n @abstractmethod\r\n def trim_messages(self, num):\r\n pass\r\n\r\n @abstractmethod\r\n def prepend_to_messages(self, 
added_messages):\r\n pass\r\n\r\n @abstractmethod\r\n def append_to_messages(self, added_messages):\r\n pass\r\n\r\n @abstractmethod\r\n def swap_system_message(self, new_system_message):\r\n pass\r\n\r\n @abstractmethod\r\n def update_memory(self, new_memory):\r\n pass\r" }, { "identifier": "LocalStateManager", "path": "memgpt/persistence_manager.py", "snippet": "class LocalStateManager(PersistenceManager):\r\n \"\"\"In-memory state manager has nothing to manage, all agents are held in-memory\"\"\"\r\n\r\n recall_memory_cls = BaseRecallMemory\r\n archival_memory_cls = EmbeddingArchivalMemory\r\n\r\n def __init__(self, agent_state: AgentState):\r\n # Memory held in-state useful for debugging stateful versions\r\n self.memory = None\r\n self.messages = [] # current in-context messages\r\n # self.all_messages = [] # all messages seen in current session (needed if lazily synchronizing state with DB)\r\n self.archival_memory = EmbeddingArchivalMemory(agent_state)\r\n self.recall_memory = BaseRecallMemory(agent_state)\r\n self.agent_state = agent_state\r\n\r\n def save(self):\r\n \"\"\"Ensure storage connectors save data\"\"\"\r\n self.archival_memory.save()\r\n self.recall_memory.save()\r\n\r\n def init(self, agent):\r\n \"\"\"Connect persistent state manager to agent\"\"\"\r\n printd(f\"Initializing {self.__class__.__name__} with agent object\")\r\n # self.all_messages = [{\"timestamp\": get_local_time(), \"message\": msg} for msg in agent.messages.copy()]\r\n self.messages = [{\"timestamp\": get_local_time(), \"message\": msg} for msg in agent.messages.copy()]\r\n self.memory = agent.memory\r\n # printd(f\"{self.__class__.__name__}.all_messages.len = {len(self.all_messages)}\")\r\n printd(f\"{self.__class__.__name__}.messages.len = {len(self.messages)}\")\r\n\r\n '''\r\n def json_to_message(self, message_json) -> Message:\r\n \"\"\"Convert agent message JSON into Message object\"\"\"\r\n\r\n # get message\r\n if \"message\" in message_json:\r\n message = message_json[\"message\"]\r\n else:\r\n message = message_json\r\n\r\n # get timestamp\r\n if \"timestamp\" in message_json:\r\n timestamp = parse_formatted_time(message_json[\"timestamp\"])\r\n else:\r\n timestamp = get_local_time()\r\n\r\n # TODO: change this when we fully migrate to tool calls API\r\n if \"function_call\" in message:\r\n tool_calls = [\r\n ToolCall(\r\n id=message[\"tool_call_id\"],\r\n tool_call_type=\"function\",\r\n function={\r\n \"name\": message[\"function_call\"][\"name\"],\r\n \"arguments\": message[\"function_call\"][\"arguments\"],\r\n },\r\n )\r\n ]\r\n printd(f\"Saving tool calls {[vars(tc) for tc in tool_calls]}\")\r\n else:\r\n tool_calls = None\r\n\r\n # if message[\"role\"] == \"function\":\r\n # message[\"role\"] = \"tool\"\r\n\r\n return Message(\r\n user_id=self.agent_state.user_id,\r\n agent_id=self.agent_state.id,\r\n role=message[\"role\"],\r\n text=message[\"content\"],\r\n name=message[\"name\"] if \"name\" in message else None,\r\n model=self.agent_state.llm_config.model,\r\n created_at=timestamp,\r\n tool_calls=tool_calls,\r\n tool_call_id=message[\"tool_call_id\"] if \"tool_call_id\" in message else None,\r\n id=message[\"id\"] if \"id\" in message else None,\r\n )\r\n '''\r\n\r\n def trim_messages(self, num):\r\n # printd(f\"InMemoryStateManager.trim_messages\")\r\n self.messages = [self.messages[0]] + self.messages[num:]\r\n\r\n def prepend_to_messages(self, added_messages: List[Message]):\r\n # first tag with timestamps\r\n # added_messages = [{\"timestamp\": get_local_time(), \"message\": 
msg} for msg in added_messages]\r\n\r\n printd(f\"{self.__class__.__name__}.prepend_to_message\")\r\n self.messages = [self.messages[0]] + added_messages + self.messages[1:]\r\n\r\n # add to recall memory\r\n self.recall_memory.insert_many([m for m in added_messages])\r\n\r\n def append_to_messages(self, added_messages: List[Message]):\r\n # first tag with timestamps\r\n # added_messages = [{\"timestamp\": get_local_time(), \"message\": msg} for msg in added_messages]\r\n\r\n printd(f\"{self.__class__.__name__}.append_to_messages\")\r\n self.messages = self.messages + added_messages\r\n\r\n # add to recall memory\r\n self.recall_memory.insert_many([m for m in added_messages])\r\n\r\n def swap_system_message(self, new_system_message: Message):\r\n # first tag with timestamps\r\n # new_system_message = {\"timestamp\": get_local_time(), \"message\": new_system_message}\r\n\r\n printd(f\"{self.__class__.__name__}.swap_system_message\")\r\n self.messages[0] = new_system_message\r\n\r\n # add to recall memory\r\n self.recall_memory.insert(new_system_message)\r\n\r\n def update_memory(self, new_memory):\r\n printd(f\"{self.__class__.__name__}.update_memory\")\r\n self.memory = new_memory\r" }, { "identifier": "MemGPTConfig", "path": "memgpt/config.py", "snippet": "class MemGPTConfig:\n config_path: str = os.path.join(MEMGPT_DIR, \"config\")\n anon_clientid: str = None\n\n # preset\n preset: str = DEFAULT_PRESET\n\n # persona parameters\n persona: str = DEFAULT_PERSONA\n human: str = DEFAULT_HUMAN\n agent: str = None\n\n # model parameters\n default_llm_config: LLMConfig = field(default_factory=LLMConfig)\n\n # embedding parameters\n default_embedding_config: EmbeddingConfig = field(default_factory=EmbeddingConfig)\n\n # database configs: archival\n archival_storage_type: str = \"chroma\" # local, db\n archival_storage_path: str = os.path.join(MEMGPT_DIR, \"chroma\")\n archival_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: recall\n recall_storage_type: str = \"sqlite\" # local, db\n recall_storage_path: str = MEMGPT_DIR\n recall_storage_uri: str = None # TODO: eventually allow external vector DB\n\n # database configs: metadata storage (sources, agents, data sources)\n metadata_storage_type: str = \"sqlite\"\n metadata_storage_path: str = MEMGPT_DIR\n metadata_storage_uri: str = None\n\n # database configs: agent state\n persistence_manager_type: str = None # in-memory, db\n persistence_manager_save_file: str = None # local file\n persistence_manager_uri: str = None # db URI\n\n # version (for backcompat)\n memgpt_version: str = None\n\n # user info\n policies_accepted: bool = False\n\n def __post_init__(self):\n # ensure types\n # self.embedding_chunk_size = int(self.embedding_chunk_size)\n # self.embedding_dim = int(self.embedding_dim)\n # self.context_window = int(self.context_window)\n pass\n\n @staticmethod\n def generate_uuid() -> str:\n return uuid.UUID(int=uuid.getnode()).hex\n\n @classmethod\n def load(cls) -> \"MemGPTConfig\":\n # avoid circular import\n from memgpt.migrate import config_is_compatible, VERSION_CUTOFF\n\n if not config_is_compatible(allow_empty=True):\n error_message = \" \".join(\n [\n f\"\\nYour current config file is incompatible with MemGPT versions later than {VERSION_CUTOFF}.\",\n f\"\\nTo use MemGPT, you must either downgrade your MemGPT version (<= {VERSION_CUTOFF}) or regenerate your config using `memgpt configure`, or `memgpt migrate` if you would like to migrate old agents.\",\n ]\n )\n raise 
ValueError(error_message)\n\n config = configparser.ConfigParser()\n\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n # insure all configuration directories exist\n cls.create_config_dir()\n if os.path.exists(config_path):\n # read existing config\n config.read(config_path)\n\n # Handle extraction of nested LLMConfig and EmbeddingConfig\n llm_config_dict = {\n # Extract relevant LLM configuration from the config file\n \"model\": get_field(config, \"model\", \"model\"),\n \"model_endpoint\": get_field(config, \"model\", \"model_endpoint\"),\n \"model_endpoint_type\": get_field(config, \"model\", \"model_endpoint_type\"),\n \"model_wrapper\": get_field(config, \"model\", \"model_wrapper\"),\n \"context_window\": get_field(config, \"model\", \"context_window\"),\n }\n embedding_config_dict = {\n # Extract relevant Embedding configuration from the config file\n \"embedding_endpoint\": get_field(config, \"embedding\", \"embedding_endpoint\"),\n \"embedding_model\": get_field(config, \"embedding\", \"embedding_model\"),\n \"embedding_endpoint_type\": get_field(config, \"embedding\", \"embedding_endpoint_type\"),\n \"embedding_dim\": get_field(config, \"embedding\", \"embedding_dim\"),\n \"embedding_chunk_size\": get_field(config, \"embedding\", \"chunk_size\"),\n }\n # Correct the types that aren't strings\n if llm_config_dict[\"context_window\"] is not None:\n llm_config_dict[\"context_window\"] = int(llm_config_dict[\"context_window\"])\n if embedding_config_dict[\"embedding_dim\"] is not None:\n embedding_config_dict[\"embedding_dim\"] = int(embedding_config_dict[\"embedding_dim\"])\n if embedding_config_dict[\"embedding_chunk_size\"] is not None:\n embedding_config_dict[\"embedding_chunk_size\"] = int(embedding_config_dict[\"embedding_chunk_size\"])\n # Construct the inner properties\n llm_config = LLMConfig(**llm_config_dict)\n embedding_config = EmbeddingConfig(**embedding_config_dict)\n\n # Everything else\n config_dict = {\n # Two prepared configs\n \"default_llm_config\": llm_config,\n \"default_embedding_config\": embedding_config,\n # Agent related\n \"preset\": get_field(config, \"defaults\", \"preset\"),\n \"persona\": get_field(config, \"defaults\", \"persona\"),\n \"human\": get_field(config, \"defaults\", \"human\"),\n \"agent\": get_field(config, \"defaults\", \"agent\"),\n # Storage related\n \"archival_storage_type\": get_field(config, \"archival_storage\", \"type\"),\n \"archival_storage_path\": get_field(config, \"archival_storage\", \"path\"),\n \"archival_storage_uri\": get_field(config, \"archival_storage\", \"uri\"),\n \"recall_storage_type\": get_field(config, \"recall_storage\", \"type\"),\n \"recall_storage_path\": get_field(config, \"recall_storage\", \"path\"),\n \"recall_storage_uri\": get_field(config, \"recall_storage\", \"uri\"),\n \"metadata_storage_type\": get_field(config, \"metadata_storage\", \"type\"),\n \"metadata_storage_path\": get_field(config, \"metadata_storage\", \"path\"),\n \"metadata_storage_uri\": get_field(config, \"metadata_storage\", \"uri\"),\n # Misc\n \"anon_clientid\": get_field(config, \"client\", \"anon_clientid\"),\n \"config_path\": config_path,\n \"memgpt_version\": get_field(config, \"version\", \"memgpt_version\"),\n }\n\n # Don't include null values\n config_dict = {k: v for k, v in config_dict.items() if v is not None}\n\n return cls(**config_dict)\n\n # create new config\n anon_clientid = 
MemGPTConfig.generate_uuid()\n config = cls(anon_clientid=anon_clientid, config_path=config_path)\n config.create_config_dir() # create dirs\n config.save() # save updated config\n\n return config\n\n def save(self):\n import memgpt\n\n config = configparser.ConfigParser()\n\n # CLI defaults\n set_field(config, \"defaults\", \"preset\", self.preset)\n set_field(config, \"defaults\", \"persona\", self.persona)\n set_field(config, \"defaults\", \"human\", self.human)\n set_field(config, \"defaults\", \"agent\", self.agent)\n\n # model defaults\n set_field(config, \"model\", \"model\", self.default_llm_config.model)\n set_field(config, \"model\", \"model_endpoint\", self.default_llm_config.model_endpoint)\n set_field(config, \"model\", \"model_endpoint_type\", self.default_llm_config.model_endpoint_type)\n set_field(config, \"model\", \"model_wrapper\", self.default_llm_config.model_wrapper)\n set_field(config, \"model\", \"context_window\", str(self.default_llm_config.context_window))\n\n # embeddings\n set_field(config, \"embedding\", \"embedding_endpoint_type\", self.default_embedding_config.embedding_endpoint_type)\n set_field(config, \"embedding\", \"embedding_endpoint\", self.default_embedding_config.embedding_endpoint)\n set_field(config, \"embedding\", \"embedding_model\", self.default_embedding_config.embedding_model)\n set_field(config, \"embedding\", \"embedding_dim\", str(self.default_embedding_config.embedding_dim))\n set_field(config, \"embedding\", \"embedding_chunk_size\", str(self.default_embedding_config.embedding_chunk_size))\n\n # archival storage\n set_field(config, \"archival_storage\", \"type\", self.archival_storage_type)\n set_field(config, \"archival_storage\", \"path\", self.archival_storage_path)\n set_field(config, \"archival_storage\", \"uri\", self.archival_storage_uri)\n\n # recall storage\n set_field(config, \"recall_storage\", \"type\", self.recall_storage_type)\n set_field(config, \"recall_storage\", \"path\", self.recall_storage_path)\n set_field(config, \"recall_storage\", \"uri\", self.recall_storage_uri)\n\n # metadata storage\n set_field(config, \"metadata_storage\", \"type\", self.metadata_storage_type)\n set_field(config, \"metadata_storage\", \"path\", self.metadata_storage_path)\n set_field(config, \"metadata_storage\", \"uri\", self.metadata_storage_uri)\n\n # set version\n set_field(config, \"version\", \"memgpt_version\", memgpt.__version__)\n\n # client\n if not self.anon_clientid:\n self.anon_clientid = self.generate_uuid()\n set_field(config, \"client\", \"anon_clientid\", self.anon_clientid)\n\n # always make sure all directories are present\n self.create_config_dir()\n\n with open(self.config_path, \"w\") as f:\n config.write(f)\n logger.debug(f\"Saved Config: {self.config_path}\")\n\n @staticmethod\n def exists():\n # allow overriding with env variables\n if os.getenv(\"MEMGPT_CONFIG_PATH\"):\n config_path = os.getenv(\"MEMGPT_CONFIG_PATH\")\n else:\n config_path = MemGPTConfig.config_path\n\n assert not os.path.isdir(config_path), f\"Config path {config_path} cannot be set to a directory.\"\n return os.path.exists(config_path)\n\n @staticmethod\n def create_config_dir():\n if not os.path.exists(MEMGPT_DIR):\n os.makedirs(MEMGPT_DIR, exist_ok=True)\n\n folders = [\"personas\", \"humans\", \"archival\", \"agents\", \"functions\", \"system_prompts\", \"presets\", \"settings\"]\n\n for folder in folders:\n if not os.path.exists(os.path.join(MEMGPT_DIR, folder)):\n os.makedirs(os.path.join(MEMGPT_DIR, folder))" }, { "identifier": 
"get_login_event", "path": "memgpt/system.py", "snippet": "def get_login_event(last_login=\"Never (first login)\", include_location=False, location_name=\"San Francisco, CA, USA\"):\r\n # Package the message with time and location\r\n formatted_time = get_local_time()\r\n packaged_message = {\r\n \"type\": \"login\",\r\n \"last_login\": last_login,\r\n \"time\": formatted_time,\r\n }\r\n\r\n if include_location:\r\n packaged_message[\"location\"] = location_name\r\n\r\n return json.dumps(packaged_message, ensure_ascii=JSON_ENSURE_ASCII)\r" }, { "identifier": "package_function_response", "path": "memgpt/system.py", "snippet": "def package_function_response(was_success, response_string, timestamp=None):\r\n formatted_time = get_local_time() if timestamp is None else timestamp\r\n packaged_message = {\r\n \"status\": \"OK\" if was_success else \"Failed\",\r\n \"message\": response_string,\r\n \"time\": formatted_time,\r\n }\r\n\r\n return json.dumps(packaged_message, ensure_ascii=JSON_ENSURE_ASCII)\r" }, { "identifier": "package_summarize_message", "path": "memgpt/system.py", "snippet": "def package_summarize_message(summary, summary_length, hidden_message_count, total_message_count, timestamp=None):\r\n context_message = (\r\n f\"Note: prior messages ({hidden_message_count} of {total_message_count} total messages) have been hidden from view due to conversation memory constraints.\\n\"\r\n + f\"The following is a summary of the previous {summary_length} messages:\\n {summary}\"\r\n )\r\n\r\n formatted_time = get_local_time() if timestamp is None else timestamp\r\n packaged_message = {\r\n \"type\": \"system_alert\",\r\n \"message\": context_message,\r\n \"time\": formatted_time,\r\n }\r\n\r\n return json.dumps(packaged_message, ensure_ascii=JSON_ENSURE_ASCII)\r" }, { "identifier": "get_initial_boot_messages", "path": "memgpt/system.py", "snippet": "def get_initial_boot_messages(version=\"startup\"):\r\n if version == \"startup\":\r\n initial_boot_message = INITIAL_BOOT_MESSAGE\r\n messages = [\r\n {\"role\": \"assistant\", \"content\": initial_boot_message},\r\n ]\r\n\r\n elif version == \"startup_with_send_message\":\r\n tool_call_id = str(uuid.uuid4())\r\n messages = [\r\n # first message includes both inner monologue and function call to send_message\r\n {\r\n \"role\": \"assistant\",\r\n \"content\": INITIAL_BOOT_MESSAGE_SEND_MESSAGE_THOUGHT,\r\n # \"function_call\": {\r\n # \"name\": \"send_message\",\r\n # \"arguments\": '{\\n \"message\": \"' + f\"{INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG}\" + '\"\\n}',\r\n # },\r\n \"tool_calls\": [\r\n {\r\n \"id\": tool_call_id,\r\n \"type\": \"function\",\r\n \"function\": {\r\n \"name\": \"send_message\",\r\n \"arguments\": '{\\n \"message\": \"' + f\"{INITIAL_BOOT_MESSAGE_SEND_MESSAGE_FIRST_MSG}\" + '\"\\n}',\r\n },\r\n }\r\n ],\r\n },\r\n # obligatory function return message\r\n {\r\n # \"role\": \"function\",\r\n \"role\": \"tool\",\r\n \"name\": \"send_message\", # NOTE: technically not up to spec, this is old functions style\r\n \"content\": package_function_response(True, None),\r\n \"tool_call_id\": tool_call_id,\r\n },\r\n ]\r\n\r\n elif version == \"startup_with_send_message_gpt35\":\r\n tool_call_id = str(uuid.uuid4())\r\n messages = [\r\n # first message includes both inner monologue and function call to send_message\r\n {\r\n \"role\": \"assistant\",\r\n \"content\": \"*inner thoughts* Still waiting on the user. 
Sending a message with function.\",\r\n # \"function_call\": {\"name\": \"send_message\", \"arguments\": '{\\n \"message\": \"' + f\"Hi, is anyone there?\" + '\"\\n}'},\r\n \"tool_calls\": [\r\n {\r\n \"id\": tool_call_id,\r\n \"type\": \"function\",\r\n \"function\": {\r\n \"name\": \"send_message\",\r\n \"arguments\": '{\\n \"message\": \"' + f\"Hi, is anyone there?\" + '\"\\n}',\r\n },\r\n }\r\n ],\r\n },\r\n # obligatory function return message\r\n {\r\n # \"role\": \"function\",\r\n \"role\": \"tool\",\r\n \"name\": \"send_message\",\r\n \"content\": package_function_response(True, None),\r\n \"tool_call_id\": tool_call_id,\r\n },\r\n ]\r\n\r\n else:\r\n raise ValueError(version)\r\n\r\n return messages\r" }, { "identifier": "CoreMemory", "path": "memgpt/memory.py", "snippet": "class CoreMemory(object):\r\n \"\"\"Held in-context inside the system message\r\n\r\n Core Memory: Refers to the system block, which provides essential, foundational context to the AI.\r\n This includes the persona information, essential user details,\r\n and any other baseline data you deem necessary for the AI's basic functioning.\r\n \"\"\"\r\n\r\n def __init__(self, persona=None, human=None, persona_char_limit=None, human_char_limit=None, archival_memory_exists=True):\r\n self.persona = persona\r\n self.human = human\r\n self.persona_char_limit = persona_char_limit\r\n self.human_char_limit = human_char_limit\r\n\r\n # affects the error message the AI will see on overflow inserts\r\n self.archival_memory_exists = archival_memory_exists\r\n\r\n def __repr__(self) -> str:\r\n return f\"\\n### CORE MEMORY ###\" + f\"\\n=== Persona ===\\n{self.persona}\" + f\"\\n\\n=== Human ===\\n{self.human}\"\r\n\r\n def to_dict(self):\r\n return {\r\n \"persona\": self.persona,\r\n \"human\": self.human,\r\n }\r\n\r\n @classmethod\r\n def load(cls, state):\r\n return cls(state[\"persona\"], state[\"human\"])\r\n\r\n def edit_persona(self, new_persona):\r\n if self.persona_char_limit and len(new_persona) > self.persona_char_limit:\r\n error_msg = f\"Edit failed: Exceeds {self.persona_char_limit} character limit (requested {len(new_persona)}).\"\r\n if self.archival_memory_exists:\r\n error_msg = f\"{error_msg} Consider summarizing existing core memories in 'persona' and/or moving lower priority content to archival memory to free up space in core memory, then trying again.\"\r\n raise ValueError(error_msg)\r\n\r\n self.persona = new_persona\r\n return len(self.persona)\r\n\r\n def edit_human(self, new_human):\r\n if self.human_char_limit and len(new_human) > self.human_char_limit:\r\n error_msg = f\"Edit failed: Exceeds {self.human_char_limit} character limit (requested {len(new_human)}).\"\r\n if self.archival_memory_exists:\r\n error_msg = f\"{error_msg} Consider summarizing existing core memories in 'human' and/or moving lower priority content to archival memory to free up space in core memory, then trying again.\"\r\n raise ValueError(error_msg)\r\n\r\n self.human = new_human\r\n return len(self.human)\r\n\r\n def edit(self, field, content):\r\n if field == \"persona\":\r\n return self.edit_persona(content)\r\n elif field == \"human\":\r\n return self.edit_human(content)\r\n else:\r\n raise KeyError(f'No memory section named {field} (must be either \"persona\" or \"human\")')\r\n\r\n def edit_append(self, field, content, sep=\"\\n\"):\r\n if field == \"persona\":\r\n new_content = self.persona + sep + content\r\n return self.edit_persona(new_content)\r\n elif field == \"human\":\r\n new_content = self.human + sep + 
content\r\n return self.edit_human(new_content)\r\n else:\r\n raise KeyError(f'No memory section named {field} (must be either \"persona\" or \"human\")')\r\n\r\n def edit_replace(self, field, old_content, new_content):\r\n if len(old_content) == 0:\r\n raise ValueError(\"old_content cannot be an empty string (must specify old_content to replace)\")\r\n\r\n if field == \"persona\":\r\n if old_content in self.persona:\r\n new_persona = self.persona.replace(old_content, new_content)\r\n return self.edit_persona(new_persona)\r\n else:\r\n raise ValueError(\"Content not found in persona (make sure to use exact string)\")\r\n elif field == \"human\":\r\n if old_content in self.human:\r\n new_human = self.human.replace(old_content, new_content)\r\n return self.edit_human(new_human)\r\n else:\r\n raise ValueError(\"Content not found in human (make sure to use exact string)\")\r\n else:\r\n raise KeyError(f'No memory section named {field} (must be either \"persona\" or \"human\")')\r" }, { "identifier": "summarize_messages", "path": "memgpt/memory.py", "snippet": "def summarize_messages(\r\n agent_state: AgentState,\r\n message_sequence_to_summarize,\r\n):\r\n \"\"\"Summarize a message sequence using GPT\"\"\"\r\n # we need the context_window\r\n context_window = agent_state.llm_config.context_window\r\n\r\n summary_prompt = SUMMARY_PROMPT_SYSTEM\r\n summary_input = str(message_sequence_to_summarize)\r\n summary_input_tkns = count_tokens(summary_input)\r\n if summary_input_tkns > MESSAGE_SUMMARY_WARNING_FRAC * context_window:\r\n trunc_ratio = (MESSAGE_SUMMARY_WARNING_FRAC * context_window / summary_input_tkns) * 0.8 # For good measure...\r\n cutoff = int(len(message_sequence_to_summarize) * trunc_ratio)\r\n summary_input = str(\r\n [summarize_messages(agent_state, message_sequence_to_summarize=message_sequence_to_summarize[:cutoff])]\r\n + message_sequence_to_summarize[cutoff:]\r\n )\r\n message_sequence = [\r\n {\"role\": \"system\", \"content\": summary_prompt},\r\n {\"role\": \"user\", \"content\": summary_input},\r\n ]\r\n\r\n response = create(\r\n agent_state=agent_state,\r\n messages=message_sequence,\r\n )\r\n\r\n printd(f\"summarize_messages gpt reply: {response.choices[0]}\")\r\n reply = response.choices[0].message.content\r\n return reply\r" }, { "identifier": "create", "path": "memgpt/llm_api_tools.py", "snippet": "@retry_with_exponential_backoff\r\ndef create(\r\n agent_state: AgentState,\r\n messages,\r\n functions=None,\r\n functions_python=None,\r\n function_call=\"auto\",\r\n # hint\r\n first_message=False,\r\n # use tool naming?\r\n # if false, will use deprecated 'functions' style\r\n use_tool_naming=True,\r\n) -> ChatCompletionResponse:\r\n \"\"\"Return response to chat completion with backoff\"\"\"\r\n from memgpt.utils import printd\r\n\r\n printd(f\"Using model {agent_state.llm_config.model_endpoint_type}, endpoint: {agent_state.llm_config.model_endpoint}\")\r\n\r\n # TODO eventually refactor so that credentials are passed through\r\n credentials = MemGPTCredentials.load()\r\n\r\n # openai\r\n if agent_state.llm_config.model_endpoint_type == \"openai\":\r\n # TODO do the same for Azure?\r\n if credentials.openai_key is None:\r\n raise ValueError(f\"OpenAI key is missing from MemGPT config file\")\r\n if use_tool_naming:\r\n data = dict(\r\n model=agent_state.llm_config.model,\r\n messages=messages,\r\n tools=[{\"type\": \"function\", \"function\": f} for f in functions],\r\n tool_choice=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n else:\r\n data = dict(\r\n 
model=agent_state.llm_config.model,\r\n messages=messages,\r\n functions=functions,\r\n function_call=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n return openai_chat_completions_request(\r\n url=agent_state.llm_config.model_endpoint, # https://api.openai.com/v1 -> https://api.openai.com/v1/chat/completions\r\n api_key=credentials.openai_key,\r\n data=data,\r\n )\r\n\r\n # azure\r\n elif agent_state.llm_config.model_endpoint_type == \"azure\":\r\n azure_deployment = (\r\n credentials.azure_deployment\r\n if credentials.azure_deployment is not None\r\n else MODEL_TO_AZURE_ENGINE[agent_state.llm_config.model]\r\n )\r\n if use_tool_naming:\r\n data = dict(\r\n # NOTE: don't pass model to Azure calls, that is the deployment_id\r\n # model=agent_config.model,\r\n messages=messages,\r\n tools=[{\"type\": \"function\", \"function\": f} for f in functions],\r\n tool_choice=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n else:\r\n data = dict(\r\n # NOTE: don't pass model to Azure calls, that is the deployment_id\r\n # model=agent_config.model,\r\n messages=messages,\r\n functions=functions,\r\n function_call=function_call,\r\n user=str(agent_state.user_id),\r\n )\r\n return azure_openai_chat_completions_request(\r\n resource_name=credentials.azure_endpoint,\r\n deployment_id=azure_deployment,\r\n api_version=credentials.azure_version,\r\n api_key=credentials.azure_key,\r\n data=data,\r\n )\r\n\r\n # local model\r\n else:\r\n return get_chat_completion(\r\n model=agent_state.llm_config.model,\r\n messages=messages,\r\n functions=functions,\r\n functions_python=functions_python,\r\n function_call=function_call,\r\n context_window=agent_state.llm_config.context_window,\r\n endpoint=agent_state.llm_config.model_endpoint,\r\n endpoint_type=agent_state.llm_config.model_endpoint_type,\r\n wrapper=agent_state.llm_config.model_wrapper,\r\n user=str(agent_state.user_id),\r\n # hint\r\n first_message=first_message,\r\n # auth-related\r\n auth_type=credentials.openllm_auth_type,\r\n auth_key=credentials.openllm_key,\r\n )\r" }, { "identifier": "is_context_overflow_error", "path": "memgpt/llm_api_tools.py", "snippet": "def is_context_overflow_error(exception):\r\n from memgpt.utils import printd\r\n\r\n match_string = \"maximum context length\"\r\n\r\n # Backwards compatability with openai python package/client v0.28 (pre-v1 client migration)\r\n if match_string in str(exception):\r\n printd(f\"Found '{match_string}' in str(exception)={(str(exception))}\")\r\n return True\r\n\r\n # Based on python requests + OpenAI REST API (/v1)\r\n elif isinstance(exception, requests.exceptions.HTTPError):\r\n if exception.response is not None and \"application/json\" in exception.response.headers.get(\"Content-Type\", \"\"):\r\n try:\r\n error_details = exception.response.json()\r\n if \"error\" not in error_details:\r\n printd(f\"HTTPError occured, but couldn't find error field: {error_details}\")\r\n return False\r\n else:\r\n error_details = error_details[\"error\"]\r\n\r\n # Check for the specific error code\r\n if error_details.get(\"code\") == \"context_length_exceeded\":\r\n printd(f\"HTTPError occured, caught error code {error_details.get('code')}\")\r\n return True\r\n # Soft-check for \"maximum context length\" inside of the message\r\n elif error_details.get(\"message\") and \"maximum context length\" in error_details.get(\"message\"):\r\n printd(f\"HTTPError occured, found '{match_string}' in error message contents ({error_details})\")\r\n return True\r\n else:\r\n printd(f\"HTTPError 
occured, but unknown error message: {error_details}\")\r\n return False\r\n except ValueError:\r\n # JSON decoding failed\r\n printd(f\"HTTPError occurred ({exception}), but no JSON error message.\")\r\n\r\n # Generic fail\r\n else:\r\n return False\r" }, { "identifier": "get_tool_call_id", "path": "memgpt/utils.py", "snippet": "def get_tool_call_id() -> str:\r\n return str(uuid.uuid4())\r" }, { "identifier": "get_local_time", "path": "memgpt/utils.py", "snippet": "def get_local_time(timezone=None):\r\n if timezone is not None:\r\n time_str = get_local_time_timezone(timezone)\r\n else:\r\n # Get the current time, which will be in the local timezone of the computer\r\n local_time = datetime.now().astimezone()\r\n\r\n # You may format it as you desire, including AM/PM\r\n time_str = local_time.strftime(\"%Y-%m-%d %I:%M:%S %p %Z%z\")\r\n\r\n return time_str.strip()\r" }, { "identifier": "parse_json", "path": "memgpt/utils.py", "snippet": "def parse_json(string):\r\n \"\"\"Parse JSON string into JSON with both json and demjson\"\"\"\r\n result = None\r\n try:\r\n result = json.loads(string)\r\n return result\r\n except Exception as e:\r\n print(f\"Error parsing json with json package: {e}\")\r\n\r\n try:\r\n result = demjson.decode(string)\r\n return result\r\n except demjson.JSONDecodeError as e:\r\n print(f\"Error parsing json with demjson package: {e}\")\r\n raise e\r" }, { "identifier": "united_diff", "path": "memgpt/utils.py", "snippet": "def united_diff(str1, str2):\r\n lines1 = str1.splitlines(True)\r\n lines2 = str2.splitlines(True)\r\n diff = difflib.unified_diff(lines1, lines2)\r\n return \"\".join(diff)\r" }, { "identifier": "printd", "path": "memgpt/utils.py", "snippet": "def printd(*args, **kwargs):\r\n if DEBUG:\r\n print(*args, **kwargs)\r" }, { "identifier": "count_tokens", "path": "memgpt/utils.py", "snippet": "def count_tokens(s: str, model: str = \"gpt-4\") -> int:\r\n encoding = tiktoken.encoding_for_model(model)\r\n return len(encoding.encode(s))\r" }, { "identifier": "get_schema_diff", "path": "memgpt/utils.py", "snippet": "def get_schema_diff(schema_a, schema_b):\r\n # Assuming f_schema and linked_function['json_schema'] are your JSON schemas\r\n f_schema_json = json.dumps(schema_a, indent=2, ensure_ascii=JSON_ENSURE_ASCII)\r\n linked_function_json = json.dumps(schema_b, indent=2, ensure_ascii=JSON_ENSURE_ASCII)\r\n\r\n # Compute the difference using difflib\r\n difference = list(difflib.ndiff(f_schema_json.splitlines(keepends=True), linked_function_json.splitlines(keepends=True)))\r\n\r\n # Filter out lines that don't represent changes\r\n difference = [line for line in difference if line.startswith(\"+ \") or line.startswith(\"- \")]\r\n\r\n return \"\".join(difference)\r" }, { "identifier": "validate_function_response", "path": "memgpt/utils.py", "snippet": "def validate_function_response(function_response_string: any, strict: bool = False, truncate: bool = True) -> str:\r\n \"\"\"Check to make sure that a function used by MemGPT returned a valid response\r\n\r\n Responses need to be strings (or None) that fall under a certain text count limit.\r\n \"\"\"\r\n if not isinstance(function_response_string, str):\r\n # Soft correction for a few basic types\r\n\r\n if function_response_string is None:\r\n # function_response_string = \"Empty (no function output)\"\r\n function_response_string = \"None\" # backcompat\r\n\r\n elif isinstance(function_response_string, dict):\r\n if strict:\r\n # TODO add better error message\r\n raise 
ValueError(function_response_string)\r\n\r\n # Allow dict through since it will be cast to json.dumps()\r\n try:\r\n # TODO find a better way to do this that won't result in double escapes\r\n function_response_string = json.dumps(function_response_string, ensure_ascii=JSON_ENSURE_ASCII)\r\n except:\r\n raise ValueError(function_response_string)\r\n\r\n else:\r\n if strict:\r\n # TODO add better error message\r\n raise ValueError(function_response_string)\r\n\r\n # Try to convert to a string, but throw a warning to alert the user\r\n try:\r\n function_response_string = str(function_response_string)\r\n except:\r\n raise ValueError(function_response_string)\r\n\r\n # Now check the length and make sure it doesn't go over the limit\r\n # TODO we should change this to a max token limit that's variable based on tokens remaining (or context-window)\r\n if truncate and len(function_response_string) > FUNCTION_RETURN_CHAR_LIMIT:\r\n print(\r\n f\"{CLI_WARNING_PREFIX}function return was over limit ({len(function_response_string)} > {FUNCTION_RETURN_CHAR_LIMIT}) and was truncated\"\r\n )\r\n function_response_string = f\"{function_response_string[:FUNCTION_RETURN_CHAR_LIMIT]}... [NOTE: function output was truncated since it exceeded the character limit ({len(function_response_string)} > {FUNCTION_RETURN_CHAR_LIMIT})]\"\r\n\r\n return function_response_string\r" }, { "identifier": "verify_first_message_correctness", "path": "memgpt/utils.py", "snippet": "def verify_first_message_correctness(\r\n response: ChatCompletionResponse, require_send_message: bool = True, require_monologue: bool = False\r\n) -> bool:\r\n \"\"\"Can be used to enforce that the first message always uses send_message\"\"\"\r\n response_message = response.choices[0].message\r\n\r\n # First message should be a call to send_message with a non-empty content\r\n if require_send_message and not (response_message.function_call or response_message.tool_calls):\r\n printd(f\"First message didn't include function call: {response_message}\")\r\n return False\r\n\r\n assert not (response_message.function_call and response_message.tool_calls), response_message\r\n function_call = response_message.function_call if response_message.function_call else response_message.tool_calls[0].function\r\n function_name = function_call.name if function_call is not None else \"\"\r\n if require_send_message and function_name != \"send_message\" and function_name != \"archival_memory_search\":\r\n printd(f\"First message function call wasn't send_message or archival_memory_search: {response_message}\")\r\n return False\r\n\r\n if require_monologue and (not response_message.content or response_message.content is None or response_message.content == \"\"):\r\n printd(f\"First message missing internal monologue: {response_message}\")\r\n return False\r\n\r\n if response_message.content:\r\n ### Extras\r\n monologue = response_message.content\r\n\r\n def contains_special_characters(s):\r\n special_characters = '(){}[]\"'\r\n return any(char in s for char in special_characters)\r\n\r\n if contains_special_characters(monologue):\r\n printd(f\"First message internal monologue contained special characters: {response_message}\")\r\n return False\r\n # if 'functions' in monologue or 'send_message' in monologue or 'inner thought' in monologue.lower():\r\n if \"functions\" in monologue or \"send_message\" in monologue:\r\n # Sometimes the syntax won't be correct and internal syntax will leak into message.context\r\n printd(f\"First message internal monologue contained 
reserved words: {response_message}\")\r\n return False\r\n\r\n return True\r" }, { "identifier": "FIRST_MESSAGE_ATTEMPTS", "path": "memgpt/constants.py", "snippet": "FIRST_MESSAGE_ATTEMPTS = 10\r" }, { "identifier": "MESSAGE_SUMMARY_WARNING_FRAC", "path": "memgpt/constants.py", "snippet": "MESSAGE_SUMMARY_WARNING_FRAC = 0.75\r" }, { "identifier": "MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC", "path": "memgpt/constants.py", "snippet": "MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC = 0.75\r" }, { "identifier": "MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST", "path": "memgpt/constants.py", "snippet": "MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST = 3\r" }, { "identifier": "CORE_MEMORY_HUMAN_CHAR_LIMIT", "path": "memgpt/constants.py", "snippet": "CORE_MEMORY_HUMAN_CHAR_LIMIT = 2000\r" }, { "identifier": "CORE_MEMORY_PERSONA_CHAR_LIMIT", "path": "memgpt/constants.py", "snippet": "CORE_MEMORY_PERSONA_CHAR_LIMIT = 2000\r" }, { "identifier": "LLM_MAX_TOKENS", "path": "memgpt/constants.py", "snippet": "LLM_MAX_TOKENS = {\r\n \"DEFAULT\": 8192,\r\n ## OpenAI models: https://platform.openai.com/docs/models/overview\r\n # gpt-4\r\n \"gpt-4-1106-preview\": 128000,\r\n \"gpt-4\": 8192,\r\n \"gpt-4-32k\": 32768,\r\n \"gpt-4-0613\": 8192,\r\n \"gpt-4-32k-0613\": 32768,\r\n \"gpt-4-0314\": 8192, # legacy\r\n \"gpt-4-32k-0314\": 32768, # legacy\r\n # gpt-3.5\r\n \"gpt-3.5-turbo-1106\": 16385,\r\n \"gpt-3.5-turbo\": 4096,\r\n \"gpt-3.5-turbo-16k\": 16385,\r\n \"gpt-3.5-turbo-0613\": 4096, # legacy\r\n \"gpt-3.5-turbo-16k-0613\": 16385, # legacy\r\n \"gpt-3.5-turbo-0301\": 4096, # legacy\r\n}\r" }, { "identifier": "CLI_WARNING_PREFIX", "path": "memgpt/constants.py", "snippet": "CLI_WARNING_PREFIX = \"Warning: \"\r" }, { "identifier": "JSON_ENSURE_ASCII", "path": "memgpt/constants.py", "snippet": "JSON_ENSURE_ASCII = False\r" }, { "identifier": "LLMError", "path": "memgpt/errors.py", "snippet": "class LLMError(Exception):\n \"\"\"Base class for all LLM-related errors.\"\"\"\n\n pass" }, { "identifier": "USER_FUNCTIONS_DIR", "path": "memgpt/functions/functions.py", "snippet": "USER_FUNCTIONS_DIR = os.path.join(MEMGPT_DIR, \"functions\")" }, { "identifier": "load_all_function_sets", "path": "memgpt/functions/functions.py", "snippet": "def load_all_function_sets(merge=True):\n # functions/examples/*.py\n scripts_dir = os.path.dirname(os.path.abspath(__file__)) # Get the directory of the current script\n function_sets_dir = os.path.join(scripts_dir, \"function_sets\") # Path to the function_sets directory\n # List all .py files in the directory (excluding __init__.py)\n example_module_files = [f for f in os.listdir(function_sets_dir) if f.endswith(\".py\") and f != \"__init__.py\"]\n\n # ~/.memgpt/functions/*.py\n # create if missing\n if not os.path.exists(USER_FUNCTIONS_DIR):\n os.makedirs(USER_FUNCTIONS_DIR)\n user_module_files = [f for f in os.listdir(USER_FUNCTIONS_DIR) if f.endswith(\".py\") and f != \"__init__.py\"]\n\n # combine them both (pull from both examples and user-provided)\n # all_module_files = example_module_files + user_module_files\n\n # Add user_scripts_dir to sys.path\n if USER_FUNCTIONS_DIR not in sys.path:\n sys.path.append(USER_FUNCTIONS_DIR)\n\n schemas_and_functions = {}\n for dir_path, module_files in [(function_sets_dir, example_module_files), (USER_FUNCTIONS_DIR, user_module_files)]:\n for file in module_files:\n module_name = file[:-3] # Remove '.py' from filename\n if dir_path == USER_FUNCTIONS_DIR:\n # For user scripts, adjust the module name appropriately\n module_full_path = os.path.join(dir_path, file)\n try:\n spec = 
importlib.util.spec_from_file_location(module_name, module_full_path)\n module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(module)\n except ModuleNotFoundError as e:\n # Handle missing module imports\n missing_package = str(e).split(\"'\")[1] # Extract the name of the missing package\n print(f\"{CLI_WARNING_PREFIX}skipped loading python file '{module_full_path}'!\")\n print(\n f\"'{file}' imports '{missing_package}', but '{missing_package}' is not installed locally - install python package '{missing_package}' to link functions from '{file}' to MemGPT.\"\n )\n continue\n except SyntaxError as e:\n # Handle syntax errors in the module\n print(f\"{CLI_WARNING_PREFIX}skipped loading python file '{file}' due to a syntax error: {e}\")\n continue\n except Exception as e:\n # Handle other general exceptions\n print(f\"{CLI_WARNING_PREFIX}skipped loading python file '{file}': {e}\")\n continue\n else:\n # For built-in scripts, use the existing method\n full_module_name = f\"memgpt.functions.function_sets.{module_name}\"\n try:\n module = importlib.import_module(full_module_name)\n except Exception as e:\n # Handle other general exceptions\n print(f\"{CLI_WARNING_PREFIX}skipped loading python module '{full_module_name}': {e}\")\n continue\n\n try:\n # Load the function set\n function_set = load_function_set(module)\n schemas_and_functions[module_name] = function_set\n except ValueError as e:\n print(f\"Error loading function set '{module_name}': {e}\")\n\n if merge:\n # Put all functions from all sets into the same level dict\n merged_functions = {}\n for set_name, function_set in schemas_and_functions.items():\n for function_name, function_info in function_set.items():\n if function_name in merged_functions:\n raise ValueError(f\"Duplicate function name '{function_name}' found in function set '{set_name}'\")\n merged_functions[function_name] = function_info\n return merged_functions\n else:\n # Nested dict where the top level is organized by the function set name\n return schemas_and_functions" } ]
import datetime import uuid import glob import inspect import os import json import traceback from pathlib import Path from typing import List, Tuple from box import Box from memgpt.data_types import AgentState, Message from memgpt.models import chat_completion_response from memgpt.interface import AgentInterface from memgpt.persistence_manager import PersistenceManager, LocalStateManager from memgpt.config import MemGPTConfig from memgpt.system import get_login_event, package_function_response, package_summarize_message, get_initial_boot_messages from memgpt.memory import CoreMemory as InContextMemory, summarize_messages from memgpt.llm_api_tools import create, is_context_overflow_error from memgpt.utils import ( get_tool_call_id, get_local_time, parse_json, united_diff, printd, count_tokens, get_schema_diff, validate_function_response, verify_first_message_correctness, ) from memgpt.constants import ( FIRST_MESSAGE_ATTEMPTS, MESSAGE_SUMMARY_WARNING_FRAC, MESSAGE_SUMMARY_TRUNC_TOKEN_FRAC, MESSAGE_SUMMARY_TRUNC_KEEP_N_LAST, CORE_MEMORY_HUMAN_CHAR_LIMIT, CORE_MEMORY_PERSONA_CHAR_LIMIT, LLM_MAX_TOKENS, CLI_WARNING_PREFIX, JSON_ENSURE_ASCII, ) from .errors import LLMError from .functions.functions import USER_FUNCTIONS_DIR, load_all_function_sets
16,133
Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. 
enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}") print(f"{self.agent_state}") self.agent_state.llm_config.context_window = ( str(LLM_MAX_TOKENS[self.model]) if (self.model is not None and self.model in LLM_MAX_TOKENS) else str(LLM_MAX_TOKENS["DEFAULT"]) )
def link_functions(function_schemas): """Link function definitions to list of function schemas""" # need to dynamically link the functions # the saved agent.functions will just have the schemas, but we need to # go through the functions library and pull the respective python functions # Available functions is a mapping from: # function_name -> { # json_schema: schema # python_function: function # } # agent.functions is a list of schemas (OpenAI kwarg functions style, see: https://platform.openai.com/docs/api-reference/chat/create) # [{'name': ..., 'description': ...}, {...}] available_functions = load_all_function_sets() linked_function_set = {} for f_schema in function_schemas: # Attempt to find the function in the existing function library f_name = f_schema.get("name") if f_name is None: raise ValueError(f"While loading agent.state.functions encountered a bad function schema object with no name:\n{f_schema}") linked_function = available_functions.get(f_name) if linked_function is None: raise ValueError( f"Function '{f_name}' was specified in agent.state.functions, but is not in function library:\n{available_functions.keys()}" ) # Once we find a matching function, make sure the schema is identical if json.dumps(f_schema, ensure_ascii=JSON_ENSURE_ASCII) != json.dumps( linked_function["json_schema"], ensure_ascii=JSON_ENSURE_ASCII ): # error_message = ( # f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different." # + f"\n>>>agent.state.functions\n{json.dumps(f_schema, indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # + f"\n>>>function library\n{json.dumps(linked_function['json_schema'], indent=2, ensure_ascii=JSON_ENSURE_ASCII)}" # ) schema_diff = get_schema_diff(f_schema, linked_function["json_schema"]) error_message = ( f"Found matching function '{f_name}' from agent.state.functions inside function library, but schemas are different.\n" + "".join(schema_diff) ) # NOTE to handle old configs, instead of erroring here let's just warn # raise ValueError(error_message) printd(error_message) linked_function_set[f_name] = linked_function return linked_function_set def initialize_memory(ai_notes, human_notes): if ai_notes is None: raise ValueError(ai_notes) if human_notes is None: raise ValueError(human_notes) memory = InContextMemory(human_char_limit=CORE_MEMORY_HUMAN_CHAR_LIMIT, persona_char_limit=CORE_MEMORY_PERSONA_CHAR_LIMIT) memory.edit_persona(ai_notes) memory.edit_human(human_notes) return memory def construct_system_with_memory(system, memory, memory_edit_timestamp, archival_memory=None, recall_memory=None, include_char_count=True): full_system_message = "\n".join( [ system, "\n", f"### Memory [last modified: {memory_edit_timestamp.strip()}]", f"{len(recall_memory) if recall_memory else 0} previous messages between you and the user are stored in recall memory (use functions to access them)", f"{len(archival_memory) if archival_memory else 0} total memories you created are stored in archival memory (use functions to access them)", "\nCore memory shown below (limited in size, additional information stored in archival / recall memory):", f'<persona characters="{len(memory.persona)}/{memory.persona_char_limit}">' if include_char_count else "<persona>", memory.persona, "</persona>", f'<human characters="{len(memory.human)}/{memory.human_char_limit}">' if include_char_count else "<human>", memory.human, "</human>", ] ) return full_system_message def initialize_message_sequence( model, system, memory, archival_memory=None, 
recall_memory=None, memory_edit_timestamp=None, include_initial_boot_message=True, ): if memory_edit_timestamp is None: memory_edit_timestamp = get_local_time() full_system_message = construct_system_with_memory( system, memory, memory_edit_timestamp, archival_memory=archival_memory, recall_memory=recall_memory ) first_user_message = get_login_event() # event letting MemGPT know the user just logged in if include_initial_boot_message: if model is not None and "gpt-3.5" in model: initial_boot_messages = get_initial_boot_messages("startup_with_send_message_gpt35") else: initial_boot_messages = get_initial_boot_messages("startup_with_send_message") messages = ( [ {"role": "system", "content": full_system_message}, ] + initial_boot_messages + [ {"role": "user", "content": first_user_message}, ] ) else: messages = [ {"role": "system", "content": full_system_message}, {"role": "user", "content": first_user_message}, ] return messages class Agent(object): def __init__( self, agent_state: AgentState, interface: AgentInterface, # extras messages_total=None, # TODO remove? first_message_verify_mono=True, # TODO move to config? memgpt_config: MemGPTConfig = None, ): # Hold a copy of the state that was used to init the agent self.agent_state = agent_state # gpt-4, gpt-3.5-turbo, ... self.model = agent_state.llm_config.model # Store the system instructions (used to rebuild memory) if "system" not in agent_state.state: raise ValueError(f"'system' not found in provided AgentState") self.system = agent_state.state["system"] if "functions" not in agent_state.state: raise ValueError(f"'functions' not found in provided AgentState") # Store the functions schemas (this is passed as an argument to ChatCompletion) self.functions = agent_state.state["functions"] # these are the schema # Link the actual python functions corresponding to the schemas self.functions_python = {k: v["python_function"] for k, v in link_functions(function_schemas=self.functions).items()} assert all([callable(f) for k, f in self.functions_python.items()]), self.functions_python # Initialize the memory object if "persona" not in agent_state.state: raise ValueError(f"'persona' not found in provided AgentState") if "human" not in agent_state.state: raise ValueError(f"'human' not found in provided AgentState") self.memory = initialize_memory(ai_notes=agent_state.state["persona"], human_notes=agent_state.state["human"]) # Interface must implement: # - internal_monologue # - assistant_message # - function_message # ... 
# Different interfaces can handle events differently # e.g., print in CLI vs send a discord message with a discord bot self.interface = interface # Create the persistence manager object based on the AgentState info # TODO self.persistence_manager = LocalStateManager(agent_state=agent_state) # State needed for heartbeat pausing self.pause_heartbeats_start = None self.pause_heartbeats_minutes = 0 self.first_message_verify_mono = first_message_verify_mono # Controls if the convo memory pressure warning is triggered # When an alert is sent in the message queue, set this to True (to avoid repeat alerts) # When the summarizer is run, set this back to False (to reset) self.agent_alerted_about_memory_pressure = False # Read local config if not provided if not memgpt_config: self.memgpt_config = MemGPTConfig() else: self.memgpt_config = memgpt_config # Initialize connection to metedata store # self.ms = MetadataStore(self.memgpt_config) # Once the memory object is initialized, use it to "bake" the system message if "messages" in agent_state.state and agent_state.state["messages"] is not None: # print(f"Agent.__init__ :: loading, state={agent_state.state['messages']}") if not isinstance(agent_state.state["messages"], list): raise ValueError(f"'messages' in AgentState was bad type: {type(agent_state.state['messages'])}") assert all([isinstance(msg, str) for msg in agent_state.state["messages"]]) # Convert to IDs, and pull from the database self._messages = [ self.persistence_manager.recall_memory.storage.get(uuid.UUID(msg_id)) for msg_id in agent_state.state["messages"] ] assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, agent_state.state["messages"]) else: # print(f"Agent.__init__ :: creating, state={agent_state.state['messages']}") init_messages = initialize_message_sequence( self.model, self.system, self.memory, ) init_messages_objs = [] for msg in init_messages: init_messages_objs.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=msg ) ) self._messages = [] self.messages_total = 0 self._append_to_messages(added_messages=init_messages_objs) assert all([isinstance(msg, Message) for msg in self._messages]), (self._messages, init_messages) # Keep track of the total number of messages throughout all time self.messages_total = messages_total if messages_total is not None else (len(self._messages) - 1) # (-system) # self.messages_total_init = self.messages_total self.messages_total_init = len(self._messages) - 1 printd(f"Agent initialized, self.messages_total={self.messages_total}") # Create the agent in the DB # self.save() self.update_state() @property def messages(self) -> List[dict]: """Getter method that converts the internal Message list into OpenAI-style dicts""" return [msg.to_openai_dict() for msg in self._messages] @messages.setter def messages(self, value): raise Exception("Modifying message list directly not allowed") def _trim_messages(self, num): """Trim messages from the front, not including the system message""" self.persistence_manager.trim_messages(num) new_messages = [self.messages[0]] + self.messages[num:] self._messages = new_messages def _prepend_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.prepend to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.prepend_to_messages(added_messages) new_messages = [self.messages[0]] + 
added_messages + self.messages[1:] # prepend (no system) self._messages = new_messages self.messages_total += len(added_messages) # still should increment the message counter (summaries are additions too) def _append_to_messages(self, added_messages: List[Message]): """Wrapper around self.messages.append to allow additional calls to a state/persistence manager""" assert all([isinstance(msg, Message) for msg in added_messages]) self.persistence_manager.append_to_messages(added_messages) # strip extra metadata if it exists # for msg in added_messages: # msg.pop("api_response", None) # msg.pop("api_args", None) new_messages = self._messages + added_messages # append self._messages = new_messages self.messages_total += len(added_messages) def _swap_system_message(self, new_system_message: Message): assert isinstance(new_system_message, Message) assert new_system_message.role == "system", new_system_message assert self._messages[0].role == "system", self._messages self.persistence_manager.swap_system_message(new_system_message) new_messages = [new_system_message] + self._messages[1:] # swap index 0 (system) self._messages = new_messages def _get_ai_reply( self, message_sequence: List[dict], function_call: str = "auto", first_message: bool = False, # hint ) -> chat_completion_response.ChatCompletionResponse: """Get response from LLM API""" try: response = create( agent_state=self.agent_state, messages=message_sequence, functions=self.functions, functions_python=self.functions_python, function_call=function_call, # hint first_message=first_message, ) # special case for 'length' if response.choices[0].finish_reason == "length": raise Exception("Finish reason was length (maximum context length)") # catches for soft errors if response.choices[0].finish_reason not in ["stop", "function_call", "tool_calls"]: raise Exception(f"API call finish with bad finish reason: {response}") # unpack with response.choices[0].message.content return response except Exception as e: raise e def _handle_ai_response( self, response_message: chat_completion_response.Message, override_tool_call_id: bool = True ) -> Tuple[List[Message], bool, bool]: """Handles parsing and function execution""" messages = [] # append these to the history when done # Step 2: check if LLM wanted to call a function if response_message.function_call or (response_message.tool_calls is not None and len(response_message.tool_calls) > 0): if response_message.function_call: raise DeprecationWarning(response_message) if response_message.tool_calls is not None and len(response_message.tool_calls) > 1: raise NotImplementedError(f">1 tool call not supported") # The content if then internal monologue, not chat self.interface.internal_monologue(response_message.content) # generate UUID for tool call if override_tool_call_id or response_message.function_call: tool_call_id = get_tool_call_id() # needs to be a string for JSON response_message.tool_calls[0].id = tool_call_id else: tool_call_id = response_message.tool_calls[0].id assert tool_call_id is not None # should be defined # only necessary to add the tool_cal_id to a function call (antipattern) # response_message_dict = response_message.model_dump() # response_message_dict["tool_call_id"] = tool_call_id # role: assistant (requesting tool call, set tool call ID) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply printd(f"Function call 
message: {messages[-1]}") # Step 3: call the function # Note: the JSON response may not always be valid; be sure to handle errors # Failure case 1: function name is wrong function_call = ( response_message.function_call if response_message.function_call is not None else response_message.tool_calls[0].function ) function_name = function_call.name printd(f"Request to call function {function_name} with tool_call_id: {tool_call_id}") try: function_to_call = self.functions_python[function_name] except KeyError as e: error_msg = f"No function named {function_name}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # Failure case 2: function name is OK, but function args are bad JSON try: raw_function_args = function_call.arguments function_args = parse_json(raw_function_args) except Exception as e: error_msg = f"Error parsing JSON for function '{function_name}' arguments: {raw_function_args}" function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # (Still parsing function args) # Handle requests for immediate heartbeat heartbeat_request = function_args.pop("request_heartbeat", None) if not (isinstance(heartbeat_request, bool) or heartbeat_request is None): printd( f"{CLI_WARNING_PREFIX}'request_heartbeat' arg parsed was not a bool or None, type={type(heartbeat_request)}, value={heartbeat_request}" ) heartbeat_request = None # Failure case 3: function failed during execution self.interface.function_message(f"Running {function_name}({function_args})") try: spec = inspect.getfullargspec(function_to_call).annotations for name, arg in function_args.items(): if isinstance(function_args[name], dict): function_args[name] = spec[name](**function_args[name]) function_args["self"] = self # need to attach self to arg since it's dynamically linked function_response = function_to_call(**function_args) if function_name in ["conversation_search", "conversation_search_date", "archival_memory_search"]: # with certain functions we rely on the paging mechanism to handle overflow truncate = False else: # but by default, we add a truncation safeguard to prevent bad functions from # overflow the agent context window truncate = True function_response_string = validate_function_response(function_response, truncate=truncate) function_args.pop("self", None) function_response = package_function_response(True, function_response_string) function_failed = False except Exception as e: function_args.pop("self", None) # error_msg = f"Error calling function {function_name} with args {function_args}: {str(e)}" # Less detailed - don't provide full args, idea is that it should be in recent context so no need (just adds noise) error_msg = f"Error calling function 
{function_name}: {str(e)}" error_msg_user = f"{error_msg}\n{traceback.format_exc()}" printd(error_msg_user) function_response = package_function_response(False, error_msg) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response self.interface.function_message(f"Error: {error_msg}") return messages, None, True # force a heartbeat to allow agent to handle error # If no failures happened along the way: ... # Step 4: send the info on the function call and function response to GPT self.interface.function_message(f"Success: {function_response_string}") messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict={ "role": "tool", "name": function_name, "content": function_response, "tool_call_id": tool_call_id, }, ) ) # extend conversation with function response else: # Standard non-function reply self.interface.internal_monologue(response_message.content) messages.append( Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=response_message.model_dump(), ) ) # extend conversation with assistant's reply heartbeat_request = None function_failed = None return messages, heartbeat_request, function_failed def step(self, user_message, first_message=False, first_message_retry_limit=FIRST_MESSAGE_ATTEMPTS, skip_verify=False): """Top-level event message handler for the MemGPT agent""" try: # Step 0: add user message if user_message is not None: self.interface.user_message(user_message) packed_user_message = {"role": "user", "content": user_message} # Special handling for AutoGen messages with 'name' field try: user_message_json = json.loads(user_message) # Treat 'name' as a special field # If it exists in the input message, elevate it to the 'message' level if "name" in user_message_json: packed_user_message["name"] = user_message_json["name"] user_message_json.pop("name", None) packed_user_message["content"] = json.dumps(user_message_json, ensure_ascii=JSON_ENSURE_ASCII) except Exception as e: print(f"{CLI_WARNING_PREFIX}handling of 'name' field failed with: {e}") input_message_sequence = self.messages + [packed_user_message] else: input_message_sequence = self.messages if len(input_message_sequence) > 1 and input_message_sequence[-1]["role"] != "user": printd(f"{CLI_WARNING_PREFIX}Attempting to run ChatCompletion without user as the last message in the queue") # Step 1: send the conversation and available functions to GPT if not skip_verify and (first_message or self.messages_total == self.messages_total_init): printd(f"This is the first message. 
Running extra verifier on AI response.") counter = 0 while True: response = self._get_ai_reply( message_sequence=input_message_sequence, first_message=True, # passed through to the prompt formatter ) if verify_first_message_correctness(response, require_monologue=self.first_message_verify_mono): break counter += 1 if counter > first_message_retry_limit: raise Exception(f"Hit first message retry limit ({first_message_retry_limit})") else: response = self._get_ai_reply( message_sequence=input_message_sequence, ) # Step 2: check if LLM wanted to call a function # (if yes) Step 3: call the function # (if yes) Step 4: send the info on the function call and function response to LLM response_message = response.choices[0].message response_message_copy = response_message.copy() all_response_messages, heartbeat_request, function_failed = self._handle_ai_response(response_message) # Add the extra metadata to the assistant response # (e.g. enough metadata to enable recreating the API call) # assert "api_response" not in all_response_messages[0] # all_response_messages[0]["api_response"] = response_message_copy # assert "api_args" not in all_response_messages[0] # all_response_messages[0]["api_args"] = { # "model": self.model, # "messages": input_message_sequence, # "functions": self.functions, # } # Step 4: extend the message history if user_message is not None: all_new_messages = [ Message.dict_to_message( agent_id=self.agent_state.id, user_id=self.agent_state.user_id, model=self.model, openai_message_dict=packed_user_message, ) ] + all_response_messages else: all_new_messages = all_response_messages # Check the memory pressure and potentially issue a memory pressure warning current_total_tokens = response.usage.total_tokens active_memory_warning = False # We can't do summarize logic properly if context_window is undefined if self.agent_state.llm_config.context_window is None: # Fallback if for some reason context_window is missing, just set to the default print(f"{CLI_WARNING_PREFIX}could not find context_window in config, setting to default {LLM_MAX_TOKENS['DEFAULT']}") print(f"{self.agent_state}") self.agent_state.llm_config.context_window = ( str(LLM_MAX_TOKENS[self.model]) if (self.model is not None and self.model in LLM_MAX_TOKENS) else str(LLM_MAX_TOKENS["DEFAULT"]) )
if current_total_tokens > MESSAGE_SUMMARY_WARNING_FRAC * int(self.agent_state.llm_config.context_window):
25
2023-10-11 07:38:37+00:00
24k
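As an editorial aside between the two records (not part of either row): the conditional shown a few lines above fires a summarization warning once token usage crosses a fraction of the model's context window, using the MESSAGE_SUMMARY_WARNING_FRAC and LLM_MAX_TOKENS constants captured earlier in the same row. A minimal, self-contained sketch of that check follows; the function name and the example numbers are hypothetical illustrations, not taken from the source repository.

# Illustrative sketch only (not part of the dataset row above). Names and
# example values are hypothetical; constants mirror those recorded in the row.
MESSAGE_SUMMARY_WARNING_FRAC = 0.75            # fraction of the context window that triggers a warning
LLM_MAX_TOKENS = {"DEFAULT": 8192, "gpt-4": 8192, "gpt-3.5-turbo": 4096}

def should_warn_memory_pressure(current_total_tokens: int,
                                context_window=None,
                                model=None) -> bool:
    """Return True when token usage crosses the warning fraction of the context window.

    Mirrors the row's logic: if context_window is missing, fall back to the
    per-model limit (or the default), and cast to int before comparing, since
    the fallback may be stored as a string.
    """
    if context_window is None:
        context_window = LLM_MAX_TOKENS.get(model, LLM_MAX_TOKENS["DEFAULT"])
    return current_total_tokens > MESSAGE_SUMMARY_WARNING_FRAC * int(context_window)

# Usage: 6500 used tokens against an 8192-token window (~79%) exceeds the 75% threshold.
assert should_warn_memory_pressure(6500, None, "gpt-4") is True
assert should_warn_memory_pressure(3000, 8192) is False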
PixArt-alpha/PixArt-alpha
train_scripts/train_controlnet.py
[ { "identifier": "IDDPM", "path": "diffusion/iddpm.py", "snippet": "def IDDPM(\n timestep_respacing,\n noise_schedule=\"linear\",\n use_kl=False,\n sigma_small=False,\n predict_xstart=False,\n learn_sigma=True,\n pred_sigma=True,\n rescale_learned_sigmas=False,\n diffusion_steps=1000,\n snr=False,\n return_startx=False,\n):\n betas = gd.get_named_beta_schedule(noise_schedule, diffusion_steps)\n if use_kl:\n loss_type = gd.LossType.RESCALED_KL\n elif rescale_learned_sigmas:\n loss_type = gd.LossType.RESCALED_MSE\n else:\n loss_type = gd.LossType.MSE\n if timestep_respacing is None or timestep_respacing == \"\":\n timestep_respacing = [diffusion_steps]\n return SpacedDiffusion(\n use_timesteps=space_timesteps(diffusion_steps, timestep_respacing),\n betas=betas,\n model_mean_type=(\n gd.ModelMeanType.EPSILON if not predict_xstart else gd.ModelMeanType.START_X\n ),\n model_var_type=(\n ((\n gd.ModelVarType.FIXED_LARGE\n if not sigma_small\n else gd.ModelVarType.FIXED_SMALL\n )\n if not learn_sigma\n else gd.ModelVarType.LEARNED_RANGE\n )\n if pred_sigma\n else None\n ),\n loss_type=loss_type,\n snr=snr,\n return_startx=return_startx,\n # rescale_timesteps=rescale_timesteps,\n )" }, { "identifier": "build_dataset", "path": "diffusion/data/builder.py", "snippet": "def build_dataset(cfg, resolution=224, **kwargs):\n logger = get_root_logger()\n\n dataset_type = cfg.get('type')\n logger.info(f\"Constructing dataset {dataset_type}...\")\n t = time.time()\n transform = cfg.pop('transform', 'default_train')\n transform = get_transform(transform, resolution)\n dataset = build_from_cfg(cfg, DATASETS, default_args=dict(transform=transform, resolution=resolution, **kwargs))\n logger.info(f\"Dataset {dataset_type} constructed. time: {(time.time() - t):.2f} s, length (use/ori): {len(dataset)}/{dataset.ori_imgs_nums}\")\n return dataset" }, { "identifier": "build_dataloader", "path": "diffusion/data/builder.py", "snippet": "def build_dataloader(dataset, batch_size=256, num_workers=4, shuffle=True, **kwargs):\n if 'batch_sampler' in kwargs:\n dataloader = DataLoader(dataset, batch_sampler=kwargs['batch_sampler'], num_workers=num_workers, pin_memory=True)\n else:\n dataloader = DataLoader(dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers,\n pin_memory=True,\n **kwargs)\n return dataloader" }, { "identifier": "set_data_root", "path": "diffusion/data/builder.py", "snippet": "def set_data_root(data_root):\n global DATA_ROOT\n DATA_ROOT = data_root" }, { "identifier": "build_model", "path": "diffusion/model/builder.py", "snippet": "def build_model(cfg, use_grad_checkpoint=False, use_fp32_attention=False, gc_step=1, **kwargs):\n if isinstance(cfg, str):\n cfg = dict(type=cfg)\n model = MODELS.build(cfg, default_args=kwargs)\n if use_grad_checkpoint:\n set_grad_checkpoint(model, use_fp32_attention=use_fp32_attention, gc_step=gc_step)\n return model" }, { "identifier": "PixArtMS", "path": "diffusion/model/nets/PixArtMS.py", "snippet": "class PixArtMS(PixArt):\n \"\"\"\n Diffusion model with a Transformer backbone.\n \"\"\"\n\n def __init__(\n self,\n input_size=32,\n patch_size=2,\n in_channels=4,\n hidden_size=1152,\n depth=28,\n num_heads=16,\n mlp_ratio=4.0,\n class_dropout_prob=0.1,\n learn_sigma=True,\n pred_sigma=True,\n drop_path: float = 0.,\n window_size=0,\n window_block_indexes=[],\n use_rel_pos=False,\n caption_channels=4096,\n lewei_scale=1.,\n config=None,\n model_max_length=120,\n **kwargs,\n ):\n super().__init__(\n input_size=input_size,\n patch_size=patch_size,\n 
in_channels=in_channels,\n hidden_size=hidden_size,\n depth=depth,\n num_heads=num_heads,\n mlp_ratio=mlp_ratio,\n class_dropout_prob=class_dropout_prob,\n learn_sigma=learn_sigma,\n pred_sigma=pred_sigma,\n drop_path=drop_path,\n window_size=window_size,\n window_block_indexes=window_block_indexes,\n use_rel_pos=use_rel_pos,\n lewei_scale=lewei_scale,\n config=config,\n model_max_length=model_max_length,\n **kwargs,\n )\n self.h = self.w = 0\n approx_gelu = lambda: nn.GELU(approximate=\"tanh\")\n self.t_block = nn.Sequential(\n nn.SiLU(),\n nn.Linear(hidden_size, 6 * hidden_size, bias=True)\n )\n self.x_embedder = PatchEmbed(patch_size, in_channels, hidden_size, bias=True)\n self.y_embedder = CaptionEmbedder(in_channels=caption_channels, hidden_size=hidden_size, uncond_prob=class_dropout_prob, act_layer=approx_gelu, token_num=model_max_length)\n self.csize_embedder = SizeEmbedder(hidden_size//3) # c_size embed\n self.ar_embedder = SizeEmbedder(hidden_size//3) # aspect ratio embed\n drop_path = [x.item() for x in torch.linspace(0, drop_path, depth)] # stochastic depth decay rule\n self.blocks = nn.ModuleList([\n PixArtMSBlock(hidden_size, num_heads, mlp_ratio=mlp_ratio, drop_path=drop_path[i],\n input_size=(input_size // patch_size, input_size // patch_size),\n window_size=window_size if i in window_block_indexes else 0,\n use_rel_pos=use_rel_pos if i in window_block_indexes else False)\n for i in range(depth)\n ])\n self.final_layer = T2IFinalLayer(hidden_size, patch_size, self.out_channels)\n\n self.initialize()\n\n def forward(self, x, timestep, y, mask=None, data_info=None, **kwargs):\n \"\"\"\n Forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n bs = x.shape[0]\n c_size, ar = data_info['img_hw'], data_info['aspect_ratio']\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).float().unsqueeze(0).to(x.device)\n x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(timestep) # (N, D)\n csize = self.csize_embedder(c_size, bs) # (N, D)\n ar = self.ar_embedder(ar, bs) # (N, D)\n t = t + torch.cat([csize, ar], dim=1)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n for block in self.blocks:\n x = auto_grad_checkpoint(block, x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward_with_dpmsolver(self, x, timestep, y, data_info, **kwargs):\n \"\"\"\n dpm solver donnot need variance prediction\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n model_out = self.forward(x, timestep, y, data_info=data_info, **kwargs)\n return model_out.chunk(2, dim=1)[0]\n\n def forward_with_cfg(self, x, timestep, y, cfg_scale, data_info, **kwargs):\n \"\"\"\n Forward pass 
of PixArt, but also batches the unconditional forward pass for classifier-free guidance.\n \"\"\"\n # https://github.com/openai/glide-text2im/blob/main/notebooks/text2im.ipynb\n half = x[: len(x) // 2]\n combined = torch.cat([half, half], dim=0)\n model_out = self.forward(combined, timestep, y, data_info=data_info)\n eps, rest = model_out[:, :3], model_out[:, 3:]\n cond_eps, uncond_eps = torch.split(eps, len(eps) // 2, dim=0)\n half_eps = uncond_eps + cfg_scale * (cond_eps - uncond_eps)\n eps = torch.cat([half_eps, half_eps], dim=0)\n return torch.cat([eps, rest], dim=1)\n\n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = self.x_embedder.patch_size[0]\n assert self.h * self.w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], self.h, self.w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, self.h * p, self.w * p))\n return imgs\n\n def initialize(self):\n # Initialize transformer layers:\n def _basic_init(module):\n if isinstance(module, nn.Linear):\n torch.nn.init.xavier_uniform_(module.weight)\n if module.bias is not None:\n nn.init.constant_(module.bias, 0)\n\n self.apply(_basic_init)\n\n # Initialize patch_embed like nn.Linear (instead of nn.Conv2d):\n w = self.x_embedder.proj.weight.data\n nn.init.xavier_uniform_(w.view([w.shape[0], -1]))\n\n # Initialize timestep embedding MLP:\n nn.init.normal_(self.t_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.t_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.t_block[1].weight, std=0.02)\n nn.init.normal_(self.csize_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.csize_embedder.mlp[2].weight, std=0.02)\n nn.init.normal_(self.ar_embedder.mlp[0].weight, std=0.02)\n nn.init.normal_(self.ar_embedder.mlp[2].weight, std=0.02)\n\n # Initialize caption embedding MLP:\n nn.init.normal_(self.y_embedder.y_proj.fc1.weight, std=0.02)\n nn.init.normal_(self.y_embedder.y_proj.fc2.weight, std=0.02)\n\n # Zero-out adaLN modulation layers in PixArt blocks:\n for block in self.blocks:\n nn.init.constant_(block.cross_attn.proj.weight, 0)\n nn.init.constant_(block.cross_attn.proj.bias, 0)\n\n # Zero-out output layers:\n nn.init.constant_(self.final_layer.linear.weight, 0)\n nn.init.constant_(self.final_layer.linear.bias, 0)" }, { "identifier": "ControlPixArtHalf", "path": "diffusion/model/nets/pixart_controlnet.py", "snippet": "class ControlPixArtHalf(Module):\n # only support single res model\n def __init__(self, base_model: PixArt, copy_blocks_num: int = 13) -> None:\n super().__init__()\n self.base_model = base_model.eval()\n self.controlnet = []\n self.copy_blocks_num = copy_blocks_num\n self.total_blocks_num = len(base_model.blocks)\n for p in self.base_model.parameters():\n p.requires_grad_(False)\n\n # Copy first copy_blocks_num block\n for i in range(copy_blocks_num):\n self.controlnet.append(ControlT2IDitBlockHalf(base_model.blocks[i], i))\n self.controlnet = nn.ModuleList(self.controlnet)\n \n def __getattr__(self, name: str) -> Tensor or Module:\n if name in ['forward', 'forward_with_dpmsolver', 'forward_with_cfg', 'forward_c', 'load_state_dict']:\n return self.__dict__[name]\n elif name in ['base_model', 'controlnet']:\n return super().__getattr__(name)\n else:\n return getattr(self.base_model, name)\n\n def forward_c(self, c):\n self.h, self.w = c.shape[-2]//self.patch_size, c.shape[-1]//self.patch_size\n pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), 
lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(c.device).to(self.dtype)\n return self.x_embedder(c) + pos_embed if c is not None else c\n\n # def forward(self, x, t, c, **kwargs):\n # return self.base_model(x, t, c=self.forward_c(c), **kwargs)\n def forward(self, x, timestep, y, mask=None, data_info=None, c=None, **kwargs):\n # modify the original PixArtMS forward function\n if c is not None:\n c = c.to(self.dtype)\n c = self.forward_c(c)\n \"\"\"\n Forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n x = x.to(self.dtype)\n timestep = timestep.to(self.dtype)\n y = y.to(self.dtype)\n pos_embed = self.pos_embed.to(self.dtype)\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(timestep.to(x.dtype)) # (N, D)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, 1, L, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n\n # define the first layer\n x = auto_grad_checkpoint(self.base_model.blocks[0], x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint\n\n if c is not None:\n # update c\n for index in range(1, self.copy_blocks_num + 1):\n c, c_skip = auto_grad_checkpoint(self.controlnet[index - 1], x, y, t0, y_lens, c, **kwargs)\n x = auto_grad_checkpoint(self.base_model.blocks[index], x + c_skip, y, t0, y_lens, **kwargs)\n \n # update x\n for index in range(self.copy_blocks_num + 1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n else:\n for index in range(1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x\n\n def forward_with_dpmsolver(self, x, t, y, data_info, c, **kwargs):\n model_out = self.forward(x, t, y, data_info=data_info, c=c, **kwargs)\n return model_out.chunk(2, dim=1)[0]\n\n # def forward_with_dpmsolver(self, x, t, y, data_info, c, **kwargs):\n # return self.base_model.forward_with_dpmsolver(x, t, y, data_info=data_info, c=self.forward_c(c), **kwargs)\n\n def forward_with_cfg(self, x, t, y, cfg_scale, data_info, c, **kwargs):\n return self.base_model.forward_with_cfg(x, t, y, cfg_scale, data_info, c=self.forward_c(c), **kwargs)\n\n def load_state_dict(self, state_dict: Mapping[str, Any], strict: bool = True):\n if all((k.startswith('base_model') or k.startswith('controlnet')) for k in state_dict.keys()):\n return super().load_state_dict(state_dict, strict)\n else:\n new_key = {}\n for k in state_dict.keys():\n new_key[k] = re.sub(r\"(blocks\\.\\d+)(.*)\", r\"\\1.base_block\\2\", k)\n for k, v in new_key.items():\n if k != v:\n print(f\"replace {k} to {v}\")\n state_dict[v] = state_dict.pop(k)\n\n return self.base_model.load_state_dict(state_dict, strict)\n \n def unpatchify(self, x):\n \"\"\"\n x: (N, T, patch_size**2 * C)\n imgs: (N, H, W, C)\n \"\"\"\n c = self.out_channels\n p = 
self.x_embedder.patch_size[0]\n assert self.h * self.w == x.shape[1]\n\n x = x.reshape(shape=(x.shape[0], self.h, self.w, p, p, c))\n x = torch.einsum('nhwpqc->nchpwq', x)\n imgs = x.reshape(shape=(x.shape[0], c, self.h * p, self.w * p))\n return imgs\n\n @property\n def dtype(self):\n # 返回模型参数的数据类型\n return next(self.parameters()).dtype" }, { "identifier": "ControlPixArtMSHalf", "path": "diffusion/model/nets/pixart_controlnet.py", "snippet": "class ControlPixArtMSHalf(ControlPixArtHalf):\n # support multi-scale res model (multi-scale model can also be applied to single reso training & inference)\n def __init__(self, base_model: PixArtMS, copy_blocks_num: int = 13) -> None:\n super().__init__(base_model=base_model, copy_blocks_num=copy_blocks_num)\n\n def forward(self, x, timestep, y, mask=None, data_info=None, c=None, **kwargs):\n # modify the original PixArtMS forward function\n \"\"\"\n Forward pass of PixArt.\n x: (N, C, H, W) tensor of spatial inputs (images or latent representations of images)\n t: (N,) tensor of diffusion timesteps\n y: (N, 1, 120, C) tensor of class labels\n \"\"\"\n if c is not None:\n c = c.to(self.dtype)\n c = self.forward_c(c)\n bs = x.shape[0]\n x = x.to(self.dtype)\n timestep = timestep.to(self.dtype)\n y = y.to(self.dtype)\n c_size, ar = data_info['img_hw'].to(self.dtype), data_info['aspect_ratio'].to(self.dtype)\n self.h, self.w = x.shape[-2]//self.patch_size, x.shape[-1]//self.patch_size\n\n pos_embed = torch.from_numpy(get_2d_sincos_pos_embed(self.pos_embed.shape[-1], (self.h, self.w), lewei_scale=self.lewei_scale, base_size=self.base_size)).unsqueeze(0).to(x.device).to(self.dtype)\n x = self.x_embedder(x) + pos_embed # (N, T, D), where T = H * W / patch_size ** 2\n t = self.t_embedder(timestep) # (N, D)\n csize = self.csize_embedder(c_size, bs) # (N, D)\n ar = self.ar_embedder(ar, bs) # (N, D)\n t = t + torch.cat([csize, ar], dim=1)\n t0 = self.t_block(t)\n y = self.y_embedder(y, self.training) # (N, D)\n if mask is not None:\n if mask.shape[0] != y.shape[0]:\n mask = mask.repeat(y.shape[0] // mask.shape[0], 1)\n mask = mask.squeeze(1).squeeze(1)\n y = y.squeeze(1).masked_select(mask.unsqueeze(-1) != 0).view(1, -1, x.shape[-1])\n y_lens = mask.sum(dim=1).tolist()\n else:\n y_lens = [y.shape[2]] * y.shape[0]\n y = y.squeeze(1).view(1, -1, x.shape[-1])\n\n # define the first layer\n x = auto_grad_checkpoint(self.base_model.blocks[0], x, y, t0, y_lens, **kwargs) # (N, T, D) #support grad checkpoint\n\n if c is not None:\n # update c\n for index in range(1, self.copy_blocks_num + 1):\n c, c_skip = auto_grad_checkpoint(self.controlnet[index - 1], x, y, t0, y_lens, c, **kwargs)\n x = auto_grad_checkpoint(self.base_model.blocks[index], x + c_skip, y, t0, y_lens, **kwargs)\n \n # update x\n for index in range(self.copy_blocks_num + 1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n else:\n for index in range(1, self.total_blocks_num):\n x = auto_grad_checkpoint(self.base_model.blocks[index], x, y, t0, y_lens, **kwargs)\n\n x = self.final_layer(x, t) # (N, T, patch_size ** 2 * out_channels)\n x = self.unpatchify(x) # (N, out_channels, H, W)\n return x" }, { "identifier": "save_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def save_checkpoint(work_dir,\n epoch,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n keep_last=False,\n step=None,\n ):\n os.makedirs(work_dir, exist_ok=True)\n state_dict = dict(state_dict=model.state_dict())\n if model_ema is not None:\n 
state_dict['state_dict_ema'] = model_ema.state_dict()\n if optimizer is not None:\n state_dict['optimizer'] = optimizer.state_dict()\n if lr_scheduler is not None:\n state_dict['scheduler'] = lr_scheduler.state_dict()\n if epoch is not None:\n state_dict['epoch'] = epoch\n file_path = os.path.join(work_dir, f\"epoch_{epoch}.pth\")\n if step is not None:\n file_path = file_path.split('.pth')[0] + f\"_step_{step}.pth\"\n logger = get_root_logger()\n torch.save(state_dict, file_path)\n logger.info(f'Saved checkpoint of epoch {epoch} to {file_path.format(epoch)}.')\n if keep_last:\n for i in range(epoch):\n previous_ckgt = file_path.format(i)\n if os.path.exists(previous_ckgt):\n os.remove(previous_ckgt)" }, { "identifier": "load_checkpoint", "path": "diffusion/utils/checkpoint.py", "snippet": "def load_checkpoint(checkpoint,\n model,\n model_ema=None,\n optimizer=None,\n lr_scheduler=None,\n load_ema=False,\n resume_optimizer=True,\n resume_lr_scheduler=True\n ):\n assert isinstance(checkpoint, str)\n ckpt_file = checkpoint\n checkpoint = torch.load(ckpt_file, map_location=\"cpu\")\n\n state_dict_keys = ['pos_embed', 'base_model.pos_embed', 'model.pos_embed']\n for key in state_dict_keys:\n if key in checkpoint['state_dict']:\n del checkpoint['state_dict'][key]\n if 'state_dict_ema' in checkpoint and key in checkpoint['state_dict_ema']:\n del checkpoint['state_dict_ema'][key]\n break\n\n if load_ema:\n state_dict = checkpoint['state_dict_ema']\n else:\n state_dict = checkpoint.get('state_dict', checkpoint) # to be compatible with the official checkpoint\n # model.load_state_dict(state_dict)\n missing, unexpect = model.load_state_dict(state_dict, strict=False)\n if model_ema is not None:\n model_ema.load_state_dict(checkpoint['state_dict_ema'], strict=False)\n if optimizer is not None and resume_optimizer:\n optimizer.load_state_dict(checkpoint['optimizer'])\n if lr_scheduler is not None and resume_lr_scheduler:\n lr_scheduler.load_state_dict(checkpoint['scheduler'])\n logger = get_root_logger()\n if optimizer is not None:\n epoch = checkpoint.get('epoch', re.match(r'.*epoch_(\\d*).*.pth', ckpt_file).group()[0])\n logger.info(f'Resume checkpoint of epoch {epoch} from {ckpt_file}. Load ema: {load_ema}, '\n f'resume optimizer: {resume_optimizer}, resume lr scheduler: {resume_lr_scheduler}.')\n return epoch, missing, unexpect\n logger.info(f'Load checkpoint from {ckpt_file}. 
Load ema: {load_ema}.')\n return missing, unexpect" }, { "identifier": "AspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class AspectRatioBatchSampler(BatchSampler):\n \"\"\"A sampler wrapper for grouping images with similar aspect ratio into a same batch.\n\n Args:\n sampler (Sampler): Base sampler.\n dataset (Dataset): Dataset providing data information.\n batch_size (int): Size of mini-batch.\n drop_last (bool): If ``True``, the sampler will drop the last batch if\n its size would be less than ``batch_size``.\n aspect_ratios (dict): The predefined aspect ratios.\n \"\"\"\n\n def __init__(self,\n sampler: Sampler,\n dataset: Dataset,\n batch_size: int,\n aspect_ratios: dict,\n drop_last: bool = False,\n config=None,\n valid_num=0, # take as valid aspect-ratio when sample number >= valid_num\n **kwargs) -> None:\n if not isinstance(sampler, Sampler):\n raise TypeError('sampler should be an instance of ``Sampler``, '\n f'but got {sampler}')\n if not isinstance(batch_size, int) or batch_size <= 0:\n raise ValueError('batch_size should be a positive integer value, '\n f'but got batch_size={batch_size}')\n self.sampler = sampler\n self.dataset = dataset\n self.batch_size = batch_size\n self.aspect_ratios = aspect_ratios\n self.drop_last = drop_last\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n self.config = config\n assert self.ratio_nums_gt\n # buckets for each aspect ratio\n self._aspect_ratio_buckets = {ratio: [] for ratio in aspect_ratios.keys()}\n self.current_available_bucket_keys = [str(k) for k, v in self.ratio_nums_gt.items() if v >= valid_num]\n logger = get_root_logger() if config is None else get_root_logger(os.path.join(config.work_dir, 'train_log.log'))\n logger.warning(f\"Using valid_num={valid_num} in config file. 
Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n # find the closest aspect ratio\n closest_ratio = min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio))\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n bucket = self._aspect_ratio_buckets[closest_ratio]\n bucket.append(idx)\n # yield a batch of indices in the same aspect ratio group\n if len(bucket) == self.batch_size:\n yield bucket[:]\n del bucket[:]\n\n # yield the rest data and reset the buckets\n for bucket in self._aspect_ratio_buckets.values():\n while len(bucket) > 0:\n if len(bucket) <= self.batch_size:\n if not self.drop_last:\n yield bucket[:]\n bucket = []\n else:\n yield bucket[:self.batch_size]\n bucket = bucket[self.batch_size:]" }, { "identifier": "BalancedAspectRatioBatchSampler", "path": "diffusion/utils/data_sampler.py", "snippet": "class BalancedAspectRatioBatchSampler(AspectRatioBatchSampler):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # Assign samples to each bucket\n self.ratio_nums_gt = kwargs.get('ratio_nums', None)\n assert self.ratio_nums_gt\n self._aspect_ratio_buckets = {float(ratio): [] for ratio in self.aspect_ratios.keys()}\n self.original_buckets = {}\n self.current_available_bucket_keys = [k for k, v in self.ratio_nums_gt.items() if v >= 3000]\n self.all_available_keys = deepcopy(self.current_available_bucket_keys)\n self.exhausted_bucket_keys = []\n self.total_batches = len(self.sampler) // self.batch_size\n self._aspect_ratio_count = {}\n for k in self.all_available_keys:\n self._aspect_ratio_count[float(k)] = 0\n self.original_buckets[float(k)] = []\n logger = get_root_logger(os.path.join(self.config.work_dir, 'train_log.log'))\n logger.warning(f\"Available {len(self.current_available_bucket_keys)} aspect_ratios: {self.current_available_bucket_keys}\")\n\n def __iter__(self) -> Sequence[int]:\n i = 0\n for idx in self.sampler:\n data_info = self.dataset.get_data_info(idx)\n height, width = data_info['height'], data_info['width']\n ratio = height / width\n closest_ratio = float(min(self.aspect_ratios.keys(), key=lambda r: abs(float(r) - ratio)))\n if closest_ratio not in self.all_available_keys:\n continue\n if self._aspect_ratio_count[closest_ratio] < self.ratio_nums_gt[closest_ratio]:\n self._aspect_ratio_count[closest_ratio] += 1\n self._aspect_ratio_buckets[closest_ratio].append(idx)\n self.original_buckets[closest_ratio].append(idx) # Save the original samples for each bucket\n if not self.current_available_bucket_keys:\n self.current_available_bucket_keys, self.exhausted_bucket_keys = self.exhausted_bucket_keys, []\n\n if closest_ratio not in self.current_available_bucket_keys:\n continue\n key = closest_ratio\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) == self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n i += 1\n self.exhausted_bucket_keys.append(key)\n self.current_available_bucket_keys.remove(key)\n\n for _ in range(self.total_batches - i):\n key = choice(self.all_available_keys)\n bucket = self._aspect_ratio_buckets[key]\n if len(bucket) >= self.batch_size:\n yield bucket[:self.batch_size]\n del bucket[:self.batch_size]\n\n # If a bucket is exhausted\n if not bucket:\n self._aspect_ratio_buckets[key] = 
deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])\n else:\n self._aspect_ratio_buckets[key] = deepcopy(self.original_buckets[key][:])\n shuffle(self._aspect_ratio_buckets[key])" }, { "identifier": "synchronize", "path": "diffusion/utils/dist_utils.py", "snippet": "def synchronize():\n \"\"\"\n Helper function to synchronize (barrier) among all processes when\n using distributed training\n \"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n world_size = dist.get_world_size()\n if world_size == 1:\n return\n dist.barrier()" }, { "identifier": "get_world_size", "path": "diffusion/utils/dist_utils.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n if not dist.is_initialized():\n return 1\n return dist.get_world_size()" }, { "identifier": "clip_grad_norm_", "path": "diffusion/utils/dist_utils.py", "snippet": "@torch.no_grad()\ndef clip_grad_norm_(\n self, max_norm: Union[float, int], norm_type: Union[float, int] = 2.0\n) -> None:\n self._lazy_init()\n self._wait_for_previous_optim_step()\n assert self._is_root, \"clip_grad_norm should only be called on the root (parent) instance\"\n self._assert_state(TrainingState_.IDLE)\n\n max_norm = float(max_norm)\n norm_type = float(norm_type)\n # Computes the max norm for this shard's gradients and sync's across workers\n local_norm = _calc_grad_norm(self.params_with_grad, norm_type).cuda() # type: ignore[arg-type]\n if norm_type == math.inf:\n total_norm = local_norm\n dist.all_reduce(total_norm, op=torch.distributed.ReduceOp.MAX, group=self.process_group)\n else:\n total_norm = local_norm ** norm_type\n dist.all_reduce(total_norm, group=self.process_group)\n total_norm = total_norm ** (1.0 / norm_type)\n\n clip_coef = torch.tensor(max_norm, dtype=total_norm.dtype, device=total_norm.device) / (total_norm + 1e-6)\n if clip_coef < 1:\n # multiply by clip_coef, aka, (max_norm/total_norm).\n for p in self.params_with_grad:\n assert p.grad is not None\n p.grad.detach().mul_(clip_coef.to(p.grad.device))\n return total_norm" }, { "identifier": "get_root_logger", "path": "diffusion/utils/logger.py", "snippet": "def get_root_logger(log_file=None, log_level=logging.INFO, name='PixArt'):\n \"\"\"Get root logger.\n\n Args:\n log_file (str, optional): File path of log. 
Defaults to None.\n log_level (int, optional): The level of logger.\n Defaults to logging.INFO.\n name (str): logger name\n Returns:\n :obj:`logging.Logger`: The obtained logger\n \"\"\"\n if log_file is None:\n log_file = '/dev/null'\n logger = get_logger(name=name, log_file=log_file, log_level=log_level)\n return logger" }, { "identifier": "build_lr_scheduler", "path": "diffusion/utils/lr_scheduler.py", "snippet": "def build_lr_scheduler(config, optimizer, train_dataloader, lr_scale_ratio):\n if not config.get('lr_schedule_args', None):\n config.lr_schedule_args = dict()\n if config.get('lr_warmup_steps', None):\n config['num_warmup_steps'] = config.get('lr_warmup_steps') # for compatibility with old version\n\n logger = get_root_logger()\n logger.info(\n f'Lr schedule: {config.lr_schedule}, ' + \",\".join(\n [f\"{key}:{value}\" for key, value in config.lr_schedule_args.items()]) + '.')\n if config.lr_schedule == 'cosine':\n lr_scheduler = get_cosine_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n elif config.lr_schedule == 'constant':\n lr_scheduler = get_constant_schedule_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n )\n elif config.lr_schedule == 'cosine_decay_to_constant':\n assert lr_scale_ratio >= 1\n lr_scheduler = get_cosine_decay_to_constant_with_warmup(\n optimizer=optimizer,\n **config.lr_schedule_args,\n final_lr=1 / lr_scale_ratio,\n num_training_steps=(len(train_dataloader) * config.num_epochs),\n )\n else:\n raise RuntimeError(f'Unrecognized lr schedule {config.lr_schedule}.')\n return lr_scheduler" }, { "identifier": "set_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def set_random_seed(seed, deterministic=False):\n \"\"\"Set random seed.\n\n Args:\n seed (int): Seed to be used.\n deterministic (bool): Whether to set the deterministic option for\n CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`\n to True and `torch.backends.cudnn.benchmark` to False.\n Default: False.\n \"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "read_config", "path": "diffusion/utils/misc.py", "snippet": "def read_config(file):\n # solve config loading conflict when multi-processes\n import time\n while True:\n config = Config.fromfile(file)\n if len(config) == 0:\n time.sleep(0.1)\n continue\n break\n return config" }, { "identifier": "init_random_seed", "path": "diffusion/utils/misc.py", "snippet": "def init_random_seed(seed=None, device='cuda'):\n \"\"\"Initialize random seed.\n\n If the seed is not set, the seed will be automatically randomized,\n and then broadcast to all processes to prevent some potential bugs.\n\n Args:\n seed (int, Optional): The seed. Default to None.\n device (str): The device where the seed will be put on.\n Default to 'cuda'.\n\n Returns:\n int: Seed to be used.\n \"\"\"\n if seed is not None:\n return seed\n\n # Make sure all ranks share the same random seed to prevent\n # some potential bugs. 
Please refer to\n # https://github.com/open-mmlab/mmdetection/issues/6339\n rank, world_size = get_dist_info()\n seed = np.random.randint(2 ** 31)\n if world_size == 1:\n return seed\n\n if rank == 0:\n random_num = torch.tensor(seed, dtype=torch.int32, device=device)\n else:\n random_num = torch.tensor(0, dtype=torch.int32, device=device)\n dist.broadcast(random_num, src=0)\n return random_num.item()" }, { "identifier": "DebugUnderflowOverflow", "path": "diffusion/utils/misc.py", "snippet": "class DebugUnderflowOverflow:\n \"\"\"\n This debug class helps detect and understand where the model starts getting very large or very small, and more\n importantly `nan` or `inf` weight and activation elements.\n There are 2 working modes:\n 1. Underflow/overflow detection (default)\n 2. Specific batch absolute min/max tracing without detection\n Mode 1: Underflow/overflow detection\n To activate the underflow/overflow detection, initialize the object with the model :\n ```python\n debug_overflow = DebugUnderflowOverflow(model)\n ```\n then run the training as normal and if `nan` or `inf` gets detected in at least one of the weight, input or\n output elements this module will throw an exception and will print `max_frames_to_save` frames that lead to this\n event, each frame reporting\n 1. the fully qualified module name plus the class name whose `forward` was run\n 2. the absolute min and max value of all elements for each module weights, and the inputs and output\n For example, here is the header and the last few frames in detection report for `google/mt5-small` run in fp16 mixed precision :\n ```\n Detected inf/nan during batch_number=0\n Last 21 forward frames:\n abs min abs max metadata\n [...]\n encoder.block.2.layer.1.DenseReluDense.wi_0 Linear\n 2.17e-07 4.50e+00 weight\n 1.79e-06 4.65e+00 input[0]\n 2.68e-06 3.70e+01 output\n encoder.block.2.layer.1.DenseReluDense.wi_1 Linear\n 8.08e-07 2.66e+01 weight\n 1.79e-06 4.65e+00 input[0]\n 1.27e-04 2.37e+02 output\n encoder.block.2.layer.1.DenseReluDense.wo Linear\n 1.01e-06 6.44e+00 weight\n 0.00e+00 9.74e+03 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.DenseReluDense T5DenseGatedGeluDense\n 1.79e-06 4.65e+00 input[0]\n 3.18e-04 6.27e+04 output\n encoder.block.2.layer.1.dropout Dropout\n 3.18e-04 6.27e+04 input[0]\n 0.00e+00 inf output\n ```\n You can see here, that `T5DenseGatedGeluDense.forward` resulted in output activations, whose absolute max value\n was around 62.7K, which is very close to fp16's top limit of 64K. In the next frame we have `Dropout` which\n renormalizes the weights, after it zeroed some of the elements, which pushes the absolute max value to more than\n 64K, and we get an overlow.\n As you can see it's the previous frames that we need to look into when the numbers start going into very large for\n fp16 numbers.\n The tracking is done in a forward hook, which gets invoked immediately after `forward` has completed.\n By default the last 21 frames are printed. You can change the default to adjust for your needs. For example :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, max_frames_to_save=100)\n ```\n To validate that you have set up this debugging feature correctly, and you intend to use it in a training that may\n take hours to complete, first run it with normal tracing enabled for one of a few batches as explained in the next\n section.\n Mode 2. 
Specific batch absolute min/max tracing without detection\n The second work mode is per-batch tracing with the underflow/overflow detection feature turned off.\n Let's say you want to watch the absolute min and max values for all the ingredients of each `forward` call of a\n given batch, and only do that for batches 1 and 3. Then you instantiate this class as :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3])\n ```\n And now full batches 1 and 3 will be traced using the same format as explained above. Batches are 0-indexed.\n This is helpful if you know that the program starts misbehaving after a certain batch number, so you can\n fast-forward right to that area.\n Early stopping:\n You can also specify the batch number after which to stop the training, with :\n ```python\n debug_overflow = DebugUnderflowOverflow(model, trace_batch_nums=[1,3], abort_after_batch_num=3)\n ```\n This feature is mainly useful in the tracing mode, but you can use it for any mode.\n **Performance**:\n As this module measures absolute `min`/``max` of each weight of the model on every forward it'll slow the\n training down. Therefore remember to turn it off once the debugging needs have been met.\n Args:\n model (`nn.Module`):\n The model to debug.\n max_frames_to_save (`int`, *optional*, defaults to 21):\n How many frames back to record\n trace_batch_nums(`List[int]`, *optional*, defaults to `[]`):\n Which batch numbers to trace (turns detection off)\n abort_after_batch_num (`int``, *optional*):\n Whether to abort after a certain batch number has finished\n \"\"\"\n\n def __init__(self, model, max_frames_to_save=21, trace_batch_nums=[], abort_after_batch_num=None):\n self.model = model\n self.trace_batch_nums = trace_batch_nums\n self.abort_after_batch_num = abort_after_batch_num\n\n # keep a LIFO buffer of frames to dump as soon as inf/nan is encountered to give context to the problem emergence\n self.frames = collections.deque([], max_frames_to_save)\n self.frame = []\n self.batch_number = 0\n self.total_calls = 0\n self.detected_overflow = False\n self.prefix = \" \"\n\n self.analyse_model()\n\n self.register_forward_hook()\n\n def save_frame(self, frame=None):\n if frame is not None:\n self.expand_frame(frame)\n self.frames.append(\"\\n\".join(self.frame))\n self.frame = [] # start a new frame\n\n def expand_frame(self, line):\n self.frame.append(line)\n\n def trace_frames(self):\n print(\"\\n\".join(self.frames))\n self.frames = []\n\n def reset_saved_frames(self):\n self.frames = []\n\n def dump_saved_frames(self):\n print(f\"\\nDetected inf/nan during batch_number={self.batch_number} \"\n f\"Last {len(self.frames)} forward frames:\"\n f\"{'abs min':8} {'abs max':8} metadata\"\n f\"'\\n'.join(self.frames)\"\n f\"\\n\\n\")\n self.frames = []\n\n def analyse_model(self):\n # extract the fully qualified module names, to be able to report at run time. 
e.g.:\n # encoder.block.2.layer.0.SelfAttention.o\n #\n # for shared weights only the first shared module name will be registered\n self.module_names = {m: name for name, m in self.model.named_modules()}\n # self.longest_module_name = max(len(v) for v in self.module_names.values())\n\n def analyse_variable(self, var, ctx):\n if torch.is_tensor(var):\n self.expand_frame(self.get_abs_min_max(var, ctx))\n if self.detect_overflow(var, ctx):\n self.detected_overflow = True\n elif var is None:\n self.expand_frame(f\"{'None':>17} {ctx}\")\n else:\n self.expand_frame(f\"{'not a tensor':>17} {ctx}\")\n\n def batch_start_frame(self):\n self.expand_frame(f\"\\n\\n{self.prefix} *** Starting batch number={self.batch_number} ***\")\n self.expand_frame(f\"{'abs min':8} {'abs max':8} metadata\")\n\n def batch_end_frame(self):\n self.expand_frame(f\"{self.prefix} *** Finished batch number={self.batch_number - 1} ***\\n\\n\")\n\n def create_frame(self, module, input, output):\n self.expand_frame(f\"{self.prefix} {self.module_names[module]} {module.__class__.__name__}\")\n\n # params\n for name, p in module.named_parameters(recurse=False):\n self.analyse_variable(p, name)\n\n # inputs\n if isinstance(input, tuple):\n for i, x in enumerate(input):\n self.analyse_variable(x, f\"input[{i}]\")\n else:\n self.analyse_variable(input, \"input\")\n\n # outputs\n if isinstance(output, tuple):\n for i, x in enumerate(output):\n # possibly a tuple of tuples\n if isinstance(x, tuple):\n for j, y in enumerate(x):\n self.analyse_variable(y, f\"output[{i}][{j}]\")\n else:\n self.analyse_variable(x, f\"output[{i}]\")\n else:\n self.analyse_variable(output, \"output\")\n\n self.save_frame()\n\n def register_forward_hook(self):\n self.model.apply(self._register_forward_hook)\n\n def _register_forward_hook(self, module):\n module.register_forward_hook(self.forward_hook)\n\n def forward_hook(self, module, input, output):\n # - input is a tuple of packed inputs (could be non-Tensors)\n # - output could be a Tensor or a tuple of Tensors and non-Tensors\n\n last_frame_of_batch = False\n\n trace_mode = True if self.batch_number in self.trace_batch_nums else False\n if trace_mode:\n self.reset_saved_frames()\n\n if self.total_calls == 0:\n self.batch_start_frame()\n self.total_calls += 1\n\n # count batch numbers - the very first forward hook of the batch will be called when the\n # batch completes - i.e. it gets called very last - we know this batch has finished\n if module == self.model:\n self.batch_number += 1\n last_frame_of_batch = True\n\n self.create_frame(module, input, output)\n\n # if last_frame_of_batch:\n # self.batch_end_frame()\n\n if trace_mode:\n self.trace_frames()\n\n if last_frame_of_batch:\n self.batch_start_frame()\n\n if self.detected_overflow and not trace_mode:\n self.dump_saved_frames()\n\n # now we can abort, as it's pointless to continue running\n raise ValueError(\n \"DebugUnderflowOverflow: inf/nan detected, aborting as there is no point running further. 
\"\n \"Please scroll up above this traceback to see the activation values prior to this event.\"\n )\n\n # abort after certain batch if requested to do so\n if self.abort_after_batch_num is not None and self.batch_number > self.abort_after_batch_num:\n raise ValueError(\n f\"DebugUnderflowOverflow: aborting after {self.batch_number} batches due to `abort_after_batch_num={self.abort_after_batch_num}` arg\"\n )\n\n @staticmethod\n def get_abs_min_max(var, ctx):\n abs_var = var.abs()\n return f\"{abs_var.min():8.2e} {abs_var.max():8.2e} {ctx}\"\n\n @staticmethod\n def detect_overflow(var, ctx):\n \"\"\"\n Report whether the tensor contains any `nan` or `inf` entries.\n This is useful for detecting overflows/underflows and best to call right after the function that did some math that\n modified the tensor in question.\n This function contains a few other helper features that you can enable and tweak directly if you want to track\n various other things.\n Args:\n var: the tensor variable to check\n ctx: the message to print as a context\n Return:\n `True` if `inf` or `nan` was detected, `False` otherwise\n \"\"\"\n detected = False\n if torch.isnan(var).any().item():\n detected = True\n print(f\"{ctx} has nans\")\n if torch.isinf(var).any().item():\n detected = True\n print(f\"{ctx} has infs\")\n if var.dtype == torch.float32 and torch.ge(var.abs(), 65535).any().item():\n detected = True\n print(f\"{ctx} has overflow values {var.abs().max().item()}.\")\n # if needed to monitor large elements can enable the following\n if 0: # and detected:\n n100 = var[torch.ge(var.abs(), 100)]\n if n100.numel() > 0:\n print(f\"{ctx}: n100={n100.numel()}\")\n n1000 = var[torch.ge(var.abs(), 1000)]\n if n1000.numel() > 0:\n print(f\"{ctx}: n1000={n1000.numel()}\")\n n10000 = var[torch.ge(var.abs(), 10000)]\n if n10000.numel() > 0:\n print(f\"{ctx}: n10000={n10000.numel()}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e}\")\n\n if 0:\n print(f\"min={var.min():9.2e} max={var.max():9.2e} var={var.var():9.2e} mean={var.mean():9.2e} ({ctx})\")\n\n return detected" }, { "identifier": "build_optimizer", "path": "diffusion/utils/optimizer.py", "snippet": "def build_optimizer(model, optimizer_cfg):\n # default parameter-wise config\n logger = get_root_logger()\n\n if hasattr(model, 'module'):\n model = model.module\n # set optimizer constructor\n optimizer_cfg.setdefault('constructor', 'MyOptimizerConstructor')\n # parameter-wise setting: cancel weight decay for some specific modules\n custom_keys = dict()\n for name, module in model.named_modules():\n if hasattr(module, 'zero_weight_decay'):\n custom_keys.update({(name, key): dict(decay_mult=0) for key in module.zero_weight_decay})\n\n paramwise_cfg = Config(dict(cfg=dict(custom_keys=custom_keys)))\n given_cfg = optimizer_cfg.get('paramwise_cfg')\n if given_cfg:\n paramwise_cfg.merge_from_dict(dict(cfg=given_cfg))\n optimizer_cfg['paramwise_cfg'] = paramwise_cfg.cfg\n # build optimizer\n optimizer = mm_build_optimizer(model, optimizer_cfg)\n\n weight_decay_groups = dict()\n lr_groups = dict()\n for group in optimizer.param_groups:\n if not group.get('requires_grad', True): continue\n lr_groups.setdefault(group['lr'], []).append(group)\n weight_decay_groups.setdefault(group['weight_decay'], []).append(group)\n\n learnable_count, fix_count = 0, 0\n for p in model.parameters():\n if p.requires_grad:\n learnable_count += 1\n else:\n fix_count += 1\n fix_info = f\"{learnable_count} are learnable, {fix_count} are fix\"\n lr_info = \"Lr group: \" + \", 
\".join([f'{len(group)} params with lr {lr:.5f}' for lr, group in lr_groups.items()])\n wd_info = \"Weight decay group: \" + \", \".join(\n [f'{len(group)} params with weight decay {wd}' for wd, group in weight_decay_groups.items()])\n opt_info = f\"Optimizer: total {len(optimizer.param_groups)} param groups, {fix_info}. {lr_info}; {wd_info}.\"\n logger.info(opt_info)\n\n return optimizer" }, { "identifier": "auto_scale_lr", "path": "diffusion/utils/optimizer.py", "snippet": "def auto_scale_lr(effective_bs, optimizer_cfg, rule='linear', base_batch_size=256):\n assert rule in ['linear', 'sqrt']\n logger = get_root_logger()\n # scale by world size\n if rule == 'sqrt':\n scale_ratio = math.sqrt(effective_bs / base_batch_size)\n elif rule == 'linear':\n scale_ratio = effective_bs / base_batch_size\n optimizer_cfg['lr'] *= scale_ratio\n logger.info(f'Automatically adapt lr to {optimizer_cfg[\"lr\"]:.7f} (using {rule} scaling rule).')\n return scale_ratio" } ]
import argparse import datetime import os import sys import time import types import warnings import torch from pathlib import Path from accelerate import Accelerator, InitProcessGroupKwargs from accelerate.utils import DistributedType from mmcv.runner import LogBuffer from torch.utils.data import RandomSampler from diffusion import IDDPM from diffusion.data.builder import build_dataset, build_dataloader, set_data_root from diffusion.model.builder import build_model from diffusion.model.nets import PixArtMS, ControlPixArtHalf, ControlPixArtMSHalf from diffusion.utils.checkpoint import save_checkpoint, load_checkpoint from diffusion.utils.data_sampler import AspectRatioBatchSampler, BalancedAspectRatioBatchSampler from diffusion.utils.dist_utils import synchronize, get_world_size, clip_grad_norm_ from diffusion.utils.logger import get_root_logger from diffusion.utils.lr_scheduler import build_lr_scheduler from diffusion.utils.misc import set_random_seed, read_config, init_random_seed, DebugUnderflowOverflow from diffusion.utils.optimizer import build_optimizer, auto_scale_lr from accelerate import FullyShardedDataParallelPlugin from torch.distributed.fsdp.fully_sharded_data_parallel import FullStateDictConfig
16472
config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.data_root: config.data_root = args.data_root if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=args.resume_optimizer, resume_lr_scheduler=args.resume_lr_scheduler) if args.debug: config.log_interval = 1 config.train_batch_size = 6 config.optimizer.update({'lr': args.lr}) os.umask(0o000) # file permission: 666; dir permission: 777 os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=9600) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, gradient_accumulation_steps=config.gradient_accumulation_steps, log_with=args.report_to, project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [512, 1024] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps)) model: PixArtMS = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs) if config.load_from is not None and args.resume_from is None: # load from PixArt model missing, unexpected = load_checkpoint(config.load_from, model) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') if image_size == 1024: model: ControlPixArtMSHalf = ControlPixArtMSHalf(model, copy_blocks_num=config.copy_blocks_num).train() else: model: ControlPixArtHalf = ControlPixArtHalf(model, copy_blocks_num=config.copy_blocks_num).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") logger.info(f"T5 max token length: {config.model_max_length}") # if args.local_rank == 0: # for name, params in model.named_parameters(): # if params.requires_grad == False: logger.info(f"freeze param: {name}") # # for name, params in model.named_parameters(): # if params.requires_grad == True: logger.info(f"trainable param: {name}") # prepare for FSDP clip grad norm calculation if 
accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type, train_ratio=config.train_ratio) if config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=1) # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
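When config.multi_scale is enabled, the script above draws batches through AspectRatioBatchSampler (see its snippet in the context list), which routes each index into the bucket of its closest aspect-ratio key and emits a batch once that bucket is full. A minimal sketch of that bucketing idea; get_ratio and the aspect_ratios argument stand in for the dataset interface and are not the repository's API:

from collections import defaultdict
from typing import Callable, Iterable, Iterator, List

def iter_ratio_batches(indices: Iterable[int],
                       get_ratio: Callable[[int], float],
                       aspect_ratios: Iterable[float],
                       batch_size: int) -> Iterator[List[int]]:
    aspect_ratios = list(aspect_ratios)
    buckets = defaultdict(list)
    for idx in indices:
        ratio = get_ratio(idx)  # height / width of the sample
        closest = min(aspect_ratios, key=lambda r: abs(r - ratio))
        bucket = buckets[closest]
        bucket.append(idx)
        if len(bucket) == batch_size:
            yield bucket[:]  # a full batch whose samples share one aspect-ratio bin
            del bucket[:]
    # leftover partial buckets are simply dropped in this sketch; the real sampler
    # either yields or discards them depending on drop_last.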
current_file_path = Path(__file__).resolve() sys.path.insert(0, str(current_file_path.parent.parent)) warnings.filterwarnings("ignore") # ignore warning def set_fsdp_env(): os.environ["ACCELERATE_USE_FSDP"] = 'true' os.environ["FSDP_AUTO_WRAP_POLICY"] = 'TRANSFORMER_BASED_WRAP' os.environ["FSDP_BACKWARD_PREFETCH"] = 'BACKWARD_PRE' os.environ["FSDP_TRANSFORMER_CLS_TO_WRAP"] = 'PixArtBlock' def train(): if config.get('debug_nan', False): DebugUnderflowOverflow(model) logger.info('NaN debugger registered. Start to detect overflow during training.') time_start, last_tic = time.time(), time.time() log_buffer = LogBuffer() start_step = start_epoch * len(train_dataloader) global_step = 0 total_steps = len(train_dataloader) * config.num_epochs load_vae_feat = getattr(train_dataloader.dataset, 'load_vae_feat', False) if not load_vae_feat: raise ValueError("Only support load vae features for now.") # Now you train the model for epoch in range(start_epoch + 1, config.num_epochs + 1): data_time_start = time.time() data_time_all = 0 for step, batch in enumerate(train_dataloader): data_time_all += time.time() - data_time_start z = batch[0] # 4 x 4 x 128 x 128 z:vae output, 3x1024x1024->vae->4x128x128 clean_images = z * config.scale_factor # vae needed scale factor y = batch[1] # 4 x 1 x 120 x 4096 # T5 extracted feature of caption, 120 token, 4096 y_mask = batch[2] # 4 x 1 x 1 x 120 # caption indicate whether valid data_info = batch[3] # Sample a random timestep for each image bs = clean_images.shape[0] timesteps = torch.randint(0, config.train_sampling_steps, (bs,), device=clean_images.device).long() grad_norm = None with accelerator.accumulate(model): # Predict the noise residual optimizer.zero_grad() loss_term = train_diffusion.training_losses(model, clean_images, timesteps, model_kwargs=dict(y=y, mask=y_mask, data_info=data_info, c=data_info['condition'] * config.scale_factor)) loss = loss_term['loss'].mean() accelerator.backward(loss) if accelerator.sync_gradients: grad_norm = accelerator.clip_grad_norm_(model.parameters(), config.gradient_clip) optimizer.step() lr_scheduler.step() lr = lr_scheduler.get_last_lr()[0] logs = {"loss": accelerator.gather(loss).mean().item()} if grad_norm is not None: logs.update(grad_norm=accelerator.gather(grad_norm).mean().item()) log_buffer.update(logs) if (step + 1) % config.log_interval == 0 or (step + 1) == 1: t = (time.time() - last_tic) / config.log_interval t_d = data_time_all / config.log_interval avg_time = (time.time() - time_start) / (global_step + 1) eta = str(datetime.timedelta(seconds=int(avg_time * (total_steps - start_step - global_step - 1)))) eta_epoch = str(datetime.timedelta(seconds=int(avg_time * (len(train_dataloader) - step - 1)))) # avg_loss = sum(loss_buffer) / len(loss_buffer) log_buffer.average() info = f"Step/Epoch [{(epoch - 1) * len(train_dataloader) + step + 1}/{epoch}][{step + 1}/{len(train_dataloader)}]:total_eta: {eta}, " \ f"epoch_eta:{eta_epoch}, time_all:{t:.3f}, time_data:{t_d:.3f}, lr:{lr:.3e}, s:({data_info['img_hw'][0][0].item()}, {data_info['img_hw'][0][1].item()}), " info += ', '.join([f"{k}:{v:.4f}" for k, v in log_buffer.output.items()]) logger.info(info) last_tic = time.time() log_buffer.clear() data_time_all = 0 logs.update(lr=lr) accelerator.log(logs, step=global_step + start_step) if (global_step + 1) % 1000 == 0 and config.s3_work_dir is not None: logger.info(f"s3_work_dir: {config.s3_work_dir}") global_step += 1 data_time_start = time.time() synchronize() if accelerator.is_main_process: if ((epoch - 1) * 
len(train_dataloader) + step + 1) % config.save_model_steps == 0: os.umask(0o000) # file permission: 666; dir permission: 777 save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() synchronize() # After each epoch you optionally sample some demo images with evaluate() and save the model if accelerator.is_main_process: if epoch % config.save_model_epochs == 0 or epoch == config.num_epochs: os.umask(0o000) # file permission: 666; dir permission: 777 save_checkpoint(os.path.join(config.work_dir, 'checkpoints'), epoch=epoch, step=(epoch - 1) * len(train_dataloader) + step + 1, model=accelerator.unwrap_model(model), optimizer=optimizer, lr_scheduler=lr_scheduler ) synchronize() def parse_args(): parser = argparse.ArgumentParser(description="Process some integers.") parser.add_argument("config", type=str, help="config") parser.add_argument("--cloud", action='store_true', default=False, help="cloud or local machine") parser.add_argument('--work-dir', help='the dir to save logs and models') parser.add_argument('--resume_from', help='the dir to save logs and models') parser.add_argument('--local-rank', type=int, default=-1) parser.add_argument('--local_rank', type=int, default=-1) parser.add_argument('--debug', action='store_true') parser.add_argument( "--report_to", type=str, default="tensorboard", help=( 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`' ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.' ), ) parser.add_argument( "--tracker_project_name", type=str, default="text2image-fine-tune", help=( "The `project_name` argument passed to Accelerator.init_trackers for" " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator" ), ) parser.add_argument('--lr', type=float, default=2e-4) parser.add_argument('--data_root', type=str, default=None) parser.add_argument('--resume_optimizer', action='store_true') parser.add_argument('--resume_lr_scheduler', action='store_true') args = parser.parse_args() return args if __name__ == '__main__': args = parse_args() config = read_config(args.config) if args.work_dir is not None: # update configs according to CLI args if args.work_dir is not None config.work_dir = args.work_dir if args.cloud: config.data_root = '/data/data' if args.data_root: config.data_root = args.data_root if args.resume_from is not None: config.load_from = None config.resume_from = dict( checkpoint=args.resume_from, load_ema=False, resume_optimizer=args.resume_optimizer, resume_lr_scheduler=args.resume_lr_scheduler) if args.debug: config.log_interval = 1 config.train_batch_size = 6 config.optimizer.update({'lr': args.lr}) os.umask(0o000) # file permission: 666; dir permission: 777 os.makedirs(config.work_dir, exist_ok=True) init_handler = InitProcessGroupKwargs() init_handler.timeout = datetime.timedelta(seconds=9600) # change timeout to avoid a strange NCCL bug # Initialize accelerator and tensorboard logging if config.use_fsdp: init_train = 'FSDP' set_fsdp_env() fsdp_plugin = FullyShardedDataParallelPlugin(state_dict_config=FullStateDictConfig(offload_to_cpu=False, rank0_only=False),) else: init_train = 'DDP' fsdp_plugin = None even_batches = True if config.multi_scale: even_batches=False, accelerator = Accelerator( mixed_precision=config.mixed_precision, 
gradient_accumulation_steps=config.gradient_accumulation_steps, log_with=args.report_to, project_dir=os.path.join(config.work_dir, "logs"), fsdp_plugin=fsdp_plugin, even_batches=even_batches, kwargs_handlers=[init_handler] ) logger = get_root_logger(os.path.join(config.work_dir, 'train_log.log')) config.seed = init_random_seed(config.get('seed', None)) set_random_seed(config.seed) if accelerator.is_main_process: config.dump(os.path.join(config.work_dir, 'config.py')) logger.info(f"Config: \n{config.pretty_text}") logger.info(f"World_size: {get_world_size()}, seed: {config.seed}") logger.info(f"Initializing: {init_train} for training") image_size = config.image_size # @param [512, 1024] latent_size = int(image_size) // 8 pred_sigma = getattr(config, 'pred_sigma', True) learn_sigma = getattr(config, 'learn_sigma', True) and pred_sigma model_kwargs={"window_block_indexes": config.window_block_indexes, "window_size": config.window_size, "use_rel_pos": config.use_rel_pos, "lewei_scale": config.lewei_scale, 'config':config, 'model_max_length': config.model_max_length} # build models train_diffusion = IDDPM(str(config.train_sampling_steps)) model: PixArtMS = build_model(config.model, config.grad_checkpointing, config.get('fp32_attention', False), input_size=latent_size, learn_sigma=learn_sigma, pred_sigma=pred_sigma, **model_kwargs) if config.load_from is not None and args.resume_from is None: # load from PixArt model missing, unexpected = load_checkpoint(config.load_from, model) logger.warning(f'Missing keys: {missing}') logger.warning(f'Unexpected keys: {unexpected}') if image_size == 1024: model: ControlPixArtMSHalf = ControlPixArtMSHalf(model, copy_blocks_num=config.copy_blocks_num).train() else: model: ControlPixArtHalf = ControlPixArtHalf(model, copy_blocks_num=config.copy_blocks_num).train() logger.info(f"{model.__class__.__name__} Model Parameters: {sum(p.numel() for p in model.parameters()):,}") logger.info(f"T5 max token length: {config.model_max_length}") # if args.local_rank == 0: # for name, params in model.named_parameters(): # if params.requires_grad == False: logger.info(f"freeze param: {name}") # # for name, params in model.named_parameters(): # if params.requires_grad == True: logger.info(f"trainable param: {name}") # prepare for FSDP clip grad norm calculation if accelerator.distributed_type == DistributedType.FSDP: for m in accelerator._models: m.clip_grad_norm_ = types.MethodType(clip_grad_norm_, m) # build dataloader set_data_root(config.data_root) dataset = build_dataset(config.data, resolution=image_size, aspect_ratio_type=config.aspect_ratio_type, train_ratio=config.train_ratio) if config.multi_scale: batch_sampler = AspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, drop_last=True, ratio_nums=dataset.ratio_nums, config=config, valid_num=1) # batch_sampler = BalancedAspectRatioBatchSampler(sampler=RandomSampler(dataset), dataset=dataset, # batch_size=config.train_batch_size, aspect_ratios=dataset.aspect_ratio, # ratio_nums=dataset.ratio_nums) train_dataloader = build_dataloader(dataset, batch_sampler=batch_sampler, num_workers=config.num_workers) else: train_dataloader = build_dataloader(dataset, num_workers=config.num_workers, batch_size=config.train_batch_size, shuffle=True) # build optimizer and lr scheduler lr_scale_ratio = 1 if config.get('auto_lr', None):
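Because FSDP shards parameters, the script patches clip_grad_norm_ from diffusion/utils/dist_utils.py onto each wrapped model so the gradient norm is aggregated across ranks before clipping. A minimal sketch of that aggregation step only, assuming torch.distributed is already initialised; sharded_total_grad_norm is an illustrative name, not the repository's function:

import torch
import torch.distributed as dist

def sharded_total_grad_norm(local_params, norm_type: float = 2.0, group=None) -> torch.Tensor:
    # every rank first reduces the norm of its own gradient shard
    local_norm = torch.norm(
        torch.stack([p.grad.detach().norm(norm_type) for p in local_params if p.grad is not None]),
        norm_type,
    )
    if norm_type == float("inf"):
        # the global inf-norm is the maximum across ranks
        total = local_norm.clone()
        dist.all_reduce(total, op=dist.ReduceOp.MAX, group=group)
        return total
    # otherwise sum the p-th powers across ranks and take the p-th root
    total = local_norm ** norm_type
    dist.all_reduce(total, group=group)
    return total ** (1.0 / norm_type)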
lr_scale_ratio = auto_scale_lr(config.train_batch_size * get_world_size() * config.gradient_accumulation_steps,
22
2023-10-12 14:16:33+00:00
24k
NVlabs/EmerNeRF
train_emernerf.py
[ { "identifier": "metrics", "path": "datasets/metrics.py", "snippet": "def compute_valid_depth_rmse(prediction: Tensor, target: Tensor) -> float:\ndef compute_psnr(prediction: Tensor, target: Tensor) -> float:\ndef compute_ssim(\n prediction: Union[Tensor, np.ndarray], target: Union[Tensor, np.ndarray]\n) -> float:\ndef compute_scene_flow_metrics(pred: Tensor, labels: Tensor):\ndef knn_predict(\n queries: Tensor,\n memory_bank: Tensor,\n memory_labels: Tensor,\n n_classes: int,\n knn_k: int = 1,\n knn_t: float = 0.1,\n) -> Tensor:\ndef knn_predict(\n queries: Tensor,\n memory_bank: Tensor,\n memory_labels: Tensor,\n n_classes: int,\n knn_k: int = 1,\n knn_t: float = 0.1,\n similarity: str = \"cosine\",\n) -> Tensor:\ndef collect_centroids(\n train_indices: List[int],\n dataset, # a WaymoDataset object\n model: RadianceField,\n device: torch.device,\n):\ndef eval_few_shot_occ(\n test_indices: List[int],\n dataset, # a WaymoDataset object\n model: RadianceField,\n device: torch.device,\n centroids_bank: Tensor,\n label_bank: Tensor,\n):\n EPE3D = torch.mean(l2_norm).item() # Mean absolute distance error" }, { "identifier": "SceneDataset", "path": "datasets/base/scene_dataset.py", "snippet": "class SceneDataset(abc.ABC):\n \"\"\"\n Base class for scene dataset.\n \"\"\"\n\n data_cfg: OmegaConf = None\n pixel_source: ScenePixelSource = None\n lidar_source: SceneLidarSource = None\n # training and testing indices are indices into the full dataset\n # train_indices are img indices, so the length is num_cams * num_timesteps\n train_indices: List[int] = None\n test_indices: List[int] = None\n # train_timesteps are timesteps, so the length is num_timesteps (len(unique_timesteps))\n train_timesteps: Tensor = None\n test_timesteps: Tensor = None\n\n # dataset wrappers\n # full: includes all data\n full_pixel_set: SplitWrapper = None\n full_lidar_set: SplitWrapper = None\n # train: includes only training data\n train_pixel_set: SplitWrapper = None\n train_lidar_set: SplitWrapper = None\n # test: includes only testing data\n test_pixel_set: SplitWrapper = None\n test_lidar_set: SplitWrapper = None\n\n def __init__(\n self,\n data_config: OmegaConf,\n ):\n super().__init__()\n self.data_cfg = data_config\n\n @abc.abstractmethod\n def build_data_source(self):\n \"\"\"\n Create the data source for the dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def build_split_wrapper(self):\n \"\"\"\n Makes each data source as a Pytorch Dataset.\n \"\"\"\n raise NotImplementedError\n\n @abc.abstractmethod\n def split_train_test(self):\n raise NotImplementedError\n\n def get_aabb(self) -> Tensor:\n if self.lidar_source is not None:\n aabb = self.lidar_source.get_aabb()\n else:\n aabb = self.pixel_source.get_aabb()\n return aabb\n\n @property\n def num_cams(self) -> int:\n return self.pixel_source.num_cams\n\n @property\n def scene_idx(self) -> int:\n return self.data_cfg.scene_idx\n\n @property\n def num_img_timesteps(self) -> int:\n return self.pixel_source.num_timesteps\n\n @property\n def num_lidar_timesteps(self) -> int:\n if self.lidar_source is None:\n logger.warning(\"No lidar source, returning num_img_timesteps\")\n return self.num_img_timesteps\n return self.lidar_source.num_timesteps\n\n @property\n def num_train_timesteps(self) -> int:\n return len(self.train_timesteps)\n\n @property\n def num_test_timesteps(self) -> int:\n return len(self.test_timesteps)\n\n @property\n def unique_normalized_training_timestamps(self) -> Tensor:\n return 
self.pixel_source.unique_normalized_timestamps[self.train_timesteps]\n\n @property\n def device(self):\n return self.data_cfg.preload_device" }, { "identifier": "DensityField", "path": "radiance_fields/radiance_field.py", "snippet": "class DensityField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n aabb: Union[Tensor, List[float]] = [[-1.0, -1.0, -1.0, 1.0, 1.0, 1.0]],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = False,\n base_mlp_layer_width: int = 64,\n ) -> None:\n super().__init__()\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.num_dims = num_dims\n self.density_activation = density_activation\n self.unbounded = unbounded\n self.xyz_encoder = xyz_encoder\n\n # density head\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n )\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set propnet aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def forward(\n self, positions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = ((positions > 0.0) & (positions < 1.0)).all(dim=-1).to(positions)\n positions = positions * selector.unsqueeze(-1)\n xyz_encoding = self.xyz_encoder(positions.view(-1, self.num_dims))\n density_before_activation = self.base_mlp(xyz_encoding).view(\n list(positions.shape[:-1]) + [-1]\n )\n density = self.density_activation(density_before_activation)\n return {\"density\": density}" }, { "identifier": "RadianceField", "path": "radiance_fields/radiance_field.py", "snippet": "class RadianceField(nn.Module):\n def __init__(\n self,\n xyz_encoder: HashEncoder,\n dynamic_xyz_encoder: Optional[HashEncoder] = None,\n flow_xyz_encoder: Optional[HashEncoder] = None,\n aabb: Union[Tensor, List[float]] = [-1, -1, -1, 1, 1, 1],\n num_dims: int = 3,\n density_activation: Callable = lambda x: trunc_exp(x - 1),\n unbounded: bool = True,\n geometry_feature_dim: int = 15,\n base_mlp_layer_width: int = 64,\n head_mlp_layer_width: int = 64,\n enable_cam_embedding: bool = False,\n enable_img_embedding: bool = False,\n num_cams: int = 3,\n appearance_embedding_dim: int = 16,\n semantic_feature_dim: int = 64,\n feature_mlp_layer_width: int = 256,\n feature_embedding_dim: int = 768,\n enable_sky_head: bool = False,\n enable_shadow_head: bool = False,\n enable_feature_head: bool = False,\n num_train_timesteps: int = 0,\n interpolate_xyz_encoding: bool = False,\n enable_learnable_pe: bool = True,\n enable_temporal_interpolation: bool = False,\n ) -> None:\n super().__init__()\n # scene properties\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n self.register_buffer(\"aabb\", aabb)\n self.unbounded = unbounded\n self.num_cams = num_cams\n self.num_dims = num_dims\n self.density_activation = density_activation\n\n # appearance embedding\n self.enable_cam_embedding = 
enable_cam_embedding\n self.enable_img_embedding = enable_img_embedding\n self.appearance_embedding_dim = appearance_embedding_dim\n\n self.geometry_feature_dim = geometry_feature_dim\n # add semantic feature dim if feature head is enabled\n if not enable_feature_head:\n semantic_feature_dim = 0\n self.semantic_feature_dim = semantic_feature_dim\n\n # note: we use very conservative default values for mlps\n # usually you want to use larger ones\n\n # ======== Static Field ======== #\n self.xyz_encoder = xyz_encoder\n self.base_mlp = nn.Sequential(\n nn.Linear(self.xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width, geometry_feature_dim + semantic_feature_dim\n ),\n )\n\n # ======== Dynamic Field ======== #\n self.interpolate_xyz_encoding = interpolate_xyz_encoding\n self.dynamic_xyz_encoder = dynamic_xyz_encoder\n self.enable_temporal_interpolation = enable_temporal_interpolation\n if self.dynamic_xyz_encoder is not None:\n # for temporal interpolation\n self.register_buffer(\"training_timesteps\", torch.zeros(num_train_timesteps))\n self.dynamic_base_mlp = nn.Sequential(\n nn.Linear(self.dynamic_xyz_encoder.n_output_dims, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(\n base_mlp_layer_width,\n geometry_feature_dim + semantic_feature_dim,\n ),\n )\n\n # ======== Flow Field ======== #\n self.flow_xyz_encoder = flow_xyz_encoder\n if self.flow_xyz_encoder is not None:\n self.flow_mlp = nn.Sequential(\n nn.Linear(\n self.flow_xyz_encoder.n_output_dims,\n base_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 6), # 3 for forward, 3 for backward\n # no activation function for flow\n )\n\n # appearance embedding\n if self.enable_cam_embedding:\n # per-camera embedding\n self.appearance_embedding = nn.Embedding(num_cams, appearance_embedding_dim)\n elif self.enable_img_embedding:\n # per-image embedding\n self.appearance_embedding = nn.Embedding(\n num_train_timesteps * num_cams, appearance_embedding_dim\n )\n else:\n self.appearance_embedding = None\n\n # direction encoding\n self.direction_encoding = SinusoidalEncoder(\n n_input_dims=3, min_deg=0, max_deg=4\n )\n\n # ======== Color Head ======== #\n self.rgb_head = MLP(\n in_dims=geometry_feature_dim\n + self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0 # 2 or 0?\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n\n # ======== Shadow Head ======== #\n self.enable_shadow_head = enable_shadow_head\n if self.enable_shadow_head:\n self.shadow_head = nn.Sequential(\n nn.Linear(geometry_feature_dim, base_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(base_mlp_layer_width, 1),\n nn.Sigmoid(),\n )\n\n # ======== Sky Head ======== #\n self.enable_sky_head = enable_sky_head\n if self.enable_sky_head:\n self.sky_head = MLP(\n in_dims=self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n out_dims=3,\n num_layers=3,\n hidden_dims=head_mlp_layer_width,\n skip_connections=[1],\n )\n if enable_feature_head:\n # feature sky head\n self.dino_sky_head = nn.Sequential(\n # TODO: remove appearance embedding from dino sky head\n nn.Linear(\n self.direction_encoding.n_output_dims\n + (\n appearance_embedding_dim\n if self.enable_cam_embedding or self.enable_img_embedding\n else 0\n ),\n 
feature_mlp_layer_width,\n ),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n\n # ======== Feature Head ======== #\n self.enable_feature_head = enable_feature_head\n if self.enable_feature_head:\n self.dino_head = nn.Sequential(\n nn.Linear(semantic_feature_dim, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_mlp_layer_width),\n nn.ReLU(),\n nn.Linear(feature_mlp_layer_width, feature_embedding_dim),\n )\n # placeholders for visualization, will be registered when available\n self.register_buffer(\n \"feats_reduction_mat\", torch.zeros(feature_embedding_dim, 3)\n )\n self.register_buffer(\"feat_color_min\", torch.zeros(3, dtype=torch.float32))\n self.register_buffer(\"feat_color_max\", torch.ones(3, dtype=torch.float32))\n\n # positional embedding (PE) decomposition\n self.enable_learnable_pe = enable_learnable_pe\n if self.enable_learnable_pe:\n # globally-shared low-resolution learnable PE map\n self.learnable_pe_map = nn.Parameter(\n 0.05 * torch.randn(1, feature_embedding_dim // 2, 80, 120),\n requires_grad=True,\n )\n # a PE head to decode PE features\n self.pe_head = nn.Sequential(\n nn.Linear(feature_embedding_dim // 2, feature_embedding_dim),\n )\n\n def register_normalized_training_timesteps(\n self, normalized_timesteps: Tensor, time_diff: float = None\n ) -> None:\n \"\"\"\n register normalized timesteps for temporal interpolation\n\n Args:\n normalized_timesteps (Tensor): normalized timesteps in [0, 1]\n time_diff (float, optional): time difference between two consecutive timesteps. Defaults to None.\n \"\"\"\n if self.dynamic_xyz_encoder is not None:\n # register timesteps for temporal interpolation\n self.training_timesteps.copy_(normalized_timesteps)\n self.training_timesteps = self.training_timesteps.to(self.device)\n if time_diff is not None:\n # use the provided time difference if available\n self.time_diff = time_diff\n else:\n if len(self.training_timesteps) > 1:\n # otherwise, compute the time difference from the provided timesteps\n # it's important to make sure the provided timesteps are consecutive\n self.time_diff = (\n self.training_timesteps[1] - self.training_timesteps[0]\n )\n else:\n self.time_diff = 0\n\n def set_aabb(self, aabb: Union[Tensor, List[float]]) -> None:\n \"\"\"\n register aabb for scene space\n \"\"\"\n if not isinstance(aabb, Tensor):\n aabb = torch.tensor(aabb, dtype=torch.float32)\n logger.info(f\"Set aabb from {self.aabb} to {aabb}\")\n self.aabb.copy_(aabb)\n self.aabb = self.aabb.to(self.device)\n\n def register_feats_reduction_mat(\n self,\n feats_reduction_mat: Tensor,\n feat_color_min: Tensor,\n feat_color_max: Tensor,\n ) -> None:\n \"\"\"\n A placeholder for registering the PCA reduction matrix and min/max values for visualization.\n You may not want to compute PCA reduction matrix every time from the dataset.\n \"\"\"\n # for visualization\n self.feats_reduction_mat.copy_(feats_reduction_mat)\n self.feat_color_min.copy_(feat_color_min)\n self.feat_color_max.copy_(feat_color_max)\n self.feats_reduction_mat = self.feats_reduction_mat.to(self.device)\n self.feat_color_min = self.feat_color_min.to(self.device)\n self.feat_color_max = self.feat_color_max.to(self.device)\n\n @property\n def device(self) -> torch.device:\n return self.aabb.device\n\n def contract_points(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n contract [-inf, inf] points to the range [0, 1] for hash encoding\n\n 
Returns:\n normed_positions: [..., 3] in [0, 1]\n \"\"\"\n if self.unbounded:\n # use infinte norm to contract the positions for cuboid aabb\n normed_positions = contract(positions, self.aabb, ord=float(\"inf\"))\n else:\n aabb_min, aabb_max = torch.split(self.aabb, 3, dim=-1)\n normed_positions = (positions - aabb_min) / (aabb_max - aabb_min)\n selector = (\n ((normed_positions > 0.0) & (normed_positions < 1.0))\n .all(dim=-1)\n .to(positions)\n )\n normed_positions = normed_positions * selector.unsqueeze(-1)\n return normed_positions\n\n def forward_static_hash(\n self,\n positions: Tensor,\n ) -> Tensor:\n \"\"\"\n forward pass for static hash encoding\n\n Returns:\n encoded_features: [..., geometry_feature_dim + (semantic_feature_dim)]\n normed_positions: [..., 3] in [0, 1]\n \"\"\"\n normed_positions = self.contract_points(positions)\n xyz_encoding = self.xyz_encoder(normed_positions.view(-1, self.num_dims))\n encoded_features = self.base_mlp(xyz_encoding).view(\n list(normed_positions.shape[:-1]) + [-1]\n )\n return encoded_features, normed_positions\n\n def forward_dynamic_hash(\n self,\n normed_positions: Tensor,\n normed_timestamps: Tensor,\n return_hash_encodings: bool = False,\n ) -> Union[Tuple[Tensor, Tensor], Tensor]:\n \"\"\"\n forward pass for dynamic hash encoding\n\n Returns:\n encoded_dynamic_feats: [..., geometry_feature_dim + (semantic_feature_dim)]\n dynamic_xyz_encoding: [..., n_output_dims] (optional)\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n # To be fixed.\n # if self.training or not self.enable_temporal_interpolation:\n if True:\n temporal_positions = torch.cat(\n [normed_positions, normed_timestamps], dim=-1\n )\n dynamic_xyz_encoding = self.dynamic_xyz_encoder(\n temporal_positions.view(-1, self.num_dims + 1)\n ).view(list(temporal_positions.shape[:-1]) + [-1])\n encoded_dynamic_feats = self.dynamic_base_mlp(dynamic_xyz_encoding)\n else:\n encoded_dynamic_feats = temporal_interpolation(\n normed_timestamps,\n self.training_timesteps,\n normed_positions,\n self.dynamic_xyz_encoder,\n self.dynamic_base_mlp,\n interpolate_xyz_encoding=self.interpolate_xyz_encoding,\n )\n if return_hash_encodings:\n return encoded_dynamic_feats, dynamic_xyz_encoding\n else:\n return encoded_dynamic_feats\n\n def forward_flow_hash(\n self,\n normed_positions: Tensor,\n normed_timestamps: Tensor,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"\n forward pass for flow hash encoding\n\n Returns:\n flow: [..., 6] (forward_flow, backward_flow)\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n if self.training or not self.enable_temporal_interpolation:\n temporal_positions = torch.cat(\n [normed_positions, normed_timestamps], dim=-1\n )\n flow_xyz_encoding = self.flow_xyz_encoder(\n temporal_positions.view(-1, self.num_dims + 1)\n ).view(list(temporal_positions.shape[:-1]) + [-1])\n flow = self.flow_mlp(flow_xyz_encoding)\n else:\n flow = temporal_interpolation(\n normed_timestamps,\n self.training_timesteps,\n normed_positions,\n self.flow_xyz_encoder,\n self.flow_mlp,\n interpolate_xyz_encoding=True,\n )\n return flow\n\n def forward(\n self,\n positions: Tensor,\n directions: Tensor = None,\n data_dict: Dict[str, Tensor] = {},\n return_density_only: bool = False,\n combine_static_dynamic: bool = False,\n query_feature_head: bool = True,\n query_pe_head: bool = True,\n ) -> Dict[str, Tensor]:\n \"\"\"\n Args:\n positions: [..., 3]\n directions: [..., 3]\n data_dict: a dictionary 
containing additional data\n return_density_only: if True, only return density without querying other heads\n combine_static_dynamic: if True, combine static and dynamic predictions based on static and dynamic density\n in addition to returning separate results for static and dynamic fields\n query_feature_head: if True, query feature head\n query_pe_head: if True, query PE head. Disable this if we want to directly query 3D features.\n Returns:\n results_dict: a dictionary containing everything\n \"\"\"\n results_dict = {}\n # forward static branch\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n\n has_timestamps = (\n \"normed_timestamps\" in data_dict or \"lidar_normed_timestamps\" in data_dict\n )\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n # forward dynamic branch\n if \"normed_timestamps\" in data_dict:\n normed_timestamps = data_dict[\"normed_timestamps\"]\n elif \"lidar_normed_timestamps\" in data_dict:\n # we use `lidar_` prefix as an identifier to skip querying other heads\n normed_timestamps = data_dict[\"lidar_normed_timestamps\"]\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow, backward_flow = flow[..., :3], flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n # overwrite dynamic feats using temporal aggregation results\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n # to be studied\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n # blend static and dynamic density to get the final density\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n }\n )\n if return_density_only:\n # skip querying other heads\n return results_dict\n\n if directions is not None:\n rgb_results = self.query_rgb(\n directions, geo_feats, dynamic_geo_feats, data_dict=data_dict\n )\n results_dict[\"dynamic_rgb\"] = rgb_results[\"dynamic_rgb\"]\n results_dict[\"static_rgb\"] = rgb_results[\"rgb\"]\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"rgb\"] = (\n static_ratio[..., None] * results_dict[\"static_rgb\"]\n + dynamic_ratio[..., None] * results_dict[\"dynamic_rgb\"]\n )\n if self.enable_shadow_head:\n shadow_ratio = self.shadow_head(dynamic_geo_feats)\n results_dict[\"shadow_ratio\"] = shadow_ratio\n if combine_static_dynamic and \"rgb\" in results_dict:\n results_dict[\"rgb\"] = (\n static_ratio[..., None]\n * results_dict[\"rgb\"]\n * (1 - shadow_ratio)\n + dynamic_ratio[..., None] * 
results_dict[\"dynamic_rgb\"]\n )\n else:\n # if no dynamic branch, use static density\n results_dict[\"density\"] = static_density\n if return_density_only:\n # skip querying other heads\n return results_dict\n if directions is not None:\n rgb_results = self.query_rgb(directions, geo_feats, data_dict=data_dict)\n results_dict[\"rgb\"] = rgb_results[\"rgb\"]\n\n if self.enable_feature_head and query_feature_head:\n if self.enable_learnable_pe and query_pe_head:\n learnable_pe_map = (\n F.grid_sample(\n self.learnable_pe_map,\n # assume pixel coords have been normalize to [-1, 1]\n data_dict[\"pixel_coords\"].reshape(1, 1, -1, 2) * 2 - 1,\n align_corners=False, # didn't test with True\n mode=\"bilinear\", # didn't test with other modes\n )\n .squeeze(2)\n .squeeze(0)\n .permute(1, 0)\n )\n dino_pe = self.pe_head(learnable_pe_map)\n results_dict[\"dino_pe\"] = dino_pe\n dino_feats = self.dino_head(semantic_feats)\n\n if self.dynamic_xyz_encoder is not None and has_timestamps:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n if combine_static_dynamic:\n static_ratio = static_density / (density + 1e-6)\n dynamic_ratio = dynamic_density / (density + 1e-6)\n results_dict[\"dino_feat\"] = (\n static_ratio[..., None] * dino_feats\n + dynamic_ratio[..., None] * dynamic_dino_feats\n )\n else:\n results_dict[\"dino_feat\"] = dino_feats\n\n # query sky if not in lidar mode\n if (\n self.enable_sky_head\n and \"lidar_origin\" not in data_dict\n and directions is not None\n ):\n directions = directions[:, 0]\n reduced_data_dict = {k: v[:, 0] for k, v in data_dict.items()}\n sky_results = self.query_sky(directions, data_dict=reduced_data_dict)\n results_dict.update(sky_results)\n\n return results_dict\n\n def temporal_aggregation(\n self,\n positions: Tensor, # current world coordinates\n normed_timestamps: Tensor, # current normalized timestamps\n forward_flow: Tensor,\n backward_flow: Tensor,\n dynamic_feats: Tensor,\n ) -> Tensor:\n \"\"\"\n temporal aggregation for dynamic features\n Eq. 
(8) in the emernerf paper\n \"\"\"\n if normed_timestamps.shape[-1] != 1:\n normed_timestamps = normed_timestamps.unsqueeze(-1)\n if self.training:\n noise = torch.rand_like(forward_flow)[..., 0:1]\n else:\n noise = torch.ones_like(forward_flow)[..., 0:1]\n # forward and backward warped positions\n forward_warped_positions = self.contract_points(\n positions + forward_flow * noise\n )\n backward_warped_positions = self.contract_points(\n positions + backward_flow * noise\n )\n # forward and backward warped timestamps\n forward_warped_time = torch.clamp(\n normed_timestamps + self.time_diff * noise, 0, 1.0\n )\n backward_warped_time = torch.clamp(\n normed_timestamps - self.time_diff * noise, 0, 1.0\n )\n (\n forward_dynamic_feats,\n forward_dynamic_hash_encodings,\n ) = self.forward_dynamic_hash(\n forward_warped_positions,\n forward_warped_time,\n return_hash_encodings=True,\n )\n (\n backward_dynamic_feats,\n backward_dynamic_hash_encodings,\n ) = self.forward_dynamic_hash(\n backward_warped_positions,\n backward_warped_time,\n return_hash_encodings=True,\n )\n forward_pred_flow = self.forward_flow_hash(\n forward_warped_positions,\n forward_warped_time,\n )\n backward_pred_flow = self.forward_flow_hash(\n backward_warped_positions,\n backward_warped_time,\n )\n # simple weighted sum\n aggregated_dynamic_feats = (\n dynamic_feats + 0.5 * forward_dynamic_feats + 0.5 * backward_dynamic_feats\n ) / 2.0\n return {\n \"dynamic_feats\": aggregated_dynamic_feats,\n \"forward_pred_backward_flow\": forward_pred_flow[..., 3:],\n \"backward_pred_forward_flow\": backward_pred_flow[..., :3],\n # to be studied\n \"forward_dynamic_hash_encodings\": forward_dynamic_hash_encodings,\n \"backward_dynamic_hash_encodings\": backward_dynamic_hash_encodings,\n }\n\n def query_rgb(\n self,\n directions: Tensor,\n geo_feats: Tensor,\n dynamic_geo_feats: Tensor = None,\n data_dict: Dict[str, Tensor] = None,\n ) -> Tensor:\n directions = (directions + 1.0) / 2.0 # do we need this?\n h = self.direction_encoding(directions.reshape(-1, directions.shape[-1])).view(\n *directions.shape[:-1], -1\n )\n if self.enable_cam_embedding or self.enable_img_embedding:\n if \"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n # print(\"using mean appearance embedding\")\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n h = torch.cat([h, appearance_embedding], dim=-1)\n\n rgb = self.rgb_head(torch.cat([h, geo_feats], dim=-1))\n rgb = F.sigmoid(rgb)\n results = {\"rgb\": rgb}\n\n if self.dynamic_xyz_encoder is not None:\n assert (\n dynamic_geo_feats is not None\n ), \"Dynamic geometry features are not provided.\"\n dynamic_rgb = self.rgb_head(torch.cat([h, dynamic_geo_feats], dim=-1))\n dynamic_rgb = F.sigmoid(dynamic_rgb)\n results[\"dynamic_rgb\"] = dynamic_rgb\n return results\n\n def query_sky(\n self, directions: Tensor, data_dict: Dict[str, Tensor] = None\n ) -> Dict[str, Tensor]:\n if len(directions.shape) == 2:\n dd = self.direction_encoding(directions).to(directions)\n else:\n dd = self.direction_encoding(directions[:, 0]).to(directions)\n if self.enable_cam_embedding or self.enable_img_embedding:\n # optionally add appearance embedding\n if 
\"cam_idx\" in data_dict and self.enable_cam_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"cam_idx\"])\n elif \"img_idx\" in data_dict and self.enable_img_embedding:\n appearance_embedding = self.appearance_embedding(data_dict[\"img_idx\"])\n else:\n # use mean appearance embedding\n appearance_embedding = torch.ones(\n (*directions.shape[:-1], self.appearance_embedding_dim),\n device=directions.device,\n ) * self.appearance_embedding.weight.mean(dim=0)\n dd = torch.cat([dd, appearance_embedding], dim=-1)\n rgb_sky = self.sky_head(dd).to(directions)\n rgb_sky = F.sigmoid(rgb_sky)\n results = {\"rgb_sky\": rgb_sky}\n if self.enable_feature_head:\n self.dino_sky_head(dd).to(directions)\n results[\"dino_sky_feat\"] = self.dino_sky_head(dd).to(directions)\n return results\n\n def query_flow(\n self, positions: Tensor, normed_timestamps: Tensor, query_density: bool = True\n ) -> Dict[str, Tensor]:\n \"\"\"\n query flow field\n \"\"\"\n normed_positions = self.contract_points(positions)\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n results = {\n \"forward_flow\": flow[..., :3],\n \"backward_flow\": flow[..., 3:],\n }\n if query_density:\n # it's important to filter valid flows based on a dynamic density threshold.\n # flows are valid only if they are on dynamic points.\n dynamic_feats = self.forward_dynamic_hash(\n normed_positions, normed_timestamps\n )\n (dynamic_geo_feats, _,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n results[\"dynamic_density\"] = dynamic_density\n return results\n\n def query_attributes(\n self,\n positions: Tensor,\n normed_timestamps: Tensor = None,\n query_feature_head: bool = True,\n ):\n \"\"\"\n query attributes (density, dino features, etc.)\n \"\"\"\n results_dict = {}\n encoded_features, normed_positions = self.forward_static_hash(positions)\n geo_feats, semantic_feats = torch.split(\n encoded_features,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n static_density = self.density_activation(geo_feats[..., 0])\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_feats, dynamic_hash_encodings = self.forward_dynamic_hash(\n normed_positions, normed_timestamps, return_hash_encodings=True\n )\n if self.flow_xyz_encoder is not None:\n flow = self.forward_flow_hash(normed_positions, normed_timestamps)\n forward_flow = flow[..., :3]\n backward_flow = flow[..., 3:]\n results_dict[\"forward_flow\"] = forward_flow\n results_dict[\"backward_flow\"] = backward_flow\n temporal_aggregation_results = self.temporal_aggregation(\n positions,\n normed_timestamps,\n forward_flow,\n backward_flow,\n dynamic_feats,\n )\n dynamic_feats = temporal_aggregation_results[\"dynamic_feats\"]\n temporal_aggregation_results[\n \"current_dynamic_hash_encodings\"\n ] = dynamic_hash_encodings\n results_dict.update(temporal_aggregation_results)\n\n (dynamic_geo_feats, dynamic_semantic_feats,) = torch.split(\n dynamic_feats,\n [self.geometry_feature_dim, self.semantic_feature_dim],\n dim=-1,\n )\n dynamic_density = self.density_activation(dynamic_geo_feats[..., 0])\n density = static_density + dynamic_density\n results_dict.update(\n {\n \"density\": density,\n \"static_density\": static_density,\n \"dynamic_density\": dynamic_density,\n # \"occupancy\": occupancy,\n }\n )\n else:\n results_dict[\"density\"] = static_density\n if self.enable_feature_head and 
query_feature_head:\n # query on demand\n dino_feats = self.dino_head(semantic_feats)\n if self.dynamic_xyz_encoder is not None and normed_timestamps is not None:\n dynamic_dino_feats = self.dino_head(dynamic_semantic_feats)\n results_dict[\"static_dino_feat\"] = dino_feats\n results_dict[\"dynamic_dino_feat\"] = dynamic_dino_feats\n results_dict[\"dino_feat\"] = (\n static_density.unsqueeze(-1) * dino_feats\n + dynamic_density.unsqueeze(-1) * dynamic_dino_feats\n ) / (density.unsqueeze(-1) + 1e-6)\n else:\n results_dict[\"dino_feat\"] = dino_feats\n return results_dict" }, { "identifier": "render_rays", "path": "radiance_fields/render_utils.py", "snippet": "def render_rays(\n # scene\n radiance_field: RadianceField = None,\n proposal_estimator: PropNetEstimator = None,\n proposal_networks: Optional[List[DensityField]] = None,\n data_dict: Dict[str, Tensor] = None,\n cfg: OmegaConf = None,\n proposal_requires_grad: bool = False,\n return_decomposition: bool = False,\n prefix=\"\",\n) -> Dict[str, Tensor]:\n \"\"\"Render some attributes of the scene along the rays.\"\"\"\n # reshape data_dict to be (num_rays, ...)\n rays_shape = data_dict[prefix + \"origins\"].shape\n if len(rays_shape) == 3:\n height, width, _ = rays_shape\n num_rays = height * width\n reshaped_data_dict = {}\n for k, v in data_dict.items():\n reshaped_data_dict[k] = v.reshape(num_rays, -1).squeeze()\n else:\n num_rays, _ = rays_shape\n reshaped_data_dict = data_dict.copy()\n\n def prop_sigma_fn(t_starts, t_ends, proposal_network):\n # query propsal networks for density\n t_origins = chunk_data_dict[prefix + \"origins\"][..., None, :]\n t_dirs = chunk_data_dict[prefix + \"viewdirs\"][..., None, :]\n positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0\n sub_dict = {\n k: v[..., None].repeat_interleave(t_starts.shape[-1], dim=-1)\n for k, v in chunk_data_dict.items()\n if \"time\" in k\n }\n return proposal_network(positions, sub_dict)\n\n def query_fn(t_starts, t_ends):\n # query the final nerf model for density and other information along the rays\n t_origins = chunk_data_dict[prefix + \"origins\"][..., None, :]\n t_dirs = chunk_data_dict[prefix + \"viewdirs\"][..., None, :].repeat_interleave(\n t_starts.shape[-1], dim=-2\n )\n sub_dict = {\n k: v[..., None].repeat_interleave(t_starts.shape[-1], dim=-1)\n for k, v in chunk_data_dict.items()\n if k not in [prefix + \"viewdirs\", prefix + \"origins\", \"pixel_coords\"]\n }\n sub_dict[\"t_starts\"], sub_dict[\"t_ends\"] = t_starts, t_ends\n if \"pixel_coords\" in chunk_data_dict:\n # use this for positional embedding decomposition\n sub_dict[\"pixel_coords\"] = chunk_data_dict[\"pixel_coords\"]\n positions = t_origins + t_dirs * (t_starts + t_ends)[..., None] / 2.0\n # return density only when rendering lidar, i.e., no rgb or sky or features are rendered\n results_dict: Dict[str, Tensor] = radiance_field(\n positions, t_dirs, sub_dict, return_density_only=(prefix == \"lidar_\")\n )\n results_dict[\"density\"] = results_dict[\"density\"].squeeze(-1)\n return results_dict\n\n results = []\n chunk = 2**24 if radiance_field.training else cfg.render.render_chunk_size\n for i in range(0, num_rays, chunk):\n chunk_data_dict = {k: v[i : i + chunk] for k, v in reshaped_data_dict.items()}\n assert proposal_networks is not None, \"proposal_networks is required.\"\n # obtain proposed intervals\n t_starts, t_ends = proposal_estimator.sampling(\n prop_sigma_fns=[\n lambda *args: prop_sigma_fn(*args, p) for p in proposal_networks\n ],\n 
num_samples=cfg.nerf.sampling.num_samples,\n prop_samples=cfg.nerf.propnet.num_samples_per_prop,\n n_rays=chunk_data_dict[prefix + \"origins\"].shape[0],\n near_plane=cfg.nerf.propnet.near_plane,\n far_plane=cfg.nerf.propnet.far_plane,\n sampling_type=cfg.nerf.propnet.sampling_type,\n stratified=radiance_field.training,\n requires_grad=proposal_requires_grad,\n )\n # render the scene\n chunk_results_dict = rendering(\n t_starts,\n t_ends,\n query_fn=query_fn,\n return_decomposition=return_decomposition,\n )\n extras = chunk_results_dict.pop(\"extras\")\n results.append(chunk_results_dict)\n render_results = collate(\n results,\n collate_fn_map={\n **default_collate_fn_map,\n Tensor: lambda x, **_: torch.cat(x, 0),\n },\n )\n extras[\"density\"] = render_results.pop(\"density\")\n for k, v in render_results.items():\n # recover the original shape\n render_results[k] = v.reshape(list(rays_shape[:-1]) + list(v.shape[1:]))\n render_results[\"extras\"] = extras\n return render_results" }, { "identifier": "render_pixels", "path": "radiance_fields/video_utils.py", "snippet": "def render_pixels(\n cfg: OmegaConf,\n model: RadianceField,\n proposal_estimator: PropNetEstimator,\n dataset: SplitWrapper,\n proposal_networks: Optional[List[DensityField]] = None,\n compute_metrics: bool = False,\n vis_indices: Optional[List[int]] = None,\n return_decomposition: bool = True,\n):\n \"\"\"\n Render pixel-related outputs from a model.\n\n Args:\n ....skip obvious args\n compute_metrics (bool, optional): Whether to compute metrics. Defaults to False.\n vis_indices (Optional[List[int]], optional): Indices to visualize. Defaults to None.\n return_decomposition (bool, optional): Whether to visualize the static-dynamic decomposition. Defaults to True.\n \"\"\"\n model.eval()\n if proposal_networks is not None:\n for p in proposal_networks:\n p.eval()\n if proposal_estimator is not None:\n proposal_estimator.eval()\n # set up render function\n render_func = lambda data_dict: render_rays(\n radiance_field=model,\n proposal_estimator=proposal_estimator,\n proposal_networks=proposal_networks,\n data_dict=data_dict,\n cfg=cfg,\n return_decomposition=return_decomposition, # return static-dynamic decomposition\n )\n render_results = render(\n dataset,\n render_func,\n model=model,\n compute_metrics=compute_metrics,\n vis_indices=vis_indices,\n )\n if compute_metrics:\n num_samples = len(dataset) if vis_indices is None else len(vis_indices)\n logger.info(f\"Eval over {num_samples} images:\")\n logger.info(f\"\\tPSNR: {render_results['psnr']:.4f}\")\n logger.info(f\"\\tSSIM: {render_results['ssim']:.4f}\")\n logger.info(f\"\\tFeature PSNR: {render_results['feat_psnr']:.4f}\")\n logger.info(f\"\\tMasked PSNR: {render_results['masked_psnr']:.4f}\")\n logger.info(f\"\\tMasked SSIM: {render_results['masked_ssim']:.4f}\")\n logger.info(f\"\\tMasked Feature PSNR: {render_results['masked_feat_psnr']:.4f}\")\n\n return render_results" }, { "identifier": "save_videos", "path": "radiance_fields/video_utils.py", "snippet": "def save_videos(\n render_results: Dict[str, List[Tensor]],\n save_pth: str,\n num_timestamps: int,\n keys: List[str] = [\"gt_rgbs\", \"rgbs\", \"depths\"],\n num_cams: int = 3,\n save_seperate_video: bool = False,\n save_images: bool = False,\n fps: int = 10,\n verbose: bool = True,\n):\n if save_seperate_video:\n return_frame = save_seperate_videos(\n render_results,\n save_pth,\n num_timestamps=num_timestamps,\n keys=keys,\n num_cams=num_cams,\n save_images=save_images,\n fps=fps,\n verbose=verbose,\n )\n 
else:\n return_frame = save_concatenated_videos(\n render_results,\n save_pth,\n num_timestamps=num_timestamps,\n keys=keys,\n num_cams=num_cams,\n save_images=save_images,\n fps=fps,\n verbose=verbose,\n )\n return return_frame" }, { "identifier": "PropNetEstimator", "path": "third_party/nerfacc_prop_net.py", "snippet": "class PropNetEstimator(AbstractEstimator):\n \"\"\"Proposal network transmittance estimator.\n\n References: \"Mip-NeRF 360: Unbounded Anti-Aliased Neural Radiance Fields.\"\n\n Args:\n optimizer: The optimizer to use for the proposal networks.\n scheduler: The learning rate scheduler to use for the proposal networks.\n \"\"\"\n\n def __init__(\n self,\n optimizer: Optional[torch.optim.Optimizer] = None,\n scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n enable_anti_aliasing_loss: Optional[bool] = True,\n anti_aliasing_pulse_width: Optional[List[float]] = [0.03, 0.003],\n ) -> None:\n super().__init__()\n self.optimizer = optimizer\n self.scheduler = scheduler\n self.prop_cache: List = []\n self.enable_anti_aliasing_loss = enable_anti_aliasing_loss\n self.pulse_width = anti_aliasing_pulse_width\n if self.enable_anti_aliasing_loss:\n logger.info(\"Enable anti-aliasing loss, pulse width: %s\", self.pulse_width)\n\n @torch.no_grad()\n def sampling(\n self,\n prop_sigma_fns: List[Callable],\n prop_samples: List[int],\n num_samples: int,\n # rendering options\n n_rays: int,\n near_plane: float,\n far_plane: float,\n sampling_type: Literal[\n \"uniform\", \"lindisp\", \"sqrt\", \"log\", \"uniform_lindisp\"\n ] = \"uniform_lindisp\",\n # training options\n stratified: bool = False,\n requires_grad: bool = False,\n ) -> Tuple[Tensor, Tensor]:\n \"\"\"Sampling with CDFs from proposal networks.\n\n Note:\n When `requires_grad` is `True`, the gradients are allowed to flow\n through the proposal networks, and the outputs of the proposal\n networks are cached to update them later when calling `update_every_n_steps()`\n\n Args:\n prop_sigma_fns: Proposal network evaluate functions. It should be a list\n of functions that take in samples {t_starts (n_rays, n_samples),\n t_ends (n_rays, n_samples)} and returns the post-activation densities\n (n_rays, n_samples).\n prop_samples: Number of samples to draw from each proposal network. Should\n be the same length as `prop_sigma_fns`.\n num_samples: Number of samples to draw in the end.\n n_rays: Number of rays.\n near_plane: Near plane.\n far_plane: Far plane.\n sampling_type: Sampling type. Either \"uniform\" or \"lindisp\". Default to\n \"lindisp\".\n stratified: Whether to use stratified sampling. Default to `False`.\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n\n Returns:\n A tuple of {Tensor, Tensor}:\n\n - **t_starts**: The starts of the samples. Shape (n_rays, num_samples).\n - **t_ends**: The ends of the samples. 
Shape (n_rays, num_samples).\n\n \"\"\"\n assert len(prop_sigma_fns) == len(prop_samples), (\n \"The number of proposal networks and the number of samples \"\n \"should be the same.\"\n )\n cdfs = torch.cat(\n [\n torch.zeros((n_rays, 1), device=self.device),\n torch.ones((n_rays, 1), device=self.device),\n ],\n dim=-1,\n )\n intervals = RayIntervals(vals=cdfs)\n\n for i, (level_fn, level_samples) in enumerate(\n zip(prop_sigma_fns, prop_samples)\n ):\n intervals, _ = importance_sampling(\n intervals, cdfs, level_samples, stratified\n )\n t_vals = _transform_stot(\n sampling_type, intervals.vals, near_plane, far_plane\n )\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n\n with torch.set_grad_enabled(requires_grad):\n sigmas = level_fn(t_starts, t_ends)[\"density\"].squeeze(-1)\n assert sigmas.shape == t_starts.shape\n trans, _ = render_transmittance_from_density(t_starts, t_ends, sigmas)\n cdfs = 1.0 - torch.cat(\n [trans, torch.zeros_like(trans[..., :1])], dim=-1\n )\n if requires_grad:\n self.prop_cache.append((intervals, cdfs, i))\n\n intervals, _ = importance_sampling(intervals, cdfs, num_samples, stratified)\n t_vals = _transform_stot(sampling_type, intervals.vals, near_plane, far_plane)\n t_starts = t_vals[..., :-1]\n t_ends = t_vals[..., 1:]\n if requires_grad:\n self.prop_cache.append((intervals, None, None))\n\n return t_starts, t_ends\n\n @torch.enable_grad()\n def compute_loss(self, trans: Tensor, loss_scaler: float = 1.0) -> Tensor:\n \"\"\"Compute the loss for the proposal networks.\n\n Args:\n trans: The transmittance of all samples. Shape (n_rays, num_samples).\n loss_scaler: The loss scaler. Default to 1.0.\n\n Returns:\n The loss for the proposal networks.\n \"\"\"\n if len(self.prop_cache) == 0:\n return torch.zeros((), device=self.device)\n\n intervals, _, _ = self.prop_cache.pop()\n # get cdfs at all edges of intervals\n cdfs = 1.0 - torch.cat([trans, torch.zeros_like(trans[..., :1])], dim=-1)\n cdfs = cdfs.detach()\n loss = 0.0\n\n if self.enable_anti_aliasing_loss:\n w_normalize = (cdfs[..., 1:] - cdfs[..., :-1]) / (\n intervals.vals[..., 1:] - intervals.vals[..., :-1]\n )\n c1, w1 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[0])\n c2, w2 = blur_stepfun(intervals.vals, w_normalize, self.pulse_width[1])\n area1 = 0.5 * (w1[..., 1:] + w1[..., :-1]) * (c1[..., 1:] - c1[..., :-1])\n area2 = 0.5 * (w2[..., 1:] + w2[..., :-1]) * (c2[..., 1:] - c2[..., :-1])\n cdfs1 = torch.cat(\n [\n torch.zeros_like(area1[..., :1]),\n torch.cumsum(area1, dim=-1),\n ],\n dim=-1,\n )\n cdfs2 = torch.cat(\n [\n torch.zeros_like(area2[..., :1]),\n torch.cumsum(area2, dim=-1),\n ],\n dim=-1,\n )\n cs = [c1, c2]\n ws = [w1, w2]\n _cdfs = [cdfs1, cdfs2]\n while self.prop_cache:\n prop_intervals, prop_cdfs, prop_id = self.prop_cache.pop()\n wp = prop_cdfs[..., 1:] - prop_cdfs[..., :-1]\n cdf_interp = sorted_interp_quad(\n prop_intervals.vals, cs[prop_id], ws[prop_id], _cdfs[prop_id]\n )\n w_s = torch.diff(cdf_interp, dim=-1)\n loss += ((w_s - wp).clamp_min(0) ** 2 / (wp + 1e-5)).mean()\n else:\n while self.prop_cache:\n prop_intervals, prop_cdfs, _ = self.prop_cache.pop()\n loss += _pdf_loss(intervals, cdfs, prop_intervals, prop_cdfs).mean()\n return loss * loss_scaler\n\n @torch.enable_grad()\n def update_every_n_steps(\n self,\n trans: Tensor,\n requires_grad: bool = False,\n loss_scaler: float = 1.0,\n ) -> float:\n \"\"\"Update the estimator every n steps during training.\n\n Args:\n trans: The transmittance of all samples. 
Shape (n_rays, num_samples).\n requires_grad: Whether to allow gradients to flow through the proposal\n networks. Default to `False`.\n loss_scaler: The loss scaler to use. Default to 1.0.\n\n Returns:\n The loss of the proposal networks for logging (a float scalar).\n \"\"\"\n if requires_grad:\n return self._update(trans=trans, loss_scaler=loss_scaler)\n else:\n if self.scheduler is not None:\n self.scheduler.step()\n return 0.0\n\n @torch.enable_grad()\n def _update(self, trans: Tensor, loss_scaler: float = 1.0) -> float:\n assert len(self.prop_cache) > 0\n assert self.optimizer is not None, \"No optimizer is provided.\"\n\n loss = self.compute_loss(trans, loss_scaler)\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n if self.scheduler is not None:\n self.scheduler.step()\n return loss.item()" }, { "identifier": "get_proposal_requires_grad_fn", "path": "third_party/nerfacc_prop_net.py", "snippet": "def get_proposal_requires_grad_fn(\n target: float = 5.0, num_steps: int = 1000\n) -> Callable:\n schedule = lambda s: min(s / num_steps, 1.0) * target\n\n steps_since_last_grad = 0\n\n def proposal_requires_grad_fn(step: int) -> bool:\n nonlocal steps_since_last_grad\n target_steps_since_last_grad = schedule(step)\n requires_grad = steps_since_last_grad > target_steps_since_last_grad\n if requires_grad:\n steps_since_last_grad = 0\n steps_since_last_grad += 1\n return requires_grad\n\n return proposal_requires_grad_fn" }, { "identifier": "MetricLogger", "path": "utils/logging.py", "snippet": "class MetricLogger(object):\n def __init__(self, delimiter=\"\\t\", output_file=None):\n self.meters = defaultdict(SmoothedValue)\n self.delimiter = delimiter\n self.output_file = output_file\n\n def update(self, **kwargs):\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor):\n v = v.item()\n assert isinstance(v, (float, int))\n self.meters[k].update(v)\n\n def __getattr__(self, attr):\n if attr in self.meters:\n return self.meters[attr]\n if attr in self.__dict__:\n return self.__dict__[attr]\n raise AttributeError(\n f\"'{type(self).__name__}' object has no attribute '{attr}'\"\n )\n\n def __str__(self):\n loss_str = []\n for name, meter in self.meters.items():\n loss_str.append(f\"{name}: {str(meter)}\")\n return self.delimiter.join(loss_str)\n\n def synchronize_between_processes(self):\n for meter in self.meters.values():\n meter.synchronize_between_processes()\n\n def add_meter(self, name, meter):\n self.meters[name] = meter\n\n def dump_in_output_file(self, iteration, iter_time, data_time):\n if self.output_file is None:\n return\n dict_to_dump = dict(\n iteration=iteration,\n iter_time=iter_time,\n data_time=data_time,\n )\n dict_to_dump.update({k: v.median for k, v in self.meters.items()})\n with open(self.output_file, \"a\") as f:\n f.write(json.dumps(dict_to_dump) + \"\\n\")\n pass\n\n def log_every(\n self, iterable, print_freq, header=None, n_iterations=None, start_iteration=0\n ):\n i = start_iteration\n if not header:\n header = \"\"\n start_time = time.time()\n end = time.time()\n iter_time = SmoothedValue(fmt=\"{avg:.6f}\")\n data_time = SmoothedValue(fmt=\"{avg:.6f}\")\n\n if n_iterations is None:\n n_iterations = len(iterable)\n\n space_fmt = \":\" + str(len(str(n_iterations))) + \"d\"\n\n log_list = [\n header,\n \"[{0\" + space_fmt + \"}/{1}]\",\n \"eta: {eta}\",\n \"elapsed: {elapsed_time_str}\",\n \"{meters}\",\n \"time: {time}\",\n \"data: {data}\",\n ]\n if torch.cuda.is_available():\n log_list += [\"max mem: {memory:.0f}\"]\n\n log_msg = 
self.delimiter.join(log_list)\n MB = 1024.0 * 1024.0\n for obj in iterable:\n data_time.update(time.time() - end)\n yield obj\n iter_time.update(time.time() - end)\n if i % print_freq == 0 or i == n_iterations - 1:\n self.dump_in_output_file(\n iteration=i, iter_time=iter_time.avg, data_time=data_time.avg\n )\n eta_seconds = iter_time.global_avg * (n_iterations - i)\n eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))\n elapsed_time = time.time() - start_time\n elapsed_time_str = str(datetime.timedelta(seconds=int(elapsed_time)))\n\n if torch.cuda.is_available():\n logger.info(\n log_msg.format(\n i,\n n_iterations,\n eta=eta_string,\n elapsed_time_str=elapsed_time_str,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time),\n memory=torch.cuda.max_memory_allocated() / MB,\n )\n )\n else:\n logger.info(\n log_msg.format(\n i,\n n_iterations,\n eta=eta_string,\n meters=str(self),\n time=str(iter_time),\n data=str(data_time),\n )\n )\n i += 1\n end = time.time()\n if i >= n_iterations:\n break\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n logger.info(\n f\"{header} Total time: {total_time_str} ({total_time / n_iterations:.6f} s / it)\"\n )" }, { "identifier": "setup_logging", "path": "utils/logging.py", "snippet": "def setup_logging(\n output: Optional[str] = None,\n *,\n name: Optional[str] = None,\n level: int = logging.DEBUG,\n capture_warnings: bool = True,\n time_string: Optional[str] = None,\n) -> None:\n \"\"\"\n Setup logging.\n\n Args:\n output: A file name or a directory to save log files. If None, log\n files will not be saved. If output ends with \".txt\" or \".log\", it\n is assumed to be a file name.\n Otherwise, logs will be saved to `output/log.txt`.\n name: The name of the logger to configure, by default the root logger.\n level: The logging level to use.\n capture_warnings: Whether warnings should be captured as logs.\n \"\"\"\n logging.captureWarnings(capture_warnings)\n _configure_logger(name, level=level, output=output, time_string=time_string)" }, { "identifier": "visualize_voxels", "path": "utils/visualization_tools.py", "snippet": "def visualize_voxels(\n cfg: OmegaConf,\n model: RadianceField,\n proposal_estimator: PropNetEstimator = None,\n proposal_networks: DensityField = None,\n dataset: SceneDataset = None,\n device: str = \"cuda\",\n save_html: bool = True,\n is_dynamic: bool = False,\n):\n model.eval()\n for p in proposal_networks:\n p.eval()\n if proposal_estimator is not None:\n proposal_estimator.eval()\n if proposal_networks is not None:\n for p in proposal_networks:\n p.eval()\n\n vis_voxel_aabb = torch.tensor(model.aabb, device=device)\n # slightly expand the aabb to make sure all points are covered\n vis_voxel_aabb[1:3] -= 1\n vis_voxel_aabb[3:] += 1\n aabb_min, aabb_max = torch.split(vis_voxel_aabb, 3, dim=-1)\n aabb_length = aabb_max - aabb_min\n\n # compute the voxel resolution for visualization\n static_voxel_resolution = torch.ceil(\n (aabb_max - aabb_min) / cfg.render.vis_voxel_size\n ).long()\n empty_static_voxels = torch.zeros(*static_voxel_resolution, device=device)\n if is_dynamic:\n # use a slightly smaller voxel size for dynamic voxels\n dynamic_voxel_resolution = torch.ceil(\n (aabb_max - aabb_min) / cfg.render.vis_voxel_size * 0.8\n ).long()\n all_occupied_dynamic_points = []\n empty_dynamic_voxels = torch.zeros(*dynamic_voxel_resolution, device=device)\n\n # collect some patches for PCA\n to_compute_pca_patches = []\n\n pbar = tqdm(\n 
dataset.full_pixel_set,\n desc=\"querying depth\",\n dynamic_ncols=True,\n total=len(dataset.full_pixel_set),\n )\n for i, data_dict in enumerate(pbar):\n data_dict = dataset.full_pixel_set[i]\n for k, v in data_dict.items():\n data_dict[k] = v.to(device)\n if i < dataset.num_cams:\n # collect all patches from the first timestep\n with torch.no_grad():\n render_results = render_rays(\n radiance_field=model,\n proposal_estimator=proposal_estimator,\n proposal_networks=proposal_networks,\n data_dict=data_dict,\n cfg=cfg,\n proposal_requires_grad=False,\n )\n if \"dino_pe_free\" in render_results:\n dino_feats = render_results[\"dino_pe_free\"]\n else:\n dino_feats = render_results[\"dino_feat\"]\n dino_feats = dino_feats.reshape(-1, dino_feats.shape[-1])\n to_compute_pca_patches.append(dino_feats)\n # query the depth. we force a lidar mode here so that the renderer will skip\n # querying other features such as colors, features, etc.\n data_dict[\"lidar_origins\"] = data_dict[\"origins\"].to(device)\n data_dict[\"lidar_viewdirs\"] = data_dict[\"viewdirs\"].to(device)\n data_dict[\"lidar_normed_timestamps\"] = data_dict[\"normed_timestamps\"].to(device)\n with torch.no_grad():\n render_results = render_rays(\n radiance_field=model,\n proposal_estimator=proposal_estimator,\n proposal_networks=proposal_networks,\n data_dict=data_dict,\n cfg=cfg,\n proposal_requires_grad=False,\n prefix=\"lidar_\", # force lidar mode\n return_decomposition=True,\n )\n # ==== get the static voxels ======\n if is_dynamic:\n static_depth = render_results[\"static_depth\"]\n else:\n static_depth = render_results[\"depth\"]\n world_coords = (\n data_dict[\"lidar_origins\"] + data_dict[\"lidar_viewdirs\"] * static_depth\n )\n world_coords = world_coords[static_depth.squeeze() < 80]\n voxel_coords = world_coords_to_voxel_coords(\n world_coords, aabb_min, aabb_max, static_voxel_resolution\n )\n voxel_coords = voxel_coords.long()\n selector = (\n (voxel_coords[..., 0] >= 0)\n & (voxel_coords[..., 0] < static_voxel_resolution[0])\n & (voxel_coords[..., 1] >= 0)\n & (voxel_coords[..., 1] < static_voxel_resolution[1])\n & (voxel_coords[..., 2] >= 0)\n & (voxel_coords[..., 2] < static_voxel_resolution[2])\n )\n # split the voxel_coords into separate dimensions\n voxel_coords_x = voxel_coords[..., 0][selector]\n voxel_coords_y = voxel_coords[..., 1][selector]\n voxel_coords_z = voxel_coords[..., 2][selector]\n # index into empty_voxels using the separated coordinates\n empty_static_voxels[voxel_coords_x, voxel_coords_y, voxel_coords_z] = 1\n\n # ==== get the dynamic voxels ======\n if is_dynamic:\n dynamic_depth = render_results[\"dynamic_depth\"]\n world_coords = (\n data_dict[\"lidar_origins\"] + data_dict[\"lidar_viewdirs\"] * dynamic_depth\n )\n voxel_coords = world_coords_to_voxel_coords(\n world_coords, aabb_min, aabb_max, dynamic_voxel_resolution\n )\n voxel_coords = voxel_coords.long()\n selector = (\n (voxel_coords[..., 0] >= 0)\n & (voxel_coords[..., 0] < dynamic_voxel_resolution[0])\n & (voxel_coords[..., 1] >= 0)\n & (voxel_coords[..., 1] < dynamic_voxel_resolution[1])\n & (voxel_coords[..., 2] >= 0)\n & (voxel_coords[..., 2] < dynamic_voxel_resolution[2])\n )\n # split the voxel_coords into separate dimensions\n voxel_coords_x = voxel_coords[..., 0][selector]\n voxel_coords_y = voxel_coords[..., 1][selector]\n voxel_coords_z = voxel_coords[..., 2][selector]\n # index into empty_voxels using the separated coordinates\n empty_dynamic_voxels[voxel_coords_x, voxel_coords_y, voxel_coords_z] = 1\n if i % 
dataset.num_cams == 0 and i > 0:\n all_occupied_dynamic_points.append(\n voxel_coords_to_world_coords(\n aabb_min,\n aabb_max,\n dynamic_voxel_resolution,\n torch.nonzero(empty_dynamic_voxels),\n )\n )\n empty_dynamic_voxels = torch.zeros(\n *dynamic_voxel_resolution, device=device\n )\n # compute the pca reduction\n dummy_pca_reduction, color_min, color_max = get_robust_pca(\n torch.cat(to_compute_pca_patches, dim=0).to(device), m=2.5\n )\n # now let's query the features\n all_occupied_static_points = voxel_coords_to_world_coords(\n aabb_min, aabb_max, static_voxel_resolution, torch.nonzero(empty_static_voxels)\n )\n chunk = 2**18\n pca_colors = []\n occupied_points = []\n pbar = tqdm(\n range(0, all_occupied_static_points.shape[0], chunk),\n desc=\"querying static features\",\n dynamic_ncols=True,\n )\n for i in pbar:\n occupied_points_chunk = all_occupied_static_points[i : i + chunk]\n density_list = []\n # we need to accumulate the density from all proposal networks as well\n # to ensure reliable density estimation\n for p in proposal_networks:\n density_list.append(p(occupied_points_chunk)[\"density\"].squeeze(-1))\n with torch.no_grad():\n results = model.forward(\n occupied_points_chunk,\n query_feature_head=False,\n )\n density_list.append(results[\"density\"])\n density = torch.stack(density_list, dim=0)\n density = torch.mean(density, dim=0)\n # use a preset threshold to determine whether a voxel is occupied\n selector = density > 0.5\n occupied_points_chunk = occupied_points_chunk[selector]\n if len(occupied_points_chunk) == 0:\n # skip if no occupied points in this chunk\n continue\n with torch.no_grad():\n feats = model.forward(\n occupied_points_chunk,\n query_feature_head=True,\n query_pe_head=False,\n )[\"dino_feat\"]\n colors = feats @ dummy_pca_reduction\n del feats\n colors = (colors - color_min) / (color_max - color_min)\n pca_colors.append(torch.clamp(colors, 0, 1))\n occupied_points.append(occupied_points_chunk)\n\n pca_colors = torch.cat(pca_colors, dim=0)\n occupied_points = torch.cat(occupied_points, dim=0)\n if is_dynamic:\n dynamic_pca_colors = []\n dynamic_occupied_points = []\n unq_timestamps = dataset.pixel_source.unique_normalized_timestamps.to(device)\n # query every 10 frames\n pbar = tqdm(\n range(0, len(all_occupied_dynamic_points), 10),\n desc=\"querying dynamic fields\",\n dynamic_ncols=True,\n )\n for i in pbar:\n occupied_points_chunk = all_occupied_dynamic_points[i]\n normed_timestamps = unq_timestamps[i].repeat(\n occupied_points_chunk.shape[0], 1\n )\n with torch.no_grad():\n results = model.forward(\n occupied_points_chunk,\n data_dict={\"normed_timestamps\": normed_timestamps},\n query_feature_head=False,\n )\n selector = results[\"dynamic_density\"].squeeze() > 0.1\n occupied_points_chunk = occupied_points_chunk[selector]\n if len(occupied_points_chunk) == 0:\n continue\n # query some features\n normed_timestamps = unq_timestamps[i].repeat(\n occupied_points_chunk.shape[0], 1\n )\n with torch.no_grad():\n feats = model.forward(\n occupied_points_chunk,\n data_dict={\"normed_timestamps\": normed_timestamps},\n query_feature_head=True,\n query_pe_head=False,\n )[\"dynamic_dino_feat\"]\n colors = feats @ dummy_pca_reduction\n del feats\n colors = (colors - color_min) / (color_max - color_min)\n dynamic_pca_colors.append(torch.clamp(colors, 0, 1))\n dynamic_occupied_points.append(occupied_points_chunk)\n dynamic_coords = [x.cpu().numpy() for x in dynamic_occupied_points]\n dynamic_colors = [x.cpu().numpy() for x in dynamic_pca_colors]\n else:\n 
dynamic_coords = None\n dynamic_colors = None\n\n figure = vis_occ_plotly(\n vis_aabb=vis_voxel_aabb.cpu().numpy().tolist(),\n coords=occupied_points.cpu().numpy(),\n colors=pca_colors.cpu().numpy(),\n dynamic_coords=dynamic_coords,\n dynamic_colors=dynamic_colors,\n x_ratio=1,\n y_ratio=(aabb_length[1] / aabb_length[0]).item(),\n z_ratio=(aabb_length[2] / aabb_length[0]).item(),\n size=3,\n black_bg=True,\n title=f\"Lifted {cfg.data.pixel_source.feature_model_type} Features, PE_removed: {cfg.nerf.model.head.enable_learnable_pe}\",\n )\n # for plotly\n data = figure.to_dict()[\"data\"]\n layout = figure.to_dict()[\"layout\"]\n output_path = os.path.join(cfg.log_dir, f\"feature_field.json\")\n with open(output_path, \"w\") as f:\n json.dump({\"data\": data, \"layout\": layout}, f, cls=NumpyEncoder)\n logger.info(f\"Saved to {output_path}\")\n output_path = os.path.join(cfg.log_dir, f\"feature_field.html\")\n if save_html:\n figure.write_html(output_path)\n logger.info(f\"Query result saved to {output_path}\")" }, { "identifier": "visualize_scene_flow", "path": "utils/visualization_tools.py", "snippet": "def visualize_scene_flow(\n cfg: OmegaConf,\n model: RadianceField,\n dataset: SceneDataset = None,\n device: str = \"cuda\",\n save_html: bool = True,\n):\n pbar = tqdm(\n range(0, len(dataset.full_lidar_set) - 1, 10),\n desc=\"querying flow\",\n dynamic_ncols=True,\n )\n predicted_flow_colors, gt_flow_colors = [], []\n dynamic_coords = []\n for i in pbar:\n data_dict = dataset.full_lidar_set[i].copy()\n lidar_flow_class = data_dict[\"lidar_flow_class\"]\n for k, v in data_dict.items():\n # remove invalid flow (the information is from GT)\n data_dict[k] = v[lidar_flow_class != -1]\n\n if data_dict[k].shape[0] == 0:\n logger.info(f\"no valid points, skipping...\")\n continue\n # filter out ground points\n # for k, v in data_dict.items():\n # data_dict[k] = v[~data_dict[\"lidar_ground\"]]\n valid_lidar_mask = dataset.get_valid_lidar_mask(i, data_dict)\n for k, v in data_dict.items():\n data_dict[k] = v[valid_lidar_mask]\n lidar_points = (\n data_dict[\"lidar_origins\"]\n + data_dict[\"lidar_ranges\"] * data_dict[\"lidar_viewdirs\"]\n )\n normalized_timestamps = data_dict[\"lidar_normed_timestamps\"]\n with torch.no_grad():\n pred_results = model.query_flow(\n positions=lidar_points,\n normed_timestamps=normalized_timestamps,\n )\n pred_flow = pred_results[\"forward_flow\"]\n # flow is only valid when the point is not static\n pred_flow[pred_results[\"dynamic_density\"] < 0.2] *= 0\n\n predicted_flow_colors.append(\n scene_flow_to_rgb(pred_flow, flow_max_radius=2.0, background=\"bright\")\n .cpu()\n .numpy()\n )\n gt_flow_colors.append(\n scene_flow_to_rgb(\n data_dict[\"lidar_flow\"], flow_max_radius=2.0, background=\"bright\"\n )\n .cpu()\n .numpy()\n )\n dynamic_coords.append(lidar_points.cpu().numpy())\n\n vis_voxel_aabb = torch.tensor(model.aabb, device=device)\n # slightly expand the aabb to make sure all points are covered\n vis_voxel_aabb[1:3] -= 1\n vis_voxel_aabb[3:] += 1\n aabb_min, aabb_max = torch.split(vis_voxel_aabb, 3, dim=-1)\n aabb_length = aabb_max - aabb_min\n pred_figure = vis_occ_plotly(\n vis_aabb=vis_voxel_aabb.cpu().numpy().tolist(),\n dynamic_coords=dynamic_coords,\n dynamic_colors=predicted_flow_colors,\n x_ratio=1,\n y_ratio=(aabb_length[1] / aabb_length[0]).item(),\n z_ratio=(aabb_length[2] / aabb_length[0]).item(),\n size=2,\n black_bg=True,\n title=f\"Predicted Flow\",\n )\n gt_figure = vis_occ_plotly(\n vis_aabb=vis_voxel_aabb.cpu().numpy().tolist(),\n 
dynamic_coords=dynamic_coords,\n dynamic_colors=gt_flow_colors,\n x_ratio=1,\n y_ratio=(aabb_length[1] / aabb_length[0]).item(),\n z_ratio=(aabb_length[2] / aabb_length[0]).item(),\n size=2,\n black_bg=True,\n title=f\"GT Flow\",\n )\n if save_html:\n output_path = os.path.join(cfg.log_dir, f\"predicted_flow.html\")\n pred_figure.write_html(output_path)\n logger.info(f\"Predicted flow result saved to {output_path}\")\n output_path = os.path.join(cfg.log_dir, f\"gt_flow.html\")\n gt_figure.write_html(output_path)\n logger.info(f\"GT flow saved to {output_path}\")" } ]
import argparse import json import logging import os import time import imageio import numpy as np import torch import torch.utils.data import builders import loss import utils.misc as misc import wandb from typing import List, Optional from omegaconf import OmegaConf from tqdm import tqdm from datasets import metrics from datasets.base import SceneDataset from radiance_fields import DensityField, RadianceField from radiance_fields.render_utils import render_rays from radiance_fields.video_utils import render_pixels, save_videos from third_party.nerfacc_prop_net import PropNetEstimator, get_proposal_requires_grad_fn from utils.logging import MetricLogger, setup_logging from utils.visualization_tools import visualize_voxels, visualize_scene_flow from datasets.waymo import WaymoDataset from datasets.nuscenes import NuScenesDataset
21,002
proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, optimizer=optimizer, scheduler=scheduler, ) else: start_step = 0 logger.info( f"Will start training for {cfg.optim.num_iters} iterations from scratch" ) if args.visualize_voxel or args.eval_only: if cfg.nerf.model.head.enable_flow_branch: logger.info("Visualizing scene flow...") visualize_scene_flow( cfg=cfg, model=model, dataset=dataset, device=device, ) if cfg.nerf.model.head.enable_feature_head: logger.info("Visualizing voxel features...") visualize_voxels( cfg, model, proposal_estimator, proposal_networks, dataset, device=device, save_html=True, is_dynamic=cfg.nerf.model.head.enable_dynamic_branch, ) logger.info("Visualization done!") if args.eval_only: do_evaluation( step=start_step, cfg=cfg, model=model, proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, dataset=dataset, args=args, ) exit() # ------ build losses -------- # # rgb loss if cfg.data.pixel_source.load_rgb: rgb_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.rgb.loss_type, coef=cfg.supervision.rgb.loss_coef, name="rgb", check_nan=cfg.optim.check_nan, ) # lidar related losses if cfg.data.lidar_source.load_lidar and cfg.supervision.depth.enable: depth_loss_fn = loss.DepthLoss( loss_type=cfg.supervision.depth.loss_type, coef=cfg.supervision.depth.loss_coef, depth_error_percentile=cfg.supervision.depth.depth_error_percentile, check_nan=cfg.optim.check_nan, ) if cfg.supervision.depth.line_of_sight.enable: line_of_sight_loss_fn = loss.LineOfSightLoss( loss_type=cfg.supervision.depth.line_of_sight.loss_type, name="line_of_sight", depth_error_percentile=cfg.supervision.depth.depth_error_percentile, coef=cfg.supervision.depth.line_of_sight.loss_coef, check_nan=cfg.optim.check_nan, ) else: line_of_sight_loss_fn = None else: depth_loss_fn = None line_of_sight_loss_fn = None if cfg.data.pixel_source.load_sky_mask and cfg.nerf.model.head.enable_sky_head: sky_loss_fn = loss.SkyLoss( loss_type=cfg.supervision.sky.loss_type, coef=cfg.supervision.sky.loss_coef, check_nan=cfg.optim.check_nan, ) else: sky_loss_fn = None if cfg.data.pixel_source.load_features and cfg.nerf.model.head.enable_feature_head: feature_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.feature.loss_type, coef=cfg.supervision.feature.loss_coef, name="feature", check_nan=cfg.optim.check_nan, ) else: feature_loss_fn = None ## ------ dynamic related losses -------- # if cfg.nerf.model.head.enable_dynamic_branch: dynamic_reg_loss_fn = loss.DynamicRegularizationLoss( loss_type=cfg.supervision.dynamic.loss_type, coef=cfg.supervision.dynamic.loss_coef, entropy_skewness=cfg.supervision.dynamic.entropy_loss_skewness, check_nan=cfg.optim.check_nan, ) else: dynamic_reg_loss_fn = None if cfg.nerf.model.head.enable_shadow_head: shadow_loss_fn = loss.DynamicRegularizationLoss( name="shadow", loss_type=cfg.supervision.shadow.loss_type, coef=cfg.supervision.shadow.loss_coef, check_nan=cfg.optim.check_nan, ) else: shadow_loss_fn = None metrics_file = os.path.join(cfg.log_dir, "metrics.json") metric_logger = MetricLogger(delimiter=" ", output_file=metrics_file)
logger = logging.getLogger() current_time = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime()) # a global list of keys to render, # comment out the keys you don't want to render or uncomment the keys you want to render render_keys = [ "gt_rgbs", "rgbs", "depths", # "median_depths", "gt_dino_feats", "dino_feats", "dynamic_rgbs", "dynamic_depths", "static_rgbs", "static_depths", "forward_flows", "backward_flows", "dynamic_rgb_on_static_dinos", "dino_pe", "dino_feats_pe_free", # "dynamic_dino_on_static_rgbs", # "shadow_reduced_static_rgbs", # "shadow_only_static_rgbs", # "shadows", # "gt_sky_masks", # "sky_masks", ] def get_args_parser(): parser = argparse.ArgumentParser("Train EmernNerf for a single scene") parser.add_argument("--config_file", help="path to config file", type=str) parser.add_argument( "--eval_only", action="store_true", help="perform evaluation only" ) parser.add_argument( "--visualize_voxel", action="store_true", help="perform evaluation only" ) parser.add_argument( "--render_data_video", action="store_true", help="Render a data video", ) parser.add_argument( "--render_data_video_only", action="store_true", help="Quit after rendering a data video", ) parser.add_argument( "--render_video_postfix", type=str, default=None, help="an optional postfix for video", ) parser.add_argument( "--output_root", default="./work_dirs/", help="path to save checkpoints and logs", type=str, ) # wandb logging part parser.add_argument( "--enable_wandb", action="store_true", help="enable wandb logging" ) parser.add_argument( "--entity", default="YOUR ENTITY NAME", type=str, help="wandb entity name", required=False, ) parser.add_argument( "--project", default="emernerf", type=str, help="wandb project name, also used to enhance log_dir", required=True, ) parser.add_argument( "--run_name", default="debug", type=str, help="wandb run name, also used to enhance log_dir", required=True, ) parser.add_argument( "opts", help="Modify config options using the command-line", default=None, nargs=argparse.REMAINDER, ) return parser def setup(args): # ------ get config from args -------- # default_config = OmegaConf.create(OmegaConf.load("configs/default_config.yaml")) cfg = OmegaConf.load(args.config_file) cfg = OmegaConf.merge(default_config, cfg, OmegaConf.from_cli(args.opts)) log_dir = os.path.join(args.output_root, args.project, args.run_name) cfg.log_dir = log_dir cfg.nerf.model.num_cams = cfg.data.pixel_source.num_cams cfg.nerf.model.unbounded = cfg.nerf.unbounded cfg.nerf.propnet.unbounded = cfg.nerf.unbounded cfg.nerf.model.resume_from = cfg.resume_from os.makedirs(log_dir, exist_ok=True) for folder in [ "images", "full_videos", "test_videos", "lowres_videos", "metrics", "configs_bk", "buffer_maps", ]: os.makedirs(os.path.join(log_dir, folder), exist_ok=True) # ------ setup logging -------- # if args.enable_wandb: # sometimes wandb fails to init in cloud machines, so we give it several (many) tries while ( wandb.init( project=args.project, entity=args.entity, sync_tensorboard=True, settings=wandb.Settings(start_method="fork"), ) is not wandb.run ): continue wandb.run.name = args.run_name wandb.run.save() wandb.config.update(OmegaConf.to_container(cfg, resolve=True)) wandb.config.update(args) misc.fix_random_seeds(cfg.optim.seed) global logger setup_logging(output=log_dir, level=logging.INFO, time_string=current_time) logger.info( "\n".join("%s: %s" % (k, str(v)) for k, v in sorted(dict(vars(args)).items())) ) # -------- write config -------- # logger.info(f"Config:\n{OmegaConf.to_yaml(cfg)}") 
saved_cfg_path = os.path.join(log_dir, "config.yaml") with open(saved_cfg_path, "w") as f: OmegaConf.save(config=cfg, f=f) # also save a backup copy saved_cfg_path_bk = os.path.join( log_dir, "configs_bk", f"config_{current_time}.yaml" ) with open(saved_cfg_path_bk, "w") as f: OmegaConf.save(config=cfg, f=f) logger.info(f"Full config saved to {saved_cfg_path}, and {saved_cfg_path_bk}") return cfg @torch.no_grad() def do_evaluation( step: int = 0, cfg: OmegaConf = None, model: RadianceField = None, proposal_networks: Optional[List[DensityField]] = None, proposal_estimator: PropNetEstimator = None, dataset: SceneDataset = None, args: argparse.Namespace = None, ): logger.info("Evaluating on the full set...") model.eval() proposal_estimator.eval() for p in proposal_networks: p.eval() if cfg.eval.eval_occ: assert cfg.data.dataset == "waymo", "only support waymo dataset for now" device = model.device # use every cfg.eval.occ_annotation_stride frames for training train_indices = np.arange( 0, dataset.num_lidar_timesteps, cfg.eval.occ_annotation_stride ) test_indices = [ x for x in range(dataset.num_lidar_timesteps) if x not in train_indices ] # collect centroids and labels centroids_bank, label_bank = metrics.collect_centroids( train_indices, dataset, model, device ) logger.info("Evaluating Few-shot Occ...") occ_metrics = metrics.eval_few_shot_occ( test_indices, dataset, model, device, centroids_bank, label_bank ) occ_metrics_file = f"{cfg.log_dir}/metrics/occ_eval_{current_time}.json" with open(occ_metrics_file, "w") as f: json.dump(occ_metrics, f) if args.enable_wandb: wandb.log(occ_metrics) logger.info( f"Few-shot Occupancy evaluation metrics saved to {occ_metrics_file}" ) logger.info("Few-shot Occ Results:") logger.info(json.dumps(occ_metrics, indent=4)) logger.info( "===> Note: zero accuracy means no valid points for that class in the scene" ) torch.cuda.empty_cache() if cfg.eval.eval_lidar_flow and cfg.nerf.model.head.enable_flow_branch: assert cfg.data.dataset == "waymo", "only support waymo dataset for now" logger.info("Evaluating Lidar Flow...") # use metrics from NSFP all_flow_metrics = { "EPE3D": [], "acc3d_strict": [], "acc3d_relax": [], "angle_error": [], "outlier": [], } for data_dict in tqdm( dataset.full_lidar_set, "Evaluating Lidar Flow", dynamic_ncols=True ): lidar_flow_class = data_dict["lidar_flow_class"] for k, v in data_dict.items(): # remove invalid flow (the information is from GT) data_dict[k] = v[lidar_flow_class != -1] if data_dict[k].shape[0] == 0: logger.info(f"no valid points, skipping...") continue if cfg.eval.remove_ground_when_eval_lidar_flow: # following the setting in scene flow estimation works for k, v in data_dict.items(): data_dict[k] = v[~data_dict["lidar_ground"]] lidar_points = ( data_dict["lidar_origins"] + data_dict["lidar_ranges"] * data_dict["lidar_viewdirs"] ) normalized_timestamps = data_dict["lidar_normed_timestamps"] pred_results = model.query_flow( positions=lidar_points, normed_timestamps=normalized_timestamps, ) pred_flow = pred_results["forward_flow"] # flow is only valid when the point is not static pred_flow[pred_results["dynamic_density"] < 0.2] *= 0 # metrics in NSFP flow_metrics = metrics.compute_scene_flow_metrics( pred_flow[None, ...], data_dict["lidar_flow"][None, ...] 
) for k, v in flow_metrics.items(): all_flow_metrics[k].append(v) logger.info("Lidar Flow Results:") avg_flow_metrics = {k: np.mean(v) for k, v in all_flow_metrics.items()} logger.info(json.dumps(avg_flow_metrics, indent=4)) flow_metrics_file = f"{cfg.log_dir}/metrics/flow_eval_{current_time}.json" with open(flow_metrics_file, "w") as f: json.dump(avg_flow_metrics, f) logger.info(f"Flow estimation evaluation metrics saved to {flow_metrics_file}") if args.enable_wandb: wandb.log(avg_flow_metrics) torch.cuda.empty_cache() if cfg.data.pixel_source.load_rgb and cfg.render.render_low_res: logger.info("Rendering full set but in a low_resolution...") dataset.pixel_source.update_downscale_factor(1 / cfg.render.low_res_downscale) render_results = render_pixels( cfg=cfg, model=model, proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, dataset=dataset.full_pixel_set, compute_metrics=True, return_decomposition=True, ) dataset.pixel_source.reset_downscale_factor() if args.render_video_postfix is None: video_output_pth = os.path.join(cfg.log_dir, "lowres_videos", f"{step}.mp4") else: video_output_pth = os.path.join( cfg.log_dir, "lowres_videos", f"{step}_{args.render_video_postfix}.mp4", ) vis_frame_dict = save_videos( render_results, video_output_pth, num_timestamps=dataset.num_img_timesteps, keys=render_keys, save_seperate_video=cfg.logging.save_seperate_video, num_cams=dataset.pixel_source.num_cams, fps=cfg.render.fps, verbose=True, ) if args.enable_wandb: for k, v in vis_frame_dict.items(): wandb.log({f"pixel_rendering/lowres_full/{k}": wandb.Image(v)}) del render_results, vis_frame_dict torch.cuda.empty_cache() if cfg.data.pixel_source.load_rgb: logger.info("Evaluating Pixels...") if dataset.test_pixel_set is not None and cfg.render.render_test: logger.info("Evaluating Test Set Pixels...") render_results = render_pixels( cfg=cfg, model=model, proposal_estimator=proposal_estimator, dataset=dataset.test_pixel_set, proposal_networks=proposal_networks, compute_metrics=True, return_decomposition=True, ) eval_dict = {} for k, v in render_results.items(): if k in [ "psnr", "ssim", "feat_psnr", "masked_psnr", "masked_ssim", "masked_feat_psnr", ]: eval_dict[f"pixel_metrics/test/{k}"] = v if args.enable_wandb: wandb.log(eval_dict) test_metrics_file = f"{cfg.log_dir}/metrics/images_test_{current_time}.json" with open(test_metrics_file, "w") as f: json.dump(eval_dict, f) logger.info(f"Image evaluation metrics saved to {test_metrics_file}") if args.render_video_postfix is None: video_output_pth = f"{cfg.log_dir}/test_videos/{step}.mp4" else: video_output_pth = ( f"{cfg.log_dir}/test_videos/{step}_{args.render_video_postfix}.mp4" ) vis_frame_dict = save_videos( render_results, video_output_pth, num_timestamps=dataset.num_test_timesteps, keys=render_keys, num_cams=dataset.pixel_source.num_cams, save_seperate_video=cfg.logging.save_seperate_video, fps=cfg.render.fps, verbose=True, # save_images=True, ) if args.enable_wandb: for k, v in vis_frame_dict.items(): wandb.log({"pixel_rendering/test/" + k: wandb.Image(v)}) del render_results, vis_frame_dict torch.cuda.empty_cache() if cfg.render.render_full: logger.info("Evaluating Full Set...") render_results = render_pixels( cfg=cfg, model=model, proposal_estimator=proposal_estimator, dataset=dataset.full_pixel_set, proposal_networks=proposal_networks, compute_metrics=True, return_decomposition=True, ) eval_dict = {} for k, v in render_results.items(): if k in [ "psnr", "ssim", "feat_psnr", "masked_psnr", "masked_ssim", "masked_feat_psnr", ]: 
eval_dict[f"pixel_metrics/full/{k}"] = v if args.enable_wandb: wandb.log(eval_dict) test_metrics_file = f"{cfg.log_dir}/metrics/images_full_{current_time}.json" with open(test_metrics_file, "w") as f: json.dump(eval_dict, f) logger.info(f"Image evaluation metrics saved to {test_metrics_file}") if args.render_video_postfix is None: video_output_pth = f"{cfg.log_dir}/full_videos/{step}.mp4" else: video_output_pth = ( f"{cfg.log_dir}/full_videos/{step}_{args.render_video_postfix}.mp4" ) vis_frame_dict = save_videos( render_results, video_output_pth, num_timestamps=dataset.num_img_timesteps, keys=render_keys, num_cams=dataset.pixel_source.num_cams, save_seperate_video=cfg.logging.save_seperate_video, fps=cfg.render.fps, verbose=True, ) if args.enable_wandb: for k, v in vis_frame_dict.items(): wandb.log({"pixel_rendering/full/" + k: wandb.Image(v)}) del render_results, vis_frame_dict torch.cuda.empty_cache() # TODO: add a novel trajectory rendering part def main(args): cfg = setup(args) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # ------ build dataset -------- # # we need to set some hyper-parameters for the model based on the dataset, # e.g., aabb, number of training timestamps, number of cameras, etc, so # we build the dataset at first. if cfg.data.dataset == "waymo": dataset = WaymoDataset(data_cfg=cfg.data) else: dataset = NuScenesDataset(data_cfg=cfg.data) # To give us a quick preview of the scene, we render a data video if args.render_data_video or args.render_data_video_only: save_pth = os.path.join(cfg.log_dir, "data.mp4") # define a `render_data_videos` per dataset. dataset.render_data_videos(save_pth=save_pth, split="full") if args.render_data_video_only: logger.info("Render data video only, exiting...") exit() # ------ build proposal networks and models -------- # # we input the dataset to the model builder to set some hyper-parameters ( proposal_estimator, proposal_networks, ) = builders.build_estimator_and_propnet_from_cfg( nerf_cfg=cfg.nerf, optim_cfg=cfg.optim, dataset=dataset, device=device ) model = builders.build_model_from_cfg( cfg=cfg.nerf.model, dataset=dataset, device=device ) logger.info(f"PropNetEstimator: {proposal_networks}") logger.info(f"Model: {model}") # ------ build optimizer and grad scaler -------- # optimizer = builders.build_optimizer_from_cfg(cfg=cfg.optim, model=model) pixel_grad_scaler = torch.cuda.amp.GradScaler(2**10) lidar_grad_scaler = torch.cuda.amp.GradScaler(2**10) # ------ build scheduler -------- # scheduler = builders.build_scheduler_from_cfg(cfg=cfg.optim, optimizer=optimizer) if cfg.resume_from is not None: start_step = misc.resume_from_checkpoint( ckpt_path=cfg.resume_from, model=model, proposal_networks=proposal_networks, proposal_estimator=proposal_estimator, optimizer=optimizer, scheduler=scheduler, ) else: start_step = 0 logger.info( f"Will start training for {cfg.optim.num_iters} iterations from scratch" ) if args.visualize_voxel or args.eval_only: if cfg.nerf.model.head.enable_flow_branch: logger.info("Visualizing scene flow...") visualize_scene_flow( cfg=cfg, model=model, dataset=dataset, device=device, ) if cfg.nerf.model.head.enable_feature_head: logger.info("Visualizing voxel features...") visualize_voxels( cfg, model, proposal_estimator, proposal_networks, dataset, device=device, save_html=True, is_dynamic=cfg.nerf.model.head.enable_dynamic_branch, ) logger.info("Visualization done!") if args.eval_only: do_evaluation( step=start_step, cfg=cfg, model=model, proposal_networks=proposal_networks, 
proposal_estimator=proposal_estimator, dataset=dataset, args=args, ) exit() # ------ build losses -------- # # rgb loss if cfg.data.pixel_source.load_rgb: rgb_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.rgb.loss_type, coef=cfg.supervision.rgb.loss_coef, name="rgb", check_nan=cfg.optim.check_nan, ) # lidar related losses if cfg.data.lidar_source.load_lidar and cfg.supervision.depth.enable: depth_loss_fn = loss.DepthLoss( loss_type=cfg.supervision.depth.loss_type, coef=cfg.supervision.depth.loss_coef, depth_error_percentile=cfg.supervision.depth.depth_error_percentile, check_nan=cfg.optim.check_nan, ) if cfg.supervision.depth.line_of_sight.enable: line_of_sight_loss_fn = loss.LineOfSightLoss( loss_type=cfg.supervision.depth.line_of_sight.loss_type, name="line_of_sight", depth_error_percentile=cfg.supervision.depth.depth_error_percentile, coef=cfg.supervision.depth.line_of_sight.loss_coef, check_nan=cfg.optim.check_nan, ) else: line_of_sight_loss_fn = None else: depth_loss_fn = None line_of_sight_loss_fn = None if cfg.data.pixel_source.load_sky_mask and cfg.nerf.model.head.enable_sky_head: sky_loss_fn = loss.SkyLoss( loss_type=cfg.supervision.sky.loss_type, coef=cfg.supervision.sky.loss_coef, check_nan=cfg.optim.check_nan, ) else: sky_loss_fn = None if cfg.data.pixel_source.load_features and cfg.nerf.model.head.enable_feature_head: feature_loss_fn = loss.RealValueLoss( loss_type=cfg.supervision.feature.loss_type, coef=cfg.supervision.feature.loss_coef, name="feature", check_nan=cfg.optim.check_nan, ) else: feature_loss_fn = None ## ------ dynamic related losses -------- # if cfg.nerf.model.head.enable_dynamic_branch: dynamic_reg_loss_fn = loss.DynamicRegularizationLoss( loss_type=cfg.supervision.dynamic.loss_type, coef=cfg.supervision.dynamic.loss_coef, entropy_skewness=cfg.supervision.dynamic.entropy_loss_skewness, check_nan=cfg.optim.check_nan, ) else: dynamic_reg_loss_fn = None if cfg.nerf.model.head.enable_shadow_head: shadow_loss_fn = loss.DynamicRegularizationLoss( name="shadow", loss_type=cfg.supervision.shadow.loss_type, coef=cfg.supervision.shadow.loss_coef, check_nan=cfg.optim.check_nan, ) else: shadow_loss_fn = None metrics_file = os.path.join(cfg.log_dir, "metrics.json") metric_logger = MetricLogger(delimiter=" ", output_file=metrics_file)
proposal_requires_grad_fn = get_proposal_requires_grad_fn()
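The training script above constructs an optimizer, a scheduler, and two torch.cuda.amp.GradScaler instances (one for pixel losses, one for lidar losses) before the loss functions are assembled. As a reference for how such a scaler is normally wired into a training step, here is a minimal, self-contained sketch; it is not the repository's actual loop, and the toy nn.Linear model, data, and loss are placeholders:

import torch
import torch.nn as nn

def amp_train_step(model, optimizer, scaler, x, y):
    """One mixed-precision step with a GradScaler (illustrative pattern only)."""
    optimizer.zero_grad(set_to_none=True)
    with torch.autocast(device_type="cuda", dtype=torch.float16):
        loss = nn.functional.mse_loss(model(x), y)
    scaler.scale(loss).backward()   # scale the loss so fp16 grads do not underflow
    scaler.step(optimizer)          # unscales grads, skips the step if any are inf/NaN
    scaler.update()                 # grow/shrink the scale factor for the next step
    return loss.detach()

if torch.cuda.is_available():
    model = nn.Linear(8, 1).cuda()
    opt = torch.optim.Adam(model.parameters(), lr=1e-3)
    scaler = torch.cuda.amp.GradScaler(2**10)   # same init_scale as in the script above
    x, y = torch.randn(4, 8, device="cuda"), torch.randn(4, 1, device="cuda")
    print(amp_train_step(model, opt, scaler, x, y).item())

The init_scale of 2**10 mirrors the value passed above; GradScaler silently skips the optimizer step whenever the unscaled gradients contain inf or NaN.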
8
2023-10-11 20:56:27+00:00
24k
alibaba-damo-academy/FunCodec
funcodec/models/encoder/sanm_encoder.py
[ { "identifier": "overlap_chunk", "path": "funcodec/modules/streaming_utils/chunk_utilis.py", "snippet": "class overlap_chunk():\n\t\"\"\"\n\tauthor: Speech Lab, Alibaba Group, China\n\tSan-m: Memory equipped self-attention for end-to-end speech recognition\n\thttps://arxiv.org/abs/2006.01713\n\n\t\"\"\"\n\tdef __init__(self,\n\t\tchunk_size: tuple = (16,),\n\t\tstride: tuple = (10,),\n\t\tpad_left: tuple = (0,),\n\t\tencoder_att_look_back_factor: tuple = (1,),\n shfit_fsmn: int = 0,\n decoder_att_look_back_factor: tuple = (1,),\n\t):\n\n\t\tpad_left = self.check_chunk_size_args(chunk_size, pad_left)\n\t\tencoder_att_look_back_factor = self.check_chunk_size_args(chunk_size, encoder_att_look_back_factor)\n\t\tdecoder_att_look_back_factor = self.check_chunk_size_args(chunk_size, decoder_att_look_back_factor)\n\t\tself.chunk_size, self.stride, self.pad_left, self.encoder_att_look_back_factor, self.decoder_att_look_back_factor \\\n\t\t\t= chunk_size, stride, pad_left, encoder_att_look_back_factor, decoder_att_look_back_factor\n\t\tself.shfit_fsmn = shfit_fsmn\n\t\tself.x_add_mask = None\n\t\tself.x_rm_mask = None\n\t\tself.x_len = None\n\t\tself.mask_shfit_chunk = None\n\t\tself.mask_chunk_predictor = None\n\t\tself.mask_att_chunk_encoder = None\n\t\tself.mask_shift_att_chunk_decoder = None\n\t\tself.chunk_outs = None\n\t\tself.chunk_size_cur, self.stride_cur, self.pad_left_cur, self.encoder_att_look_back_factor_cur, self.chunk_size_pad_shift_cur \\\n\t\t\t= None, None, None, None, None\n\n\tdef check_chunk_size_args(self, chunk_size, x):\n\t\tif len(x) < len(chunk_size):\n\t\t\tx = [x[0] for i in chunk_size]\n\t\treturn x\n\n\tdef get_chunk_size(self,\n\t\tind: int = 0\n\t):\n\t\t# with torch.no_grad:\n\t\tchunk_size, stride, pad_left, encoder_att_look_back_factor, decoder_att_look_back_factor = \\\n\t\t\tself.chunk_size[ind], self.stride[ind], self.pad_left[ind], self.encoder_att_look_back_factor[ind], self.decoder_att_look_back_factor[ind]\n\t\tself.chunk_size_cur, self.stride_cur, self.pad_left_cur, self.encoder_att_look_back_factor_cur, self.chunk_size_pad_shift_cur, self.decoder_att_look_back_factor_cur \\\n\t\t\t= chunk_size, stride, pad_left, encoder_att_look_back_factor, chunk_size + self.shfit_fsmn, decoder_att_look_back_factor\n\t\treturn self.chunk_size_cur, self.stride_cur, self.pad_left_cur, self.encoder_att_look_back_factor_cur, self.chunk_size_pad_shift_cur\n\n\tdef random_choice(self, training=True, decoding_ind=None):\n\t\tchunk_num = len(self.chunk_size)\n\t\tind = 0\n\t\tif training and chunk_num > 1:\n\t\t\tind = torch.randint(0, chunk_num-1, ()).cpu().item()\n\t\tif not training and decoding_ind is not None:\n\t\t\tind = int(decoding_ind)\n\n\t\treturn ind\n\n\n\n\n\tdef gen_chunk_mask(self, x_len, ind=0, num_units=1, num_units_predictor=1):\n\n\t\twith torch.no_grad():\n\t\t\tx_len = x_len.cpu().numpy()\n\t\t\tx_len_max = x_len.max()\n\n\t\t\tchunk_size, stride, pad_left, encoder_att_look_back_factor, chunk_size_pad_shift = self.get_chunk_size(ind)\n\t\t\tshfit_fsmn = self.shfit_fsmn\n\t\t\tpad_right = chunk_size - stride - pad_left\n\n\t\t\tchunk_num_batch = np.ceil(x_len/stride).astype(np.int32)\n\t\t\tx_len_chunk = (chunk_num_batch-1) * chunk_size_pad_shift + shfit_fsmn + pad_left + 0 + x_len - (chunk_num_batch-1) * stride\n\t\t\tx_len_chunk = x_len_chunk.astype(x_len.dtype)\n\t\t\tx_len_chunk_max = x_len_chunk.max()\n\n\t\t\tchunk_num = int(math.ceil(x_len_max/stride))\n\t\t\tdtype = np.int32\n\t\t\tmax_len_for_x_mask_tmp = max(chunk_size, x_len_max + 
pad_left)\n\t\t\tx_add_mask = np.zeros([0, max_len_for_x_mask_tmp], dtype=dtype)\n\t\t\tx_rm_mask = np.zeros([max_len_for_x_mask_tmp, 0], dtype=dtype)\n\t\t\tmask_shfit_chunk = np.zeros([0, num_units], dtype=dtype)\n\t\t\tmask_chunk_predictor = np.zeros([0, num_units_predictor], dtype=dtype)\n\t\t\tmask_shift_att_chunk_decoder = np.zeros([0, 1], dtype=dtype)\n\t\t\tmask_att_chunk_encoder = np.zeros([0, chunk_num*chunk_size_pad_shift], dtype=dtype)\n\t\t\tfor chunk_ids in range(chunk_num):\n\t\t\t\t# x_mask add\n\t\t\t\tfsmn_padding = np.zeros((shfit_fsmn, max_len_for_x_mask_tmp), dtype=dtype)\n\t\t\t\tx_mask_cur = np.diag(np.ones(chunk_size, dtype=np.float32))\n\t\t\t\tx_mask_pad_left = np.zeros((chunk_size, chunk_ids * stride), dtype=dtype)\n\t\t\t\tx_mask_pad_right = np.zeros((chunk_size, max_len_for_x_mask_tmp), dtype=dtype)\n\t\t\t\tx_cur_pad = np.concatenate([x_mask_pad_left, x_mask_cur, x_mask_pad_right], axis=1)\n\t\t\t\tx_cur_pad = x_cur_pad[:chunk_size, :max_len_for_x_mask_tmp]\n\t\t\t\tx_add_mask_fsmn = np.concatenate([fsmn_padding, x_cur_pad], axis=0)\n\t\t\t\tx_add_mask = np.concatenate([x_add_mask, x_add_mask_fsmn], axis=0)\n\n\t\t\t\t# x_mask rm\n\t\t\t\tfsmn_padding = np.zeros((max_len_for_x_mask_tmp, shfit_fsmn),dtype=dtype)\n\t\t\t\tpadding_mask_left = np.zeros((max_len_for_x_mask_tmp, pad_left),dtype=dtype)\n\t\t\t\tpadding_mask_right = np.zeros((max_len_for_x_mask_tmp, pad_right), dtype=dtype)\n\t\t\t\tx_mask_cur = np.diag(np.ones(stride, dtype=dtype))\n\t\t\t\tx_mask_cur_pad_top = np.zeros((chunk_ids*stride, stride), dtype=dtype)\n\t\t\t\tx_mask_cur_pad_bottom = np.zeros((max_len_for_x_mask_tmp, stride), dtype=dtype)\n\t\t\t\tx_rm_mask_cur = np.concatenate([x_mask_cur_pad_top, x_mask_cur, x_mask_cur_pad_bottom], axis=0)\n\t\t\t\tx_rm_mask_cur = x_rm_mask_cur[:max_len_for_x_mask_tmp, :stride]\n\t\t\t\tx_rm_mask_cur_fsmn = np.concatenate([fsmn_padding, padding_mask_left, x_rm_mask_cur, padding_mask_right], axis=1)\n\t\t\t\tx_rm_mask = np.concatenate([x_rm_mask, x_rm_mask_cur_fsmn], axis=1)\n\n\t\t\t\t# fsmn_padding_mask\n\t\t\t\tpad_shfit_mask = np.zeros([shfit_fsmn, num_units], dtype=dtype)\n\t\t\t\tones_1 = np.ones([chunk_size, num_units], dtype=dtype)\n\t\t\t\tmask_shfit_chunk_cur = np.concatenate([pad_shfit_mask, ones_1], axis=0)\n\t\t\t\tmask_shfit_chunk = np.concatenate([mask_shfit_chunk, mask_shfit_chunk_cur], axis=0)\n\n\t\t\t\t# predictor mask\n\t\t\t\tzeros_1 = np.zeros([shfit_fsmn + pad_left, num_units_predictor], dtype=dtype)\n\t\t\t\tones_2 = np.ones([stride, num_units_predictor], dtype=dtype)\n\t\t\t\tzeros_3 = np.zeros([chunk_size - stride - pad_left, num_units_predictor], dtype=dtype)\n\t\t\t\tones_zeros = np.concatenate([ones_2, zeros_3], axis=0)\n\t\t\t\tmask_chunk_predictor_cur = np.concatenate([zeros_1, ones_zeros], axis=0)\n\t\t\t\tmask_chunk_predictor = np.concatenate([mask_chunk_predictor, mask_chunk_predictor_cur], axis=0)\n\n\t\t\t\t# encoder att mask\n\t\t\t\tzeros_1_top = np.zeros([shfit_fsmn, chunk_num*chunk_size_pad_shift], dtype=dtype)\n\n\t\t\t\tzeros_2_num = max(chunk_ids - encoder_att_look_back_factor, 0)\n\t\t\t\tzeros_2 = np.zeros([chunk_size, zeros_2_num*chunk_size_pad_shift], dtype=dtype)\n\n\t\t\t\tencoder_att_look_back_num = max(chunk_ids - zeros_2_num, 0)\n\t\t\t\tzeros_2_left = np.zeros([chunk_size, shfit_fsmn], dtype=dtype)\n\t\t\t\tones_2_mid = np.ones([stride, stride], dtype=dtype)\n\t\t\t\tzeros_2_bottom = np.zeros([chunk_size-stride, stride], dtype=dtype)\n\t\t\t\tzeros_2_right = np.zeros([chunk_size, chunk_size-stride], 
dtype=dtype)\n\t\t\t\tones_2 = np.concatenate([ones_2_mid, zeros_2_bottom], axis=0)\n\t\t\t\tones_2 = np.concatenate([zeros_2_left, ones_2, zeros_2_right], axis=1)\n\t\t\t\tones_2 = np.tile(ones_2, [1, encoder_att_look_back_num])\n\n\t\t\t\tzeros_3_left = np.zeros([chunk_size, shfit_fsmn], dtype=dtype)\n\t\t\t\tones_3_right = np.ones([chunk_size, chunk_size], dtype=dtype)\n\t\t\t\tones_3 = np.concatenate([zeros_3_left, ones_3_right], axis=1)\n\n\t\t\t\tzeros_remain_num = max(chunk_num - 1 - chunk_ids, 0)\n\t\t\t\tzeros_remain = np.zeros([chunk_size, zeros_remain_num*chunk_size_pad_shift], dtype=dtype)\n\n\t\t\t\tones2_bottom = np.concatenate([zeros_2, ones_2, ones_3, zeros_remain], axis=1)\n\t\t\t\tmask_att_chunk_encoder_cur = np.concatenate([zeros_1_top, ones2_bottom], axis=0)\n\t\t\t\tmask_att_chunk_encoder = np.concatenate([mask_att_chunk_encoder, mask_att_chunk_encoder_cur], axis=0)\n\n\n\t\t\t\t# decoder fsmn_shift_att_mask\n\t\t\t\tzeros_1 = np.zeros([shfit_fsmn, 1])\n\t\t\t\tones_1 = np.ones([chunk_size, 1])\n\t\t\t\tmask_shift_att_chunk_decoder_cur = np.concatenate([zeros_1, ones_1], axis=0)\n\t\t\t\tmask_shift_att_chunk_decoder = np.concatenate(\n\t\t\t\t\t[mask_shift_att_chunk_decoder, mask_shift_att_chunk_decoder_cur], axis=0)\n\n\t\t\tself.x_add_mask = x_add_mask[:x_len_chunk_max, :x_len_max+pad_left]\n\t\t\tself.x_len_chunk = x_len_chunk\n\t\t\tself.x_rm_mask = x_rm_mask[:x_len_max, :x_len_chunk_max]\n\t\t\tself.x_len = x_len\n\t\t\tself.mask_shfit_chunk = mask_shfit_chunk[:x_len_chunk_max, :]\n\t\t\tself.mask_chunk_predictor = mask_chunk_predictor[:x_len_chunk_max, :]\n\t\t\tself.mask_att_chunk_encoder = mask_att_chunk_encoder[:x_len_chunk_max, :x_len_chunk_max]\n\t\t\tself.mask_shift_att_chunk_decoder = mask_shift_att_chunk_decoder[:x_len_chunk_max, :]\n\t\t\tself.chunk_outs = (self.x_add_mask,\n\t\t self.x_len_chunk,\n\t\t self.x_rm_mask,\n\t\t self.x_len,\n\t\t self.mask_shfit_chunk,\n\t\t self.mask_chunk_predictor,\n\t\t self.mask_att_chunk_encoder,\n\t\t self.mask_shift_att_chunk_decoder)\n\n\t\treturn self.chunk_outs\n\n\n\tdef split_chunk(self, x, x_len, chunk_outs):\n\t\t\"\"\"\n\t\t:param x: (b, t, d)\n\t\t:param x_length: (b)\n\t\t:param ind: int\n\t\t:return:\n\t\t\"\"\"\n\t\tx = x[:, :x_len.max(), :]\n\t\tb, t, d = x.size()\n\t\tx_len_mask = (~make_pad_mask(x_len, maxlen=t)).to(\n\t\t\tx.device)\n\t\tx *= x_len_mask[:, :, None]\n\n\t\tx_add_mask = self.get_x_add_mask(chunk_outs, x.device, dtype=x.dtype)\n\t\tx_len_chunk = self.get_x_len_chunk(chunk_outs, x_len.device, dtype=x_len.dtype)\n\t\tpad = (0, 0, self.pad_left_cur, 0)\n\t\tx = F.pad(x, pad, \"constant\", 0.0)\n\t\tb, t, d = x.size()\n\t\tx = torch.transpose(x, 1, 0)\n\t\tx = torch.reshape(x, [t, -1])\n\t\tx_chunk = torch.mm(x_add_mask, x)\n\t\tx_chunk = torch.reshape(x_chunk, [-1, b, d]).transpose(1, 0)\n\n\t\treturn x_chunk, x_len_chunk\n\n\tdef remove_chunk(self, x_chunk, x_len_chunk, chunk_outs):\n\t\tx_chunk = x_chunk[:, :x_len_chunk.max(), :]\n\t\tb, t, d = x_chunk.size()\n\t\tx_len_chunk_mask = (~make_pad_mask(x_len_chunk, maxlen=t)).to(\n\t\t\tx_chunk.device)\n\t\tx_chunk *= x_len_chunk_mask[:, :, None]\n\n\t\tx_rm_mask = self.get_x_rm_mask(chunk_outs, x_chunk.device, dtype=x_chunk.dtype)\n\t\tx_len = self.get_x_len(chunk_outs, x_len_chunk.device, dtype=x_len_chunk.dtype)\n\t\tx_chunk = torch.transpose(x_chunk, 1, 0)\n\t\tx_chunk = torch.reshape(x_chunk, [t, -1])\n\t\tx = torch.mm(x_rm_mask, x_chunk)\n\t\tx = torch.reshape(x, [-1, b, d]).transpose(1, 0)\n\n\t\treturn x, x_len\n\n\tdef 
get_x_add_mask(self, chunk_outs=None, device='cpu', idx=0, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\tdef get_x_len_chunk(self, chunk_outs=None, device='cpu', idx=1, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\n\tdef get_x_rm_mask(self, chunk_outs=None, device='cpu', idx=2, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\tdef get_x_len(self, chunk_outs=None, device='cpu', idx=3, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\n\tdef get_mask_shfit_chunk(self, chunk_outs=None, device='cpu', batch_size=1, num_units=1, idx=4, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = np.tile(x[None, :, :, ], [batch_size, 1, num_units])\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\tdef get_mask_chunk_predictor(self, chunk_outs=None, device='cpu', batch_size=1, num_units=1, idx=5, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = np.tile(x[None, :, :, ], [batch_size, 1, num_units])\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\tdef get_mask_att_chunk_encoder(self, chunk_outs=None, device='cpu', batch_size=1, idx=6, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = np.tile(x[None, :, :, ], [batch_size, 1, 1])\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x\n\n\tdef get_mask_shift_att_chunk_decoder(self, chunk_outs=None, device='cpu', batch_size=1, idx=7, dtype=torch.float32):\n\t\twith torch.no_grad():\n\t\t\tx = chunk_outs[idx] if chunk_outs is not None else self.chunk_outs[idx]\n\t\t\tx = np.tile(x[None, None, :, 0], [batch_size, 1, 1])\n\t\t\tx = torch.from_numpy(x).type(dtype).to(device)\n\t\treturn x" }, { "identifier": "make_pad_mask", "path": "funcodec/modules/nets_utils.py", "snippet": "def make_pad_mask(lengths, xs=None, length_dim=-1, maxlen=None):\n \"\"\"Make mask tensor containing indices of padded part.\n\n Args:\n lengths (LongTensor or List): Batch of lengths (B,).\n xs (Tensor, optional): The reference tensor.\n If set, masks will be the same shape as this tensor.\n length_dim (int, optional): Dimension indicator of the above tensor.\n See the example.\n\n Returns:\n Tensor: Mask tensor containing indices of padded part.\n dtype=torch.uint8 in PyTorch 1.2-\n dtype=torch.bool in PyTorch 1.2+ (including 1.2)\n\n Examples:\n With only lengths.\n\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n\n With the reference tensor.\n\n >>> xs = torch.zeros((3, 2, 4))\n >>> make_pad_mask(lengths, xs)\n tensor([[[0, 0, 0, 0],\n [0, 0, 0, 0]],\n [[0, 0, 0, 1],\n [0, 0, 0, 1]],\n [[0, 0, 1, 1],\n [0, 0, 1, 1]]], dtype=torch.uint8)\n >>> xs = torch.zeros((3, 2, 6))\n >>> make_pad_mask(lengths, 
xs)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n With the reference tensor and dimension indicator.\n\n >>> xs = torch.zeros((3, 6, 6))\n >>> make_pad_mask(lengths, xs, 1)\n tensor([[[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]],\n [[0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1]]], dtype=torch.uint8)\n >>> make_pad_mask(lengths, xs, 2)\n tensor([[[0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1],\n [0, 0, 0, 0, 0, 1]],\n [[0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1],\n [0, 0, 0, 1, 1, 1]],\n [[0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1],\n [0, 0, 1, 1, 1, 1]]], dtype=torch.uint8)\n\n \"\"\"\n if length_dim == 0:\n raise ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if maxlen is None:\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n else:\n assert xs is None\n assert maxlen >= int(max(lengths))\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n # ind = (:, None, ..., None, :, , None, ..., None)\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask" }, { "identifier": "MultiHeadedAttention", "path": "funcodec/modules/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttention, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(self, query, key, value):\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = 
self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(self, value, scores, mask):\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n min_value = float(\n numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min\n )\n scores = scores.masked_fill(mask, min_value)\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, query, key, value, mask):\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask)" }, { "identifier": "MultiHeadedAttentionSANM", "path": "funcodec/modules/attention.py", "snippet": "class MultiHeadedAttentionSANM(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head, in_feat, n_feat, dropout_rate, kernel_size, sanm_shfit=0):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super(MultiHeadedAttentionSANM, self).__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n # self.linear_q = nn.Linear(n_feat, n_feat)\n # self.linear_k = nn.Linear(n_feat, n_feat)\n # self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.linear_q_k_v = nn.Linear(in_feat, n_feat * 3)\n self.attn = None\n self.dropout = nn.Dropout(p=dropout_rate)\n\n self.fsmn_block = nn.Conv1d(n_feat, n_feat, kernel_size, stride=1, padding=0, groups=n_feat, bias=False)\n # padding\n left_padding = (kernel_size - 1) // 2\n if sanm_shfit > 0:\n left_padding = left_padding + sanm_shfit\n right_padding = kernel_size - 1 - left_padding\n self.pad_fn = nn.ConstantPad1d((left_padding, right_padding), 0.0)\n\n def forward_fsmn(self, inputs, mask, mask_shfit_chunk=None):\n b, t, d = inputs.size()\n if mask is not None:\n mask = torch.reshape(mask, (b, -1, 1))\n if mask_shfit_chunk is not None:\n mask = mask * 
mask_shfit_chunk\n\n inputs = inputs * mask\n x = inputs.transpose(1, 2)\n x = self.pad_fn(x)\n x = self.fsmn_block(x)\n x = x.transpose(1, 2)\n x += inputs\n x = self.dropout(x)\n return x * mask\n\n def forward_qkv(self, x):\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\n\n \"\"\"\n b, t, d = x.size()\n q_k_v = self.linear_q_k_v(x)\n q, k, v = torch.split(q_k_v, int(self.h * self.d_k), dim=-1)\n q_h = torch.reshape(q, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time1, d_k)\n k_h = torch.reshape(k, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)\n v_h = torch.reshape(v, (b, t, self.h, self.d_k)).transpose(1, 2) # (batch, head, time2, d_k)\n\n return q_h, k_h, v_h, v\n\n def forward_attention(self, value, scores, mask, mask_att_chunk_encoder=None):\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n if mask is not None:\n if mask_att_chunk_encoder is not None:\n mask = mask * mask_att_chunk_encoder\n\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n\n min_value = float(\n numpy.finfo(torch.tensor(0, dtype=scores.dtype).numpy().dtype).min\n )\n scores = scores.masked_fill(mask, min_value)\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n else:\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(self.attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(self, x, mask, mask_shfit_chunk=None, mask_att_chunk_encoder=None):\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n\n \"\"\"\n q_h, k_h, v_h, v = self.forward_qkv(x)\n fsmn_memory = self.forward_fsmn(v, mask, mask_shfit_chunk)\n q_h = q_h * self.d_k ** (-0.5)\n scores = torch.matmul(q_h, k_h.transpose(-2, -1))\n att_outs = self.forward_attention(v_h, scores, mask, mask_att_chunk_encoder)\n return att_outs + fsmn_memory" }, { "identifier": "SinusoidalPositionEncoder", "path": "funcodec/modules/embedding.py", "snippet": "class SinusoidalPositionEncoder(torch.nn.Module):\n '''\n\n '''\n def __int__(self, d_model=80, dropout_rate=0.1):\n pass\n\n def encode(self, positions: torch.Tensor = None, depth: int = None, dtype: torch.dtype = torch.float32):\n batch_size = positions.size(0)\n positions = 
positions.type(dtype)\n log_timescale_increment = torch.log(torch.tensor([10000], dtype=dtype)) / (depth / 2 - 1)\n inv_timescales = torch.exp(torch.arange(depth / 2).type(dtype) * (-log_timescale_increment))\n inv_timescales = torch.reshape(inv_timescales, [batch_size, -1])\n scaled_time = torch.reshape(positions, [1, -1, 1]) * torch.reshape(inv_timescales, [1, 1, -1])\n encoding = torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=2)\n return encoding.type(dtype)\n\n def forward(self, x):\n batch_size, timesteps, input_dim = x.size()\n positions = torch.arange(1, timesteps+1)[None, :]\n position_encoding = self.encode(positions, input_dim, x.dtype).to(x.device)\n\n return x + position_encoding" }, { "identifier": "LayerNorm", "path": "funcodec/modules/layer_norm.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\n \"\"\"Layer normalization module.\n\n Args:\n nout (int): Output dim size.\n dim (int): Dimension to be normalized.\n\n \"\"\"\n\n def __init__(self, nout, dim=-1):\n \"\"\"Construct an LayerNorm object.\"\"\"\n super(LayerNorm, self).__init__(nout, eps=1e-12)\n self.dim = dim\n\n def forward(self, x):\n \"\"\"Apply layer normalization.\n\n Args:\n x (torch.Tensor): Input tensor.\n\n Returns:\n torch.Tensor: Normalized tensor.\n\n \"\"\"\n if self.dim == -1:\n return super(LayerNorm, self).forward(x)\n return (\n super(LayerNorm, self)\n .forward(x.transpose(self.dim, -1))\n .transpose(self.dim, -1)\n )" }, { "identifier": "Conv1dLinear", "path": "funcodec/modules/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\n \"\"\"Conv1D + Linear for Transformer block.\n\n A variant of MultiLayeredConv1d, which replaces second conv-layer to linear.\n\n \"\"\"\n\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\n \"\"\"Initialize Conv1dLinear module.\n\n Args:\n in_chans (int): Number of input channels.\n hidden_chans (int): Number of hidden channels.\n kernel_size (int): Kernel size of conv1d.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n super(Conv1dLinear, self).__init__()\n self.w_1 = torch.nn.Conv1d(\n in_chans,\n hidden_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\n self.dropout = torch.nn.Dropout(dropout_rate)\n\n def forward(self, x):\n \"\"\"Calculate forward propagation.\n\n Args:\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\n\n Returns:\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\n\n \"\"\"\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\n return self.w_2(self.dropout(x))" }, { "identifier": "MultiLayeredConv1d", "path": "funcodec/modules/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\n \"\"\"Multi-layered conv1d for Transformer block.\n\n This is a module of multi-leyered conv1d designed\n to replace positionwise feed-forward network\n in Transforner block, which is introduced in\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\n\n .. 
_`FastSpeech: Fast, Robust and Controllable Text to Speech`:\n https://arxiv.org/pdf/1905.09263.pdf\n\n \"\"\"\n\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\n \"\"\"Initialize MultiLayeredConv1d module.\n\n Args:\n in_chans (int): Number of input channels.\n hidden_chans (int): Number of hidden channels.\n kernel_size (int): Kernel size of conv1d.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n super(MultiLayeredConv1d, self).__init__()\n self.w_1 = torch.nn.Conv1d(\n in_chans,\n hidden_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.w_2 = torch.nn.Conv1d(\n hidden_chans,\n in_chans,\n kernel_size,\n stride=1,\n padding=(kernel_size - 1) // 2,\n )\n self.dropout = torch.nn.Dropout(dropout_rate)\n\n def forward(self, x):\n \"\"\"Calculate forward propagation.\n\n Args:\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\n\n Returns:\n torch.Tensor: Batch of output tensors (B, T, hidden_chans).\n\n \"\"\"\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)" }, { "identifier": "PositionwiseFeedForward", "path": "funcodec/modules/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n Args:\n idim (int): Input dimenstion.\n hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\n \"\"\"Construct an PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.activation = activation\n\n def forward(self, x):\n \"\"\"Forward function.\"\"\"\n return self.w_2(self.dropout(self.activation(self.w_1(x))))" }, { "identifier": "repeat", "path": "funcodec/modules/repeat.py", "snippet": "def repeat(N, fn):\n \"\"\"Repeat module N times.\n\n Args:\n N (int): Number of repeat time.\n fn (Callable): Function to generate module.\n\n Returns:\n MultiSequential: Repeated model instance.\n\n \"\"\"\n return MultiSequential(*[fn(n) for n in range(N)])" }, { "identifier": "Conv2dSubsampling", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling object.\"\"\"\n super(Conv2dSubsampling, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 4.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 4.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, 
t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\n\n def __getitem__(self, key):\n \"\"\"Get item.\n\n When reset_parameters() is called, if use_scaled_pos_enc is used,\n return the positioning encoding.\n\n \"\"\"\n if key != -1:\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\n return self.out[key]" }, { "identifier": "Conv2dSubsampling2", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling2(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/2 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling2 object.\"\"\"\n super(Conv2dSubsampling2, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 1),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 2)), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 2.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 2.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:1]\n\n def __getitem__(self, key):\n \"\"\"Get item.\n\n When reset_parameters() is called, if use_scaled_pos_enc is used,\n return the positioning encoding.\n\n \"\"\"\n if key != -1:\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\n return self.out[key]" }, { "identifier": "Conv2dSubsampling6", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling6(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/6 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling6 object.\"\"\"\n super(Conv2dSubsampling6, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 5, 3),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 6.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 6.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 
2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-4:3]" }, { "identifier": "Conv2dSubsampling8", "path": "funcodec/modules/subsampling.py", "snippet": "class Conv2dSubsampling8(torch.nn.Module):\n \"\"\"Convolutional 2D subsampling (to 1/8 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n\n \"\"\"\n\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\n \"\"\"Construct an Conv2dSubsampling8 object.\"\"\"\n super(Conv2dSubsampling8, self).__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim),\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\n )\n\n def forward(self, x, x_mask):\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 8.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 8.\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n if x_mask is None:\n return x, None\n return x, x_mask[:, :, :-2:2][:, :, :-2:2][:, :, :-2:2]" }, { "identifier": "TooShortUttError", "path": "funcodec/modules/subsampling.py", "snippet": "class TooShortUttError(Exception):\n \"\"\"Raised when the utt is too short for subsampling.\n\n Args:\n message (str): Message for error catch\n actual_size (int): the short size that cannot pass the subsampling\n limit (int): the limit size for subsampling\n\n \"\"\"\n\n def __init__(self, message, actual_size, limit):\n \"\"\"Construct a TooShortUttError for error handler.\"\"\"\n super().__init__(message)\n self.actual_size = actual_size\n self.limit = limit" }, { "identifier": "check_short_utt", "path": "funcodec/modules/subsampling.py", "snippet": "def check_short_utt(ins, size):\n \"\"\"Check if the utterance is too short for subsampling.\"\"\"\n if isinstance(ins, Conv2dSubsampling2) and size < 3:\n return True, 3\n if isinstance(ins, Conv2dSubsampling) and size < 7:\n return True, 7\n if isinstance(ins, Conv2dSubsampling6) and size < 11:\n return True, 11\n if isinstance(ins, Conv2dSubsampling8) and size < 15:\n return True, 15\n return False, -1" }, { "identifier": "AbsEncoder", "path": "funcodec/models/encoder/abs_encoder.py", "snippet": "class AbsEncoder(torch.nn.Module, ABC):\n @abstractmethod\n def output_size(self) -> int:\n raise NotImplementedError\n\n @abstractmethod\n def forward(\n self,\n xs_pad: torch.Tensor,\n ilens: torch.Tensor,\n prev_states: torch.Tensor = None,\n ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:\n raise NotImplementedError" } ]
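Among the context snippets above, MultiHeadedAttentionSANM augments standard multi-head attention with an FSMN-style memory: forward_fsmn pads the value sequence asymmetrically (controlled by sanm_shfit) and runs a depthwise Conv1d whose output is added back as a residual. The following standalone sketch isolates just that memory block; mask handling is omitted for brevity, and the class name FsmnMemoryBlock and the argument name sanm_shift are mine, not FunCodec's:

import torch
import torch.nn as nn

class FsmnMemoryBlock(nn.Module):
    """Simplified sketch of the memory block inside MultiHeadedAttentionSANM:
    a depthwise 1-D convolution over time, added back to its input as a residual."""
    def __init__(self, n_feat, kernel_size, sanm_shift=0, dropout=0.1):
        super().__init__()
        self.conv = nn.Conv1d(n_feat, n_feat, kernel_size,
                              stride=1, padding=0, groups=n_feat, bias=False)
        left = (kernel_size - 1) // 2 + sanm_shift   # a positive shift moves the window left
        right = kernel_size - 1 - left
        self.pad = nn.ConstantPad1d((left, right), 0.0)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):                  # x: (batch, time, n_feat)
        y = self.pad(x.transpose(1, 2))    # (batch, n_feat, time + kernel_size - 1)
        y = self.conv(y).transpose(1, 2)   # back to (batch, time, n_feat)
        return self.dropout(y + x)         # residual add, as in forward_fsmn

x = torch.randn(2, 16, 32)
print(FsmnMemoryBlock(32, kernel_size=11)(x).shape)   # torch.Size([2, 16, 32])

With kernel_size=11 and no shift, each time step mixes in five frames of left and five frames of right context, which is the local memory SAN-M adds on top of global attention.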
from typing import List from typing import Optional from typing import Sequence from typing import Tuple from typing import Union from funcodec.modules.streaming_utils.chunk_utilis import overlap_chunk from funcodec.modules.nets_utils import make_pad_mask from funcodec.modules.attention import MultiHeadedAttention, MultiHeadedAttentionSANM from funcodec.modules.embedding import SinusoidalPositionEncoder from funcodec.modules.layer_norm import LayerNorm from funcodec.modules.multi_layer_conv import Conv1dLinear from funcodec.modules.multi_layer_conv import MultiLayeredConv1d from funcodec.modules.positionwise_feed_forward import ( PositionwiseFeedForward, # noqa: H301 ) from funcodec.modules.repeat import repeat from funcodec.modules.subsampling import Conv2dSubsampling from funcodec.modules.subsampling import Conv2dSubsampling2 from funcodec.modules.subsampling import Conv2dSubsampling6 from funcodec.modules.subsampling import Conv2dSubsampling8 from funcodec.modules.subsampling import TooShortUttError from funcodec.modules.subsampling import check_short_utt from funcodec.models.encoder.abs_encoder import AbsEncoder import logging import torch import torch.nn as nn import numpy as np
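The import list above pulls in make_pad_mask, which the encoder's forward uses to turn per-utterance lengths into a padding mask. Its behaviour can be re-derived from the docstring shown in the context; the helper below is a minimal reimplementation for illustration (the name make_pad_mask_sketch is mine), where True marks padded positions:

import torch

def make_pad_mask_sketch(lengths, maxlen=None):
    """Minimal re-derivation of make_pad_mask from its docstring above."""
    lengths = torch.as_tensor(lengths)
    maxlen = int(lengths.max()) if maxlen is None else maxlen
    arange = torch.arange(maxlen)
    return arange[None, :] >= lengths[:, None]   # True where the frame is padding

print(make_pad_mask_sketch([5, 3, 2]))
# tensor([[False, False, False, False, False],
#         [False, False, False,  True,  True],
#         [False, False,  True,  True,  True]])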
15077
self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") if selfattention_layer_type == "selfattn": encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, output_size, attention_dropout_rate, ) elif selfattention_layer_type == "sanm": encoder_selfattn_layer = MultiHeadedAttentionSANM encoder_selfattn_layer_args0 = ( attention_heads, input_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit, ) encoder_selfattn_layer_args = ( attention_heads, output_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit, ) self.encoders0 = repeat( 1, lambda lnum: EncoderLayerSANM( input_size, output_size, encoder_selfattn_layer(*encoder_selfattn_layer_args0), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after, ), ) self.encoders = repeat( num_blocks-1, lambda lnum: EncoderLayerSANM( output_size, output_size, encoder_selfattn_layer(*encoder_selfattn_layer_args), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after, ), ) if self.normalize_before: self.after_norm = LayerNorm(output_size) self.interctc_layer_idx = interctc_layer_idx if len(interctc_layer_idx) > 0: assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks self.interctc_use_conditioning = interctc_use_conditioning self.conditioning_layer = None self.dropout = nn.Dropout(dropout_rate) self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf def output_size(self) -> int: return self._output_size def forward( self, xs_pad: torch.Tensor, ilens: torch.Tensor, prev_states: torch.Tensor = None, ctc = None, ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: """Embed positions in tensor. Args: xs_pad: input tensor (B, L, D) ilens: input length (B) prev_states: Not to be used now. Returns: position embedded tensor and mask """ masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device) xs_pad = xs_pad * self.output_size()**0.5 if self.embed is None: xs_pad = xs_pad elif ( isinstance(self.embed, Conv2dSubsampling) or isinstance(self.embed, Conv2dSubsampling2) or isinstance(self.embed, Conv2dSubsampling6) or isinstance(self.embed, Conv2dSubsampling8) ):
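The cropped code above builds the encoder stack with the repeat helper: a single EncoderLayerSANM in encoders0 projects input_size to output_size, and the remaining num_blocks-1 layers in encoders operate at output_size. The toy sketch below shows the factory pattern repeat relies on; the pass-through MultiSequential.forward is an assumption on my part (the context only shows repeat itself), and ToyLayer is a hypothetical stand-in for EncoderLayerSANM:

import torch
import torch.nn as nn

class MultiSequential(nn.Sequential):
    """Assumed behaviour: like nn.Sequential, but each module consumes and
    returns a tuple of arguments (e.g. (x, mask))."""
    def forward(self, *args):
        for module in self:
            args = module(*args)
        return args

def repeat(n, fn):
    # Same shape as the repeat snippet in the context: build n modules from a factory.
    return MultiSequential(*[fn(i) for i in range(n)])

class ToyLayer(nn.Module):            # hypothetical stand-in for EncoderLayerSANM
    def __init__(self, dim):
        super().__init__()
        self.proj = nn.Linear(dim, dim)
    def forward(self, x, mask):
        return self.proj(x) * mask, mask

layers = repeat(3, lambda lnum: ToyLayer(8))
x, mask = torch.randn(2, 5, 8), torch.ones(2, 5, 1)
y, mask = layers(x, mask)
print(y.shape)                        # torch.Size([2, 5, 8])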
class EncoderLayerSANM(nn.Module): def __init__( self, in_size, size, self_attn, feed_forward, dropout_rate, normalize_before=True, concat_after=False, stochastic_depth_rate=0.0, ): """Construct an EncoderLayer object.""" super(EncoderLayerSANM, self).__init__() self.self_attn = self_attn self.feed_forward = feed_forward self.norm1 = LayerNorm(in_size) self.norm2 = LayerNorm(size) self.dropout = nn.Dropout(dropout_rate) self.in_size = in_size self.size = size self.normalize_before = normalize_before self.concat_after = concat_after if self.concat_after: self.concat_linear = nn.Linear(size + size, size) self.stochastic_depth_rate = stochastic_depth_rate self.dropout_rate = dropout_rate def forward(self, x, mask, cache=None, mask_shfit_chunk=None, mask_att_chunk_encoder=None): """Compute encoded features. Args: x_input (torch.Tensor): Input tensor (#batch, time, size). mask (torch.Tensor): Mask tensor for the input (#batch, time). cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size). Returns: torch.Tensor: Output tensor (#batch, time, size). torch.Tensor: Mask tensor (#batch, time). """ skip_layer = False # with stochastic depth, residual connection `x + f(x)` becomes # `x <- x + 1 / (1 - p) * f(x)` at training time. stoch_layer_coeff = 1.0 if self.training and self.stochastic_depth_rate > 0: skip_layer = torch.rand(1).item() < self.stochastic_depth_rate stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate) if skip_layer: if cache is not None: x = torch.cat([cache, x], dim=1) return x, mask residual = x if self.normalize_before: x = self.norm1(x) if self.concat_after: x_concat = torch.cat((x, self.self_attn(x, mask, mask_shfit_chunk=mask_shfit_chunk, mask_att_chunk_encoder=mask_att_chunk_encoder)), dim=-1) if self.in_size == self.size: x = residual + stoch_layer_coeff * self.concat_linear(x_concat) else: x = stoch_layer_coeff * self.concat_linear(x_concat) else: if self.in_size == self.size: x = residual + stoch_layer_coeff * self.dropout( self.self_attn(x, mask, mask_shfit_chunk=mask_shfit_chunk, mask_att_chunk_encoder=mask_att_chunk_encoder) ) else: x = stoch_layer_coeff * self.dropout( self.self_attn(x, mask, mask_shfit_chunk=mask_shfit_chunk, mask_att_chunk_encoder=mask_att_chunk_encoder) ) if not self.normalize_before: x = self.norm1(x) residual = x if self.normalize_before: x = self.norm2(x) x = residual + stoch_layer_coeff * self.dropout(self.feed_forward(x)) if not self.normalize_before: x = self.norm2(x) return x, mask, cache, mask_shfit_chunk, mask_att_chunk_encoder class SANMEncoder(AbsEncoder): """ author: Speech Lab, Alibaba Group, China San-m: Memory equipped self-attention for end-to-end speech recognition https://arxiv.org/abs/2006.01713 """ def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: Optional[str] = "conv2d", pos_enc_class=SinusoidalPositionEncoder, normalize_before: bool = True, concat_after: bool = False, positionwise_layer_type: str = "linear", positionwise_conv_kernel_size: int = 1, padding_idx: int = -1, interctc_layer_idx: List[int] = [], interctc_use_conditioning: bool = False, kernel_size : int = 11, sanm_shfit : int = 0, selfattention_layer_type: str = "sanm", tf2torch_tensor_name_prefix_torch: str = "encoder", tf2torch_tensor_name_prefix_tf: str = "seq2seq/encoder", ): super().__init__() self._output_size = output_size if input_layer == 
"linear": self.embed = torch.nn.Sequential( torch.nn.Linear(input_size, output_size), torch.nn.LayerNorm(output_size), torch.nn.Dropout(dropout_rate), torch.nn.ReLU(), pos_enc_class(output_size, positional_dropout_rate), ) elif input_layer == "conv2d": self.embed = Conv2dSubsampling(input_size, output_size, dropout_rate) elif input_layer == "conv2d2": self.embed = Conv2dSubsampling2(input_size, output_size, dropout_rate) elif input_layer == "conv2d6": self.embed = Conv2dSubsampling6(input_size, output_size, dropout_rate) elif input_layer == "conv2d8": self.embed = Conv2dSubsampling8(input_size, output_size, dropout_rate) elif input_layer == "embed": self.embed = torch.nn.Sequential( torch.nn.Embedding(input_size, output_size, padding_idx=padding_idx), SinusoidalPositionEncoder(), ) elif input_layer is None: if input_size == output_size: self.embed = None else: self.embed = torch.nn.Linear(input_size, output_size) elif input_layer == "pe": self.embed = SinusoidalPositionEncoder() else: raise ValueError("unknown input_layer: " + input_layer) self.normalize_before = normalize_before if positionwise_layer_type == "linear": positionwise_layer = PositionwiseFeedForward positionwise_layer_args = ( output_size, linear_units, dropout_rate, ) elif positionwise_layer_type == "conv1d": positionwise_layer = MultiLayeredConv1d positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) elif positionwise_layer_type == "conv1d-linear": positionwise_layer = Conv1dLinear positionwise_layer_args = ( output_size, linear_units, positionwise_conv_kernel_size, dropout_rate, ) else: raise NotImplementedError("Support only linear or conv1d.") if selfattention_layer_type == "selfattn": encoder_selfattn_layer = MultiHeadedAttention encoder_selfattn_layer_args = ( attention_heads, output_size, attention_dropout_rate, ) elif selfattention_layer_type == "sanm": encoder_selfattn_layer = MultiHeadedAttentionSANM encoder_selfattn_layer_args0 = ( attention_heads, input_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit, ) encoder_selfattn_layer_args = ( attention_heads, output_size, output_size, attention_dropout_rate, kernel_size, sanm_shfit, ) self.encoders0 = repeat( 1, lambda lnum: EncoderLayerSANM( input_size, output_size, encoder_selfattn_layer(*encoder_selfattn_layer_args0), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after, ), ) self.encoders = repeat( num_blocks-1, lambda lnum: EncoderLayerSANM( output_size, output_size, encoder_selfattn_layer(*encoder_selfattn_layer_args), positionwise_layer(*positionwise_layer_args), dropout_rate, normalize_before, concat_after, ), ) if self.normalize_before: self.after_norm = LayerNorm(output_size) self.interctc_layer_idx = interctc_layer_idx if len(interctc_layer_idx) > 0: assert 0 < min(interctc_layer_idx) and max(interctc_layer_idx) < num_blocks self.interctc_use_conditioning = interctc_use_conditioning self.conditioning_layer = None self.dropout = nn.Dropout(dropout_rate) self.tf2torch_tensor_name_prefix_torch = tf2torch_tensor_name_prefix_torch self.tf2torch_tensor_name_prefix_tf = tf2torch_tensor_name_prefix_tf def output_size(self) -> int: return self._output_size def forward( self, xs_pad: torch.Tensor, ilens: torch.Tensor, prev_states: torch.Tensor = None, ctc = None, ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]: """Embed positions in tensor. Args: xs_pad: input tensor (B, L, D) ilens: input length (B) prev_states: Not to be used now. 
Returns: position embedded tensor and mask """ masks = (~make_pad_mask(ilens)[:, None, :]).to(xs_pad.device) xs_pad = xs_pad * self.output_size()**0.5 if self.embed is None: xs_pad = xs_pad elif ( isinstance(self.embed, Conv2dSubsampling) or isinstance(self.embed, Conv2dSubsampling2) or isinstance(self.embed, Conv2dSubsampling6) or isinstance(self.embed, Conv2dSubsampling8) ):
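EncoderLayerSANM in the code above applies stochastic depth: at training time the whole layer is skipped with probability stochastic_depth_rate, and when it is kept the residual branch is rescaled by 1 / (1 - p) so that its expected contribution matches the always-on layer used at inference. A compact sketch of that rule, with a plain nn.Linear standing in for the attention and feed-forward sublayers:

import torch
import torch.nn as nn

class StochasticDepthResidual(nn.Module):
    """Sketch of the stochastic-depth rule used by EncoderLayerSANM."""
    def __init__(self, fn, p=0.1):
        super().__init__()
        self.fn, self.p = fn, p

    def forward(self, x):
        if self.training and self.p > 0:
            if torch.rand(1).item() < self.p:
                return x                                   # skip the layer entirely
            return x + (1.0 / (1.0 - self.p)) * self.fn(x) # rescale the kept branch
        return x + self.fn(x)                              # inference: plain residual

block = StochasticDepthResidual(nn.Linear(8, 8), p=0.2)
print(block(torch.randn(2, 8)).shape)                      # torch.Size([2, 8])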
short_status, limit_size = check_short_utt(self.embed, xs_pad.size(1))
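The next_line above calls check_short_utt, whose snippet in the context encodes the minimum input lengths each Conv2d subsampling front-end can handle (3, 7, 11, and 15 frames for the 1/2, 1/4, 1/6, and 1/8 variants). The dependency-free sketch below reproduces that guard; the function name guard_short_utt and the MIN_FRAMES table are mine, and a plain ValueError stands in for TooShortUttError:

# Minimum frame counts taken from the check_short_utt snippet above.
MIN_FRAMES = {"conv2d2": 3, "conv2d": 7, "conv2d6": 11, "conv2d8": 15}

def guard_short_utt(input_layer: str, num_frames: int) -> None:
    limit = MIN_FRAMES.get(input_layer)
    if limit is not None and num_frames < limit:
        raise ValueError(   # stands in for TooShortUttError in this sketch
            f"utterance has {num_frames} frames but {input_layer} "
            f"subsampling needs at least {limit}"
        )

guard_short_utt("conv2d", 100)      # long enough, passes silently
try:
    guard_short_utt("conv2d8", 10)  # too short for 1/8 subsampling
except ValueError as e:
    print(e)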
15
2023-10-07 02:00:40+00:00
24k
Beckschen/3D-TransUNet
nn_transunet/trainer/nnUNetTrainerV2.py
[ { "identifier": "get_moreDA_augmentation", "path": "nn_transunet/data/data_augmentation_moreDA.py", "snippet": "def get_moreDA_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1,\n seeds_train=None, seeds_val=None, order_seg=1, order_data=3, deep_supervision_scales=None,\n soft_ds=False,\n classes=None, pin_memory=True, regions=None,\n use_nondetMultiThreadedAugmenter: bool = False,\n is_spatial_aug_only=False, reclip=None):\n\n # default_3D_augmentation_params: {'selected_data_channels': None, 'selected_seg_channels': [0], 'do_elastic': False, 'elastic_deform_alpha': (0.0, 900.0), 'elastic_deform_sigma': (9.0, 13.0), 'p_eldef': 0.2, 'do_scaling': True, 'scale_range': (0.7, 1.4), 'independent_scale_factor_for_each_axis': False, 'p_independent_scale_per_axis': 1, 'p_scale': 0.2, 'do_rotation': True, 'rotation_x': (-0.5235987755982988, 0.5235987755982988), 'rotation_y': (-0.5235987755982988, 0.5235987755982988), 'rotation_z': (-0.5235987755982988, 0.5235987755982988), 'rotation_p_per_axis': 1, 'p_rot': 0.2, 'random_crop': False, 'random_crop_dist_to_border': None, 'do_gamma': True, 'gamma_retain_stats': True, 'gamma_range': (0.7, 1.5), 'p_gamma': 0.3, 'do_mirror': True, 'mirror_axes': (0, 1, 2), 'dummy_2D': False, 'mask_was_used_for_normalization': OrderedDict([(0, False)]), 'border_mode_data': 'constant', 'all_segmentation_labels': None, 'move_last_seg_chanel_to_data': False, 'cascade_do_cascade_augmentations': False, 'cascade_random_binary_transform_p': 0.4, 'cascade_random_binary_transform_p_per_label': 1, 'cascade_random_binary_transform_size': (1, 8), 'cascade_remove_conn_comp_p': 0.2, 'cascade_remove_conn_comp_max_size_percent_threshold': 0.15, 'cascade_remove_conn_comp_fill_with_other_class_p': 0.0, 'do_additive_brightness': False, 'additive_brightness_p_per_sample': 0.15, 'additive_brightness_p_per_channel': 0.5, 'additive_brightness_mu': 0.0, 'additive_brightness_sigma': 0.1, 'num_threads': 12, 'num_cached_per_thread': 2, 'patch_size_for_spatialtransform': [64, 128, 128]} \n\n assert params.get('mirror') is None, \"old version of params, use new keyword do_mirror\"\n\n tr_transforms = []\n\n\n if params.get(\"selected_data_channels\") is not None:\n tr_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n\n if params.get(\"selected_seg_channels\") is not None:\n tr_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n # don't do color augmentations while in 2d mode with 3d data because the color channel is overloaded!!\n if params.get(\"dummy_2D\") is not None and params.get(\"dummy_2D\"):\n ignore_axes = (0,)\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n ignore_axes = None\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None,\n do_elastic_deform=params.get(\"do_elastic\"), alpha=params.get(\"elastic_deform_alpha\"),\n sigma=params.get(\"elastic_deform_sigma\"),\n do_rotation=params.get(\"do_rotation\"), angle_x=params.get(\"rotation_x\"), angle_y=params.get(\"rotation_y\"),\n angle_z=params.get(\"rotation_z\"), p_rot_per_axis=params.get(\"rotation_p_per_axis\"),\n do_scale=params.get(\"do_scaling\"), scale=params.get(\"scale_range\"),\n border_mode_data=params.get(\"border_mode_data\"), border_cval_data=0, order_data=order_data,\n border_mode_seg=\"constant\", border_cval_seg=border_val_seg,\n 
order_seg=order_seg, random_crop=params.get(\"random_crop\"), p_el_per_sample=params.get(\"p_eldef\"),\n p_scale_per_sample=params.get(\"p_scale\"), p_rot_per_sample=params.get(\"p_rot\"),\n independent_scale_for_each_axis=params.get(\"independent_scale_factor_for_each_axis\")\n ))\n\n if params.get(\"dummy_2D\"):\n tr_transforms.append(Convert2DTo3DTransform())\n\n # we need to put the color augmentations after the dummy 2d part (if applicable). Otherwise the overloaded color\n # channel gets in the way\n tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1)) # a kind of noise transform\n tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2, p_per_channel=0.5))\n tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))\n\n if params.get(\"do_additive_brightness\"):\n tr_transforms.append(BrightnessTransform(params.get(\"additive_brightness_mu\"),\n params.get(\"additive_brightness_sigma\"),\n True, p_per_sample=params.get(\"additive_brightness_p_per_sample\"),\n p_per_channel=params.get(\"additive_brightness_p_per_channel\")))\n\n tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))\n tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,\n p_per_channel=0.5,\n order_downsample=0, order_upsample=3, p_per_sample=0.25,\n ignore_axes=ignore_axes))\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), True, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=0.1)) # inverted gamma, a kind of color transform\n\n if params.get(\"do_gamma\"):\n tr_transforms.append(\n GammaTransform(params.get(\"gamma_range\"), False, True, retain_stats=params.get(\"gamma_retain_stats\"),\n p_per_sample=params[\"p_gamma\"]))\n if params.get(\"do_mirror\") or params.get(\"mirror\"):\n tr_transforms.append(MirrorTransform(params.get(\"mirror_axes\")))\n\n if params.get(\"mask_was_used_for_normalization\") is not None:\n mask_was_used_for_normalization = params.get(\"mask_was_used_for_normalization\")\n tr_transforms.append(MaskTransform(mask_was_used_for_normalization, mask_idx_in_seg=0, set_outside_to=0))\n # Replaces all pixels in data_dict[input_key] that have value remove_label with replace_with and saves the result to data_dict[output_key]\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"): # only used for cascade\n print(\"only used for cascaded!\")\n raise NotImplementedError\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n tr_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n if deep_supervision_scales is not None:\n if soft_ds:\n assert classes is not None\n tr_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))\n else:\n tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n tr_transforms = Compose(tr_transforms)\n\n if use_nondetMultiThreadedAugmenter:\n if NonDetMultiThreadedAugmenter is None:\n raise RuntimeError('NonDetMultiThreadedAugmenter is not yet available')\n batchgenerator_train = NonDetMultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"), 
seeds=seeds_train,\n pin_memory=pin_memory)\n else:\n batchgenerator_train = MultiThreadedAugmenter(dataloader_train, tr_transforms, params.get('num_threads'),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_train, pin_memory=pin_memory)\n # batchgenerator_train = SingleThreadedAugmenter(dataloader_train, tr_transforms)\n # import IPython;IPython.embed()\n\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n if params.get(\"selected_data_channels\") is not None:\n val_transforms.append(DataChannelSelectionTransform(params.get(\"selected_data_channels\")))\n if params.get(\"selected_seg_channels\") is not None:\n val_transforms.append(SegChannelSelectionTransform(params.get(\"selected_seg_channels\")))\n\n if params.get(\"move_last_seg_chanel_to_data\") is not None and params.get(\"move_last_seg_chanel_to_data\"):\n print(\"only used for cascaded!\")\n raise NotImplementedError\n # val_transforms.append(MoveSegAsOneHotToData(1, params.get(\"all_segmentation_labels\"), 'seg', 'data'))\n\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n val_transforms.append(ConvertSegmentationToRegionsTransform(regions, 'target', 'target'))\n\n if deep_supervision_scales is not None:\n if soft_ds:\n assert classes is not None\n val_transforms.append(DownsampleSegForDSTransform3(deep_supervision_scales, 'target', 'target', classes))\n else:\n val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n\n if use_nondetMultiThreadedAugmenter:\n if NonDetMultiThreadedAugmenter is None:\n raise RuntimeError('NonDetMultiThreadedAugmenter is not yet available')\n batchgenerator_val = NonDetMultiThreadedAugmenter(dataloader_val, val_transforms,\n max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_val, pin_memory=pin_memory)\n else:\n batchgenerator_val = MultiThreadedAugmenter(dataloader_val, val_transforms,\n max(params.get('num_threads') // 2, 1),\n params.get(\"num_cached_per_thread\"),\n seeds=seeds_val, pin_memory=pin_memory)\n # batchgenerator_val = SingleThreadedAugmenter(dataloader_val, val_transforms)\n return batchgenerator_train, batchgenerator_val" }, { "identifier": "MultipleOutputLoss2", "path": "nn_transunet/trainer/loss_functions.py", "snippet": "class MultipleOutputLoss2(nn.Module):\n def __init__(self, loss, weight_factors=None):\n \"\"\"\n use this if you have several outputs and ground truth (both list of same len) and the loss should be computed\n between them (x[0] and y[0], x[1] and y[1] etc)\n :param loss:\n :param weight_factors:\n \"\"\"\n super(MultipleOutputLoss2, self).__init__()\n self.weight_factors = weight_factors\n self.loss = loss\n\n def forward(self, x, y):\n assert isinstance(x, (tuple, list)), \"x must be either tuple or list\"\n assert isinstance(y, (tuple, list)), \"y must be either tuple or list\"\n if self.weight_factors is None:\n weights = [1] * len(x)\n else:\n weights = self.weight_factors\n\n l = weights[0] * self.loss(x[0], y[0])\n for i in range(1, len(x)):\n if weights[i] != 0:\n l += weights[i] * self.loss(x[i], y[i])\n return l" }, { "identifier": "maybe_to_torch", "path": "nn_transunet/trainer/network_trainer.py", "snippet": "def maybe_to_torch(d):\n if isinstance(d, list):\n d = [maybe_to_torch(i) if not isinstance(i, torch.Tensor) else i for i in d]\n elif not 
isinstance(d, torch.Tensor):\n d = torch.from_numpy(d).float()\n return d" }, { "identifier": "to_cuda", "path": "nn_transunet/trainer/network_trainer.py", "snippet": "def to_cuda(data, non_blocking=True, gpu_id=0):\n if isinstance(data, list):\n data = [i.cuda(gpu_id, non_blocking=non_blocking) for i in data]\n else:\n data = data.cuda(gpu_id, non_blocking=non_blocking)\n return data" }, { "identifier": "nnUNetTrainer", "path": "nn_transunet/trainer/nnUNetTrainer.py", "snippet": "class nnUNetTrainer(NetworkTrainer):\n def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,\n unpack_data=True, deterministic=True, fp16=False):\n \"\"\"\n :param deterministic:\n :param fold: can be either [0 ... 5) for cross-validation, 'all' to train on all available training data or\n None if you wish to load some checkpoint and do inference only\n :param plans_file: the pkl file generated by preprocessing. This file will determine all design choices\n :param subfolder_with_preprocessed_data: must be a subfolder of dataset_directory (just the name of the folder,\n not the entire path). This is where the preprocessed data lies that will be used for network training. We made\n this explicitly available so that differently preprocessed data can coexist and the user can choose what to use.\n Can be None if you are doing inference only.\n :param output_folder: where to store parameters, plot progress and to the validation\n :param dataset_directory: the parent directory in which the preprocessed Task data is stored. This is required\n because the split information is stored in this directory. For running prediction only this input is not\n required and may be set to None\n :param batch_dice: compute dice loss for each sample and average over all samples in the batch or pretend the\n batch is a pseudo volume?\n :param stage: The plans file may contain several stages (used for lowres / highres / pyramid). Stage must be\n specified for training:\n if stage 1 exists then stage 1 is the high resolution stage, otherwise it's 0\n :param unpack_data: if False, npz preprocessed data will not be unpacked to npy. This consumes less space but\n is considerably slower! Running unpack_data=False with 2d should never be done!\n\n IMPORTANT: If you inherit from nnUNetTrainer and the init args change then you need to redefine self.init_args\n in your init accordingly. 
Otherwise checkpoints won't load properly!\n \"\"\"\n super(nnUNetTrainer, self).__init__(deterministic, fp16)\n self.unpack_data = unpack_data\n self.init_args = (plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data,\n deterministic, fp16)\n # set through arguments from init\n self.stage = stage\n self.experiment_name = self.__class__.__name__\n self.plans_file = plans_file\n self.output_folder = output_folder\n self.dataset_directory = dataset_directory\n self.output_folder_base = self.output_folder\n self.fold = fold\n self.pin_memory = True\n\n self.plans = None\n\n # if we are running inference only then the self.dataset_directory is set (due to checkpoint loading) but it\n # irrelevant\n if self.dataset_directory is not None and isdir(self.dataset_directory):\n self.gt_niftis_folder = join(\n self.dataset_directory, \"gt_segmentations\")\n else:\n self.gt_niftis_folder = None\n\n self.folder_with_preprocessed_data = None\n\n # set in self.initialize()\n\n self.dl_tr = self.dl_val = None\n self.num_input_channels = self.num_classes = self.net_pool_per_axis = self.patch_size = self.batch_size = \\\n self.threeD = self.base_num_features = self.intensity_properties = self.normalization_schemes = \\\n self.net_num_pool_op_kernel_sizes = self.net_conv_kernel_sizes = None # loaded automatically from plans_file\n\n self.basic_generator_patch_size = self.data_aug_params = self.transpose_forward = self.transpose_backward = None\n\n self.batch_dice = batch_dice\n self.loss = DC_and_CE_loss(\n {'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {})\n # self.loss = PartiallyCrossEntropyLoss()\n\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []\n\n self.classes = self.do_dummy_2D_aug = self.use_mask_for_norm = self.only_keep_largest_connected_component = \\\n self.min_region_size_per_class = self.min_size_per_class = None\n\n self.inference_pad_border_mode = \"constant\"\n self.inference_pad_kwargs = {'constant_values': 0}\n\n self.update_fold(fold)\n self.pad_all_sides = None\n\n self.lr_scheduler_eps = 1e-3\n self.lr_scheduler_patience = 30\n self.initial_lr = 1e-2\n # self.initial_lr = 1e-3\n self.weight_decay = 3e-5\n\n self.oversample_foreground_percent = 0.33\n\n self.conv_per_stage = None\n self.regions_class_order = None\n\n def update_fold(self, fold):\n \"\"\"\n used to swap between folds for inference (ensemble of models from cross-validation)\n DO NOT USE DURING TRAINING AS THIS WILL NOT UPDATE THE DATASET SPLIT AND THE DATA AUGMENTATION GENERATORS\n :param fold:\n :return:\n \"\"\"\n if fold is not None:\n if isinstance(fold, str):\n assert fold.startswith(\"all\"), \"if self.fold is a string then it must be \\'all\\'\"\n # assert fold == \"all\", \"if self.fold is a string then it must be \\'all\\'\"\n if self.output_folder.endswith(\"%s\" % str(self.fold)):\n self.output_folder = self.output_folder_base\n self.output_folder = join(self.output_folder, \"%s\" % str(fold))\n else:\n if self.output_folder.endswith(\"fold_%s\" % str(self.fold)):\n self.output_folder = self.output_folder_base\n self.output_folder = join(\n self.output_folder, \"fold_%s\" % str(fold))\n self.fold = fold\n\n def setup_DA_params(self):\n if self.threeD:\n self.data_aug_params = default_3D_augmentation_params\n if self.do_dummy_2D_aug:\n self.data_aug_params[\"dummy_2D\"] = True\n self.print_to_log_file(\"Using dummy2d data augmentation\")\n self.data_aug_params[\"elastic_deform_alpha\"] = \\\n 
default_2D_augmentation_params[\"elastic_deform_alpha\"]\n self.data_aug_params[\"elastic_deform_sigma\"] = \\\n default_2D_augmentation_params[\"elastic_deform_sigma\"]\n self.data_aug_params[\"rotation_x\"] = default_2D_augmentation_params[\"rotation_x\"]\n else:\n self.do_dummy_2D_aug = False\n if max(self.patch_size) / min(self.patch_size) > 1.5:\n default_2D_augmentation_params['rotation_x'] = (\n -15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi)\n self.data_aug_params = default_2D_augmentation_params\n self.data_aug_params[\"mask_was_used_for_normalization\"] = self.use_mask_for_norm\n\n if self.do_dummy_2D_aug:\n self.basic_generator_patch_size = get_patch_size(self.patch_size[1:],\n self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n self.basic_generator_patch_size = np.array(\n [self.patch_size[0]] + list(self.basic_generator_patch_size))\n else:\n self.basic_generator_patch_size = get_patch_size(self.patch_size, self.data_aug_params['rotation_x'],\n self.data_aug_params['rotation_y'],\n self.data_aug_params['rotation_z'],\n self.data_aug_params['scale_range'])\n\n self.data_aug_params['selected_seg_channels'] = [0]\n self.data_aug_params['patch_size_for_spatialtransform'] = self.patch_size\n\n def initialize(self, training=True, force_load_plans=False):\n \"\"\"\n For prediction of test cases just set training=False, this will prevent loading of training data and\n training batchgenerator initialization\n :param training:\n :return:\n \"\"\"\n\n maybe_mkdir_p(self.output_folder)\n\n if force_load_plans or (self.plans is None):\n self.load_plans_file()\n \n self.process_plans(self.plans)\n\n self.setup_DA_params()\n\n if training:\n self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] +\n \"_stage%d\" % self.stage)\n\n self.dl_tr, self.dl_val = self.get_basic_generators()\n if self.unpack_data:\n self.print_to_log_file(\"unpacking dataset\")\n unpack_dataset(self.folder_with_preprocessed_data)\n self.print_to_log_file(\"done\")\n else:\n self.print_to_log_file(\n \"INFO: Not unpacking data! Training may be slow due to that. 
Pray you are not using 2d or you \"\n \"will wait all winter for your model to finish!\")\n \n self.tr_gen, self.val_gen = get_default_augmentation(self.dl_tr, self.dl_val,\n self.data_aug_params[\n 'patch_size_for_spatialtransform'],\n self.data_aug_params)\n self.print_to_log_file(\"TRAINING KEYS:\\n %s\" % (str(self.dataset_tr.keys())),\n also_print_to_console=False)\n self.print_to_log_file(\"VALIDATION KEYS:\\n %s\" % (str(self.dataset_val.keys())),\n also_print_to_console=False)\n else:\n pass\n self.initialize_network()\n self.initialize_optimizer_and_scheduler()\n # assert isinstance(self.network, (SegmentationNetwork, nn.DataParallel))\n self.was_initialized = True\n\n def initialize_network(self):\n \"\"\"\n This is specific to the U-Net and must be adapted for other network architectures\n :return:\n \"\"\"\n # self.print_to_log_file(self.net_num_pool_op_kernel_sizes)\n # self.print_to_log_file(self.net_conv_kernel_sizes)\n\n net_numpool = len(self.net_num_pool_op_kernel_sizes)\n if self.threeD:\n conv_op = nn.Conv3d\n dropout_op = nn.Dropout3d\n norm_op = nn.InstanceNorm3d\n else:\n conv_op = nn.Conv2d\n dropout_op = nn.Dropout2d\n norm_op = nn.InstanceNorm2d\n norm_op_kwargs = {'eps': 1e-5, 'affine': True}\n dropout_op_kwargs = {'p': 0, 'inplace': True}\n net_nonlin = nn.LeakyReLU\n net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n self.network = Generic_UNet(self.num_input_channels, self.base_num_features, self.num_classes, net_numpool,\n self.conv_per_stage, 2, conv_op, norm_op, norm_op_kwargs, dropout_op,\n dropout_op_kwargs,\n net_nonlin, net_nonlin_kwargs, False, False, lambda x: x, InitWeights_He(\n 1e-2),\n self.net_num_pool_op_kernel_sizes, self.net_conv_kernel_sizes, False, True, True)\n\n # self.network.inference_apply_nonlin = softmax_helper\n # self.network = UNet(self.num_input_channels, self.num_classes)\n # self.network = smp.Unet(encoder_name='resnet50', encoder_weights='imagenet',\n # in_channels=self.num_input_channels, classes=self.num_classes)\n # self.network = smp.DeepLabV3Plus(encoder_name='resnet50', encoder_weights='imagenet',\n # in_channels=self.num_input_channels, classes=self.num_classes)\n # self.network = Attention_UNet(feature_scale=2, n_classes=self.num_classes, is_deconv=True, in_channels=self.num_input_channels)\n # self.network = VNet(n_channels=self.num_input_channels, n_classes=self.num_classes)\n # self.network = NestedUNet(num_classes=self.num_classes, input_channels=self.num_input_channels)\n if torch.cuda.is_available():\n self.network.cuda()\n # checkpoint = torch.load(\"/mnt/lustre/luoxiangde.vendor/projects/nnUNetFrame/DATASET/pCE_model_latest.model\")\n # print(\"Load Weighted Successful\")\n # weights = checkpoint['state_dict']\n # self.network.load_state_dict(weights, strict=False)\n # self.network.half()\n\n # def initialize_optimizer_and_scheduler(self):\n # assert self.network is not None, \"self.initialize_network must be called first\"\n # self.optimizer = torch.optim.Adam(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n # amsgrad=True)\n # self.lr_scheduler = lr_scheduler.ReduceLROnPlateau(self.optimizer, mode='min', factor=0.2,\n # patience=self.lr_scheduler_patience,\n # verbose=True, threshold=self.lr_scheduler_eps,\n # threshold_mode=\"abs\")\n def initialize_optimizer_and_scheduler(self):\n assert self.network is not None, \"self.initialize_network must be called first\"\n self.optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, 
weight_decay=self.weight_decay,\n momentum=0.99, nesterov=True)\n self.lr_scheduler = None\n\n def save_debug_information(self):\n # saving some debug information\n dct = OrderedDict()\n for k in self.__dir__():\n if not k.startswith(\"__\"):\n if not callable(getattr(self, k)):\n dct[k] = str(getattr(self, k))\n del dct['plans']\n del dct['intensity_properties']\n del dct['dataset']\n del dct['dataset_tr']\n del dct['dataset_val']\n save_json(dct, join(self.output_folder, \"debug.json\"))\n\n import shutil\n\n shutil.copy(self.plans_file, join(\n self.output_folder_base, \"plans.pkl\"))\n\n def run_training(self):\n self.save_debug_information()\n super(nnUNetTrainer, self).run_training()\n\n def load_plans_file(self):\n \"\"\"\n This is what actually configures the entire experiment. The plans file is generated by experiment planning\n :return:\n \"\"\"\n self.plans = load_pickle(self.plans_file)\n\n\n def process_plans(self, plans):\n if self.stage is None:\n assert len(list(plans['plans_per_stage'].keys())) == 1, \\\n \"If self.stage is None then there can be only one stage in the plans file. That seems to not be the \" \\\n \"case. Please specify which stage of the cascade must be trained\"\n self.stage = list(plans['plans_per_stage'].keys())[0]\n self.plans = plans\n\n stage_plans = self.plans['plans_per_stage'][self.stage]\n self.batch_size = stage_plans['batch_size']\n # self.batch_size = 4\n self.net_pool_per_axis = stage_plans['num_pool_per_axis']\n self.patch_size = np.array(stage_plans['patch_size']).astype(int)\n self.do_dummy_2D_aug = stage_plans['do_dummy_2D_data_aug']\n\n if 'pool_op_kernel_sizes' not in stage_plans.keys():\n assert 'num_pool_per_axis' in stage_plans.keys()\n self.print_to_log_file(\n \"WARNING! old plans file with missing pool_op_kernel_sizes. Attempting to fix it...\")\n self.net_num_pool_op_kernel_sizes = []\n for i in range(max(self.net_pool_per_axis)):\n curr = []\n for j in self.net_pool_per_axis:\n if (max(self.net_pool_per_axis) - j) <= i:\n curr.append(2)\n else:\n curr.append(1)\n self.net_num_pool_op_kernel_sizes.append(curr)\n else:\n self.net_num_pool_op_kernel_sizes = stage_plans['pool_op_kernel_sizes']\n\n if 'conv_kernel_sizes' not in stage_plans.keys():\n self.print_to_log_file(\n \"WARNING! old plans file with missing conv_kernel_sizes. Attempting to fix it...\")\n self.net_conv_kernel_sizes = [\n [3] * len(self.net_pool_per_axis)] * (max(self.net_pool_per_axis) + 1)\n else:\n self.net_conv_kernel_sizes = stage_plans['conv_kernel_sizes']\n\n self.pad_all_sides = None # self.patch_size\n self.intensity_properties = plans['dataset_properties']['intensityproperties']\n self.normalization_schemes = plans['normalization_schemes']\n self.base_num_features = plans['base_num_features']\n self.num_input_channels = plans['num_modalities']\n # background is no longer in num_classes\n self.num_classes = plans['num_classes'] + 1\n self.classes = plans['all_classes']\n self.use_mask_for_norm = plans['use_mask_for_norm']\n self.only_keep_largest_connected_component = plans['keep_only_largest_region']\n self.min_region_size_per_class = plans['min_region_size_per_class']\n # DONT USE THIS. plans['min_size_per_class']\n self.min_size_per_class = None\n\n if plans.get('transpose_forward') is None or plans.get('transpose_backward') is None:\n print(\"WARNING! You seem to have data that was preprocessed with a previous version of nnU-Net. \"\n \"You should rerun preprocessing. 
We will proceed and assume that both transpose_foward \"\n \"and transpose_backward are [0, 1, 2]. If that is not correct then weird things will happen!\")\n plans['transpose_forward'] = [0, 1, 2]\n plans['transpose_backward'] = [0, 1, 2]\n self.transpose_forward = plans['transpose_forward']\n self.transpose_backward = plans['transpose_backward']\n\n if len(self.patch_size) == 2:\n self.threeD = False\n elif len(self.patch_size) == 3:\n self.threeD = True\n else:\n raise RuntimeError(\n \"invalid patch size in plans file: %s\" % str(self.patch_size))\n\n if \"conv_per_stage\" in plans.keys(): # this ha sbeen added to the plans only recently\n self.conv_per_stage = plans['conv_per_stage']\n else:\n self.conv_per_stage = 2\n\n def load_dataset(self):\n self.dataset = load_dataset(self.folder_with_preprocessed_data)\n\n def get_basic_generators(self):\n self.load_dataset()\n self.do_split()\n\n if self.threeD:\n # dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n # False, oversample_foreground_percent=self.oversample_foreground_percent,\n # pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r', labeled_cases=10)\n # dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,\n # oversample_foreground_percent=self.oversample_foreground_percent,\n # pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r', labeled_cases=10)\n dl_tr = DataLoader3D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n False, oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n dl_val = DataLoader3D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size, False,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n else:\n dl_tr = DataLoader2D(self.dataset_tr, self.basic_generator_patch_size, self.patch_size, self.batch_size,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n dl_val = DataLoader2D(self.dataset_val, self.patch_size, self.patch_size, self.batch_size,\n oversample_foreground_percent=self.oversample_foreground_percent,\n pad_mode=\"constant\", pad_sides=self.pad_all_sides, memmap_mode='r')\n return dl_tr, dl_val\n\n\n def preprocess_patient(self, input_files):\n \"\"\"\n Used to predict new unseen data. 
Not used for the preprocessing of the training/test data\n :param input_files:\n :return:\n \"\"\"\n preprocessor_name = self.plans.get('preprocessor_name')\n if preprocessor_name is None:\n if self.threeD:\n preprocessor_name = \"GenericPreprocessor\"\n preprocessor_class = GenericPreprocessor\n else:\n preprocessor_name = \"PreprocessorFor2D\"\n preprocessor_class = PreprocessorFor2D\n if preprocessor_name == \"GenericPreprocessor\":\n preprocessor_class = GenericPreprocessor\n else:\n preprocessor_class = PreprocessorFor2D\n assert preprocessor_class is not None, \"Could not find preprocessor %s in nnunet.preprocessing\" % \\\n preprocessor_name\n preprocessor = preprocessor_class(self.normalization_schemes, self.use_mask_for_norm,\n self.transpose_forward, self.intensity_properties)\n\n d, s, properties = preprocessor.preprocess_test_case(input_files,\n self.plans['plans_per_stage'][self.stage][\n 'current_spacing'])\n return d, s, properties\n\n def preprocess_predict_nifti(self, input_files: List[str], output_file: str = None,\n softmax_ouput_file: str = None, mixed_precision: bool = True) -> None:\n \"\"\"\n Use this to predict new data\n :param input_files:\n :param output_file:\n :param softmax_ouput_file:\n :param mixed_precision:\n :return:\n \"\"\"\n print(\"preprocessing...\")\n d, s, properties = self.preprocess_patient(input_files)\n print(\"predicting...\")\n pred = self.predict_preprocessed_data_return_seg_and_softmax(d, do_mirroring=self.data_aug_params[\"do_mirror\"],\n mirror_axes=self.data_aug_params['mirror_axes'],\n use_sliding_window=True, step_size=0.5,\n use_gaussian=True, pad_border_mode='constant',\n pad_kwargs={\n 'constant_values': 0},\n verbose=True, all_in_gpu=False,\n mixed_precision=mixed_precision)[1]\n pred = pred.transpose([0] + [i + 1 for i in self.transpose_backward])\n\n if 'segmentation_export_params' in self.plans.keys():\n force_separate_z = self.plans['segmentation_export_params']['force_separate_z']\n interpolation_order = self.plans['segmentation_export_params']['interpolation_order']\n interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']\n else:\n force_separate_z = None\n interpolation_order = 1\n interpolation_order_z = 0\n\n print(\"resampling to original spacing and nifti export...\")\n save_segmentation_nifti_from_softmax(pred, output_file, properties, interpolation_order,\n self.regions_class_order, None, None, softmax_ouput_file,\n None, force_separate_z=force_separate_z,\n interpolation_order_z=interpolation_order_z)\n print(\"done\")\n\n def predict_preprocessed_data_return_seg_and_softmax(self, data: np.ndarray, do_mirroring: bool = True,\n mirror_axes: Tuple[int] = None,\n use_sliding_window: bool = True, step_size: float = 0.5,\n use_gaussian: bool = True, pad_border_mode: str = 'constant',\n pad_kwargs: dict = None, all_in_gpu: bool = False,\n verbose: bool = True, mixed_precision: bool = True) -> Tuple[\n np.ndarray, np.ndarray]:\n \"\"\"\n :param data:\n :param do_mirroring:\n :param mirror_axes:\n :param use_sliding_window:\n :param step_size:\n :param use_gaussian:\n :param pad_border_mode:\n :param pad_kwargs:\n :param all_in_gpu:\n :param verbose:\n :return:\n \"\"\"\n if pad_border_mode == 'constant' and pad_kwargs is None:\n pad_kwargs = {'constant_values': 0}\n\n if do_mirroring and mirror_axes is None:\n mirror_axes = self.data_aug_params['mirror_axes']\n\n if do_mirroring:\n assert self.data_aug_params[\"do_mirror\"], \"Cannot do mirroring as test time augmentation when training \" 
\\\n \"was done without mirroring\"\n\n # valid = list((SegmentationNetwork, nn.DataParallel))\n # print(self.network)\n # assert isinstance(self.network, tuple(valid))\n\n current_mode = self.network.training\n self.network.eval()\n ret = SegmentationNetwork.predict_3D(data, do_mirroring=do_mirroring, mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window, step_size=step_size,\n patch_size=self.patch_size, regions_class_order=self.regions_class_order,\n use_gaussian=use_gaussian, pad_border_mode=pad_border_mode,\n pad_kwargs=pad_kwargs, all_in_gpu=all_in_gpu, verbose=verbose,\n mixed_precision=mixed_precision)\n self.network.train(current_mode)\n return ret\n\n def validate(self, do_mirroring: bool = True, use_sliding_window: bool = True, step_size: float = 0.5,\n save_softmax: bool = True, use_gaussian: bool = True, overwrite: bool = True,\n validation_folder_name: str = 'validation_raw', debug: bool = False, all_in_gpu: bool = False,\n segmentation_export_kwargs: dict = None, run_postprocessing_on_folds: bool = True):\n \"\"\"\n if debug=True then the temporary files generated for postprocessing determination will be kept\n \"\"\"\n\n current_mode = self.network.training\n self.network.eval()\n\n assert self.was_initialized, \"must initialize, ideally with checkpoint (or train first)\"\n if self.dataset_val is None:\n self.load_dataset()\n self.do_split()\n\n if segmentation_export_kwargs is None:\n if 'segmentation_export_params' in self.plans.keys():\n force_separate_z = self.plans['segmentation_export_params']['force_separate_z']\n interpolation_order = self.plans['segmentation_export_params']['interpolation_order']\n interpolation_order_z = self.plans['segmentation_export_params']['interpolation_order_z']\n else:\n force_separate_z = None\n interpolation_order = 1\n interpolation_order_z = 0\n else:\n force_separate_z = segmentation_export_kwargs['force_separate_z']\n interpolation_order = segmentation_export_kwargs['interpolation_order']\n interpolation_order_z = segmentation_export_kwargs['interpolation_order_z']\n\n # predictions as they come from the network go here\n output_folder = join(self.output_folder, validation_folder_name)\n maybe_mkdir_p(output_folder)\n # this is for debug purposes\n my_input_args = {'do_mirroring': do_mirroring,\n 'use_sliding_window': use_sliding_window,\n 'step_size': step_size,\n 'save_softmax': save_softmax,\n 'use_gaussian': use_gaussian,\n 'overwrite': overwrite,\n 'validation_folder_name': validation_folder_name,\n 'debug': debug,\n 'all_in_gpu': all_in_gpu,\n 'segmentation_export_kwargs': segmentation_export_kwargs,\n }\n save_json(my_input_args, join(output_folder, \"validation_args.json\"))\n\n if do_mirroring:\n if not self.data_aug_params['do_mirror']:\n raise RuntimeError(\n \"We did not train with mirroring so you cannot do inference with mirroring enabled\")\n mirror_axes = self.data_aug_params['mirror_axes']\n else:\n mirror_axes = ()\n\n pred_gt_tuples = []\n\n export_pool = Pool(default_num_threads)\n results = []\n\n for k in self.dataset_val.keys():\n properties = load_pickle(self.dataset[k]['properties_file'])\n fname = properties['list_of_data_files'][0].split(\"/\")[-1][:-12]\n if overwrite or (not isfile(join(output_folder, fname + \".nii.gz\"))) or \\\n (save_softmax and not isfile(join(output_folder, fname + \".npz\"))):\n data = np.load(self.dataset[k]['data_file'])['data']\n\n print(k, data.shape)\n data[-1][data[-1] == -1] = 0\n\n softmax_pred = self.predict_preprocessed_data_return_seg_and_softmax(data[:-1],\n 
do_mirroring=do_mirroring,\n mirror_axes=mirror_axes,\n use_sliding_window=use_sliding_window,\n step_size=step_size,\n use_gaussian=use_gaussian,\n all_in_gpu=all_in_gpu,\n mixed_precision=self.fp16)[1]\n\n softmax_pred = softmax_pred.transpose(\n [0] + [i + 1 for i in self.transpose_backward])\n\n if save_softmax:\n softmax_fname = join(output_folder, fname + \".npz\")\n else:\n softmax_fname = None\n\n \"\"\"There is a problem with python process communication that prevents us from communicating obejcts\n larger than 2 GB between processes (basically when the length of the pickle string that will be sent is\n communicated by the multiprocessing.Pipe object then the placeholder (\\%i I think) does not allow for long\n enough strings (lol). This could be fixed by changing i to l (for long) but that would require manually\n patching system python code. We circumvent that problem here by saving softmax_pred to a npy file that will\n then be read (and finally deleted) by the Process. save_segmentation_nifti_from_softmax can take either\n filename or np.ndarray and will handle this automatically\"\"\"\n if np.prod(softmax_pred.shape) > (2e9 / 4 * 0.85): # *0.85 just to be save\n np.save(join(output_folder, fname + \".npy\"), softmax_pred)\n softmax_pred = join(output_folder, fname + \".npy\")\n\n results.append(export_pool.starmap_async(save_segmentation_nifti_from_softmax,\n ((softmax_pred, join(output_folder, fname + \".nii.gz\"),\n properties, interpolation_order, self.regions_class_order,\n None, None,\n softmax_fname, None, force_separate_z,\n interpolation_order_z),\n )\n )\n )\n\n pred_gt_tuples.append([join(output_folder, fname + \".nii.gz\"),\n join(self.gt_niftis_folder, fname + \".nii.gz\")])\n\n _ = [i.get() for i in results]\n self.print_to_log_file(\"finished prediction\")\n\n # evaluate raw predictions\n self.print_to_log_file(\"evaluation of raw predictions\")\n task = self.dataset_directory.split(\"/\")[-1]\n job_name = self.experiment_name\n _ = aggregate_scores(pred_gt_tuples, labels=list(range(self.num_classes)),\n json_output_file=join(\n output_folder, \"summary.json\"),\n json_name=job_name +\n \" val tiled %s\" % (str(use_sliding_window)),\n json_author=\"Fabian\",\n json_task=task, num_threads=default_num_threads)\n\n # if run_postprocessing_on_folds:\n # # in the old nnunet we would stop here. Now we add a postprocessing. This postprocessing can remove everything\n # # except the largest connected component for each class. To see if this improves results, we do this for all\n # # classes and then rerun the evaluation. Those classes for which this resulted in an improved dice score will\n # # have this applied during inference as well\n # self.print_to_log_file(\"determining postprocessing\")\n # determine_postprocessing(self.output_folder, self.gt_niftis_folder, validation_folder_name,\n # final_subf_name=validation_folder_name + \"_postprocessed\", debug=debug)\n # # after this the final predictions for the vlaidation set can be found in validation_folder_name_base + \"_postprocessed\"\n # # They are always in that folder, even if no postprocessing as applied!\n\n # detemining postprocesing on a per-fold basis may be OK for this fold but what if another fold finds another\n # postprocesing to be better? In this case we need to consolidate. 
At the time the consolidation is going to be\n # done we won't know what self.gt_niftis_folder was, so now we copy all the niftis into a separate folder to\n # be used later\n gt_nifti_folder = join(self.output_folder_base, \"gt_niftis\")\n maybe_mkdir_p(gt_nifti_folder)\n for f in subfiles(self.gt_niftis_folder, suffix=\".nii.gz\"):\n success = False\n attempts = 0\n e = None\n while not success and attempts < 10:\n try:\n shutil.copy(f, gt_nifti_folder)\n success = True\n except OSError as e:\n attempts += 1\n sleep(1)\n if not success:\n print(\"Could not copy gt nifti file %s into folder %s\" %\n (f, gt_nifti_folder))\n if e is not None:\n raise e\n\n self.network.train(current_mode)\n\n def run_online_evaluation(self, output, target):\n with torch.no_grad():\n num_classes = output.shape[1]\n output_softmax = softmax_helper(output)\n output_seg = output_softmax.argmax(1)\n target = target[:, 0]\n axes = tuple(range(1, len(target.shape)))\n tp_hard = torch.zeros(\n (target.shape[0], num_classes - 1)).to(output_seg.device.index)\n fp_hard = torch.zeros(\n (target.shape[0], num_classes - 1)).to(output_seg.device.index)\n fn_hard = torch.zeros(\n (target.shape[0], num_classes - 1)).to(output_seg.device.index)\n for c in range(1, num_classes):\n tp_hard[:, c - 1] = sum_tensor((output_seg == c).float()\n * (target == c).float(), axes=axes)\n fp_hard[:, c - 1] = sum_tensor((output_seg == c).float()\n * (target != c).float(), axes=axes)\n fn_hard[:, c - 1] = sum_tensor((output_seg != c).float()\n * (target == c).float(), axes=axes)\n\n tp_hard = tp_hard.sum(0, keepdim=False).detach().cpu().numpy()\n fp_hard = fp_hard.sum(0, keepdim=False).detach().cpu().numpy()\n fn_hard = fn_hard.sum(0, keepdim=False).detach().cpu().numpy()\n\n self.online_eval_foreground_dc.append(\n list((2 * tp_hard) / (2 * tp_hard + fp_hard + fn_hard + 1e-8)))\n self.online_eval_tp.append(list(tp_hard))\n self.online_eval_fp.append(list(fp_hard))\n self.online_eval_fn.append(list(fn_hard))\n\n def finish_online_evaluation(self):\n self.online_eval_tp = np.sum(self.online_eval_tp, 0)\n self.online_eval_fp = np.sum(self.online_eval_fp, 0)\n self.online_eval_fn = np.sum(self.online_eval_fn, 0)\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(self.online_eval_tp, self.online_eval_fp, self.online_eval_fn)]\n if not np.isnan(i)]\n self.all_val_eval_metrics.append(np.mean(global_dc_per_class))\n\n self.print_to_log_file(\"Average global foreground Dice:\", [\n np.round(i, 4) for i in global_dc_per_class])\n self.print_to_log_file(\"(interpret this as an estimate for the Dice of the different classes. 
This is not \"\n \"exact.)\")\n\n self.online_eval_foreground_dc = []\n self.online_eval_tp = []\n self.online_eval_fp = []\n self.online_eval_fn = []\n\n def save_checkpoint(self, fname, save_optimizer=True):\n super(nnUNetTrainer, self).save_checkpoint(fname, save_optimizer)\n info = OrderedDict()\n info['init'] = self.init_args\n info['name'] = self.__class__.__name__\n info['class'] = str(self.__class__)\n info['plans'] = self.plans\n\n write_pickle(info, fname + \".pkl\")" }, { "identifier": "Generic_UNet", "path": "nn_transunet/networks/nnunet_model.py", "snippet": "class Generic_UNet(SegmentationNetwork):\n DEFAULT_BATCH_SIZE_3D = 2\n DEFAULT_PATCH_SIZE_3D = (64, 192, 160)\n SPACING_FACTOR_BETWEEN_STAGES = 2\n BASE_NUM_FEATURES_3D = 30\n MAX_NUMPOOL_3D = 999\n MAX_NUM_FILTERS_3D = 320\n\n DEFAULT_PATCH_SIZE_2D = (256, 256)\n BASE_NUM_FEATURES_2D = 30\n DEFAULT_BATCH_SIZE_2D = 50\n MAX_NUMPOOL_2D = 999\n MAX_FILTERS_2D = 480\n\n use_this_for_batch_size_computation_2D = 19739648\n use_this_for_batch_size_computation_3D = 520000000 # 505789440\n\n def __init__(self, input_channels, base_num_features, num_classes, num_pool, num_conv_per_stage=2,\n feat_map_mul_on_downscale=2, conv_op=nn.Conv2d,\n norm_op=nn.BatchNorm2d, norm_op_kwargs=None,\n dropout_op=nn.Dropout2d, dropout_op_kwargs=None,\n nonlin=nn.LeakyReLU, nonlin_kwargs=None, deep_supervision=True, dropout_in_localization=False,\n final_nonlin=softmax_helper, weightInitializer=InitWeights_He(1e-2), pool_op_kernel_sizes=None,\n conv_kernel_sizes=None,\n upscale_logits=False, convolutional_pooling=False, convolutional_upsampling=False,\n max_num_features=None, basic_block=ConvDropoutNormNonlin,\n seg_output_use_bias=False):\n \"\"\"\n basically more flexible than v1, architecture is the same\n\n Does this look complicated? Nah bro. Functionality > usability\n\n This does everything you need, including world peace.\n\n Questions? 
-> [email protected]\n \"\"\"\n super(Generic_UNet, self).__init__()\n self.convolutional_upsampling = convolutional_upsampling\n self.convolutional_pooling = convolutional_pooling\n self.upscale_logits = upscale_logits\n if nonlin_kwargs is None:\n nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}\n if dropout_op_kwargs is None:\n dropout_op_kwargs = {'p': 0.5, 'inplace': True}\n if norm_op_kwargs is None:\n norm_op_kwargs = {'eps': 1e-5, 'affine': True, 'momentum': 0.1}\n\n self.conv_kwargs = {'stride': 1, 'dilation': 1, 'bias': True}\n\n self.nonlin = nonlin\n self.nonlin_kwargs = nonlin_kwargs\n self.dropout_op_kwargs = dropout_op_kwargs\n self.norm_op_kwargs = norm_op_kwargs\n self.weightInitializer = weightInitializer\n self.conv_op = conv_op\n self.norm_op = norm_op\n self.dropout_op = dropout_op\n self.num_classes = num_classes\n self.final_nonlin = final_nonlin\n self._deep_supervision = deep_supervision\n self.do_ds = deep_supervision\n\n if conv_op == nn.Conv2d:\n upsample_mode = 'bilinear'\n pool_op = nn.MaxPool2d\n transpconv = nn.ConvTranspose2d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3)] * (num_pool + 1)\n elif conv_op == nn.Conv3d:\n upsample_mode = 'trilinear'\n pool_op = nn.MaxPool3d\n transpconv = nn.ConvTranspose3d\n if pool_op_kernel_sizes is None:\n pool_op_kernel_sizes = [(2, 2, 2)] * num_pool\n if conv_kernel_sizes is None:\n conv_kernel_sizes = [(3, 3, 3)] * (num_pool + 1)\n else:\n raise ValueError(\"unknown convolution dimensionality, conv op: %s\" % str(conv_op))\n\n self.input_shape_must_be_divisible_by = np.prod(pool_op_kernel_sizes, 0, dtype=np.int64)\n self.pool_op_kernel_sizes = pool_op_kernel_sizes\n self.conv_kernel_sizes = conv_kernel_sizes\n\n self.conv_pad_sizes = []\n for krnl in self.conv_kernel_sizes:\n self.conv_pad_sizes.append([1 if i == 3 else 0 for i in krnl])\n\n if max_num_features is None:\n if self.conv_op == nn.Conv3d:\n self.max_num_features = self.MAX_NUM_FILTERS_3D\n else:\n self.max_num_features = self.MAX_FILTERS_2D\n else:\n self.max_num_features = max_num_features\n\n self.conv_blocks_context = []\n self.conv_blocks_localization = []\n self.td = []\n self.tu = []\n self.seg_outputs = []\n\n output_features = base_num_features\n input_features = input_channels\n\n for d in range(num_pool):\n # determine the first stride\n if d != 0 and self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[d - 1]\n else:\n first_stride = None\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[d]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[d]\n # add convolutions\n self.conv_blocks_context.append(StackedConvLayers(input_features, output_features, num_conv_per_stage,\n self.conv_op, self.conv_kwargs, self.norm_op,\n self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs,\n first_stride, basic_block=basic_block))\n if not self.convolutional_pooling:\n self.td.append(pool_op(pool_op_kernel_sizes[d]))\n input_features = output_features\n output_features = int(np.round(output_features * feat_map_mul_on_downscale))\n\n output_features = min(output_features, self.max_num_features)\n\n # now the bottleneck.\n # determine the first stride\n if self.convolutional_pooling:\n first_stride = pool_op_kernel_sizes[-1]\n else:\n first_stride = None\n\n # the output of the last conv must match the number of features from the skip connection if we are not using\n # convolutional upsampling. 
If we use convolutional upsampling then the reduction in feature maps will be\n # done by the transposed conv\n if self.convolutional_upsampling:\n final_num_features = output_features\n else:\n final_num_features = self.conv_blocks_context[-1].output_channels\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[num_pool]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[num_pool]\n self.conv_blocks_context.append(nn.Sequential(\n StackedConvLayers(input_features, output_features, num_conv_per_stage - 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, first_stride, basic_block=basic_block),\n StackedConvLayers(output_features, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs, self.nonlin,\n self.nonlin_kwargs, basic_block=basic_block)))\n\n # if we don't want to do dropout in the localization pathway then we set the dropout prob to zero here\n if not dropout_in_localization:\n old_dropout_p = self.dropout_op_kwargs['p']\n self.dropout_op_kwargs['p'] = 0.0\n\n # now lets build the localization pathway\n for u in range(num_pool):\n nfeatures_from_down = final_num_features\n nfeatures_from_skip = self.conv_blocks_context[\n -(2 + u)].output_channels # self.conv_blocks_context[-1] is bottleneck, so start with -2\n n_features_after_tu_and_concat = nfeatures_from_skip * 2\n\n # the first conv reduces the number of features to match those of skip\n # the following convs work on that number of features\n # if not convolutional upsampling then the final conv reduces the num of features again\n if u != num_pool - 1 and not self.convolutional_upsampling:\n final_num_features = self.conv_blocks_context[-(3 + u)].output_channels\n else:\n final_num_features = nfeatures_from_skip\n\n if not self.convolutional_upsampling:\n self.tu.append(Upsample(scale_factor=pool_op_kernel_sizes[-(u + 1)], mode=upsample_mode))\n else:\n self.tu.append(transpconv(nfeatures_from_down, nfeatures_from_skip, pool_op_kernel_sizes[-(u + 1)],\n pool_op_kernel_sizes[-(u + 1)], bias=False))\n\n self.conv_kwargs['kernel_size'] = self.conv_kernel_sizes[- (u + 1)]\n self.conv_kwargs['padding'] = self.conv_pad_sizes[- (u + 1)]\n self.conv_blocks_localization.append(nn.Sequential(\n StackedConvLayers(n_features_after_tu_and_concat, nfeatures_from_skip, num_conv_per_stage - 1,\n self.conv_op, self.conv_kwargs, self.norm_op, self.norm_op_kwargs, self.dropout_op,\n self.dropout_op_kwargs, self.nonlin, self.nonlin_kwargs, basic_block=basic_block),\n StackedConvLayers(nfeatures_from_skip, final_num_features, 1, self.conv_op, self.conv_kwargs,\n self.norm_op, self.norm_op_kwargs, self.dropout_op, self.dropout_op_kwargs,\n self.nonlin, self.nonlin_kwargs, basic_block=basic_block)\n ))\n\n for ds in range(len(self.conv_blocks_localization)):\n self.seg_outputs.append(conv_op(self.conv_blocks_localization[ds][-1].output_channels, num_classes,\n 1, 1, 0, 1, 1, seg_output_use_bias))\n\n self.upscale_logits_ops = []\n cum_upsample = np.cumprod(np.vstack(pool_op_kernel_sizes), axis=0)[::-1]\n for usl in range(num_pool - 1):\n if self.upscale_logits:\n self.upscale_logits_ops.append(Upsample(scale_factor=tuple([int(i) for i in cum_upsample[usl + 1]]),\n mode=upsample_mode))\n else:\n self.upscale_logits_ops.append(lambda x: x)\n\n if not dropout_in_localization:\n self.dropout_op_kwargs['p'] = old_dropout_p\n\n # register all modules properly\n 
self.conv_blocks_localization = nn.ModuleList(self.conv_blocks_localization)\n self.conv_blocks_context = nn.ModuleList(self.conv_blocks_context)\n self.td = nn.ModuleList(self.td)\n self.tu = nn.ModuleList(self.tu)\n self.seg_outputs = nn.ModuleList(self.seg_outputs)\n if self.upscale_logits:\n self.upscale_logits_ops = nn.ModuleList(\n self.upscale_logits_ops) # lambda x:x is not a Module so we need to distinguish here\n\n if self.weightInitializer is not None:\n self.apply(self.weightInitializer)\n # self.apply(print_module_training_status)\n\n def forward(self, x):\n skips = []\n seg_outputs = []\n for d in range(len(self.conv_blocks_context) - 1):\n x = self.conv_blocks_context[d](x)\n skips.append(x)\n if not self.convolutional_pooling:\n x = self.td[d](x) # downsample\n\n x = self.conv_blocks_context[-1](x)\n\n\n for u in range(len(self.tu)):\n x = self.tu[u](x) # upsample\n x = torch.cat((x, skips[-(u + 1)]), dim=1)\n x = self.conv_blocks_localization[u](x)\n seg_outputs.append(self.final_nonlin(self.seg_outputs[u](x)))\n\n if self._deep_supervision and self.do_ds:\n return tuple([seg_outputs[-1]] + [i(j) for i, j in\n zip(list(self.upscale_logits_ops)[::-1], seg_outputs[:-1][::-1])])\n else:\n return seg_outputs[-1]\n\n @staticmethod\n def compute_approx_vram_consumption(patch_size, num_pool_per_axis, base_num_features, max_num_features,\n num_modalities, num_classes, pool_op_kernel_sizes, deep_supervision=False,\n conv_per_stage=2):\n \"\"\"\n This only applies for num_conv_per_stage and convolutional_upsampling=True\n not real vram consumption. just a constant term to which the vram consumption will be approx proportional\n (+ offset for parameter storage)\n :param deep_supervision:\n :param patch_size:\n :param num_pool_per_axis:\n :param base_num_features:\n :param max_num_features:\n :param num_modalities:\n :param num_classes:\n :param pool_op_kernel_sizes:\n :return:\n \"\"\"\n if not isinstance(num_pool_per_axis, np.ndarray):\n num_pool_per_axis = np.array(num_pool_per_axis)\n\n npool = len(pool_op_kernel_sizes)\n\n map_size = np.array(patch_size)\n tmp = np.int64((conv_per_stage * 2 + 1) * np.prod(map_size, dtype=np.int64) * base_num_features +\n num_modalities * np.prod(map_size, dtype=np.int64) +\n num_classes * np.prod(map_size, dtype=np.int64))\n\n num_feat = base_num_features\n\n for p in range(npool):\n for pi in range(len(num_pool_per_axis)):\n map_size[pi] /= pool_op_kernel_sizes[p][pi]\n num_feat = min(num_feat * 2, max_num_features)\n num_blocks = (conv_per_stage * 2 + 1) if p < (npool - 1) else conv_per_stage # conv_per_stage + conv_per_stage for the convs of encode/decode and 1 for transposed conv\n tmp += num_blocks * np.prod(map_size, dtype=np.int64) * num_feat\n if deep_supervision and p < (npool - 2):\n tmp += np.prod(map_size, dtype=np.int64) * num_classes\n # print(p, map_size, num_feat, tmp)\n return tmp" }, { "identifier": "default_2D_augmentation_params", "path": "nn_transunet/data/default_data_augmentation.py", "snippet": "def get_patch_size(final_patch_size, rot_x, rot_y, rot_z, scale_range):\ndef get_default_augmentation(dataloader_train, dataloader_val, patch_size, params=default_3D_augmentation_params,\n border_val_seg=-1, pin_memory=True,\n seeds_train=None, seeds_val=None, regions=None):" }, { "identifier": "unpack_dataset", "path": "nn_transunet/data/dataset_loading.py", "snippet": "def unpack_dataset(folder, threads=default_num_threads, key=\"data\"):\n \"\"\"\n unpacks all npz files in a folder to npy (whatever you want to have unpacked must 
be saved unter key)\n :param folder:\n :param threads:\n :param key:\n :return:\n \"\"\"\n p = Pool(threads)\n npz_files = subfiles(folder, True, None, \".npz\", True)\n p.map(convert_to_npy, zip(npz_files, [key] * len(npz_files)))\n p.close()\n p.join()" } ]
from collections import OrderedDict from typing import Tuple from ..data.data_augmentation_moreDA import get_moreDA_augmentation from ..trainer.loss_functions import MultipleOutputLoss2 from ..trainer.network_trainer import maybe_to_torch, to_cuda from ..trainer.nnUNetTrainer import nnUNetTrainer from ..networks.nnunet_model import Generic_UNet from ..data.default_data_augmentation import default_2D_augmentation_params, \ get_patch_size, default_3D_augmentation_params from ..data.dataset_loading import unpack_dataset from sklearn.model_selection import KFold from torch.cuda.amp import autocast from batchgenerators.utilities.file_and_folder_operations import * from torch import nn from loss_functions import DC_and_CE_loss from ..networks.transunet3d_model import Generic_TransUNet_max_ppbp import numpy as np import torch import torch.nn.functional as F
16882
# See the License for the specific language governing permissions and # limitations under the License. softmax_helper = lambda x: F.softmax(x, 1) def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs)**exponent class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class InitWeights_XavierUniform(object): def __init__(self, gain=1): self.gain = gain def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.xavier_uniform_(module.weight, self.gain) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) if args is not None: self.input_size=input_size self.model = args.model self.resume = args.resume self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 1 gpu training self.initial_lr = args.initial_lr # 0.01 self.args = args if self.disable_ds: print("disable_ds") # print("not runnable for this feature! current nnunetV2 (w/o DDP) only support deep supervision version") # raise NotImplementedError else: print("runnning DDP, inheriting nnUNetTrainerV2") self.save_every = 1 # prev 50 # self.max_num_epochs = 1000 # self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. 
Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) else: # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset")
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. softmax_helper = lambda x: F.softmax(x, 1) def poly_lr(epoch, max_epochs, initial_lr, exponent=0.9): return initial_lr * (1 - epoch / max_epochs)**exponent class InitWeights_He(object): def __init__(self, neg_slope=1e-2): self.neg_slope = neg_slope def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.kaiming_normal_(module.weight, a=self.neg_slope) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class InitWeights_XavierUniform(object): def __init__(self, gain=1): self.gain = gain def __call__(self, module): if isinstance(module, nn.Conv3d) or isinstance(module, nn.Conv2d) or isinstance(module, nn.ConvTranspose2d) or isinstance(module, nn.ConvTranspose3d): module.weight = nn.init.xavier_uniform_(module.weight, self.gain) if module.bias is not None: module.bias = nn.init.constant_(module.bias, 0) class nnUNetTrainerV2(nnUNetTrainer): """ Info for Fabian: same as internal nnUNetTrainerV2_2 """ def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None, unpack_data=True, deterministic=True, fp16=False, input_size=(64, 160, 160),args=None): super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage, unpack_data, deterministic, fp16) if args is not None: self.input_size=input_size self.model = args.model self.resume = args.resume self.disable_ds=args.disable_ds self.max_num_epochs = args.max_num_epochs # set 1 gpu training self.initial_lr = args.initial_lr # 0.01 self.args = args if self.disable_ds: print("disable_ds") # print("not runnable for this feature! 
current nnunetV2 (w/o DDP) only support deep supervision version") # raise NotImplementedError else: print("runnning DDP, inheriting nnUNetTrainerV2") self.save_every = 1 # prev 50 # self.max_num_epochs = 1000 # self.initial_lr = 1e-2 self.deep_supervision_scales = None self.ds_loss_weights = None self.pin_memory = True def initialize(self, training=True, force_load_plans=False): """ - replaced get_default_augmentation with get_moreDA_augmentation - enforce to only run this code once - loss function wrapper for deep supervision :param training: :param force_load_plans: :return: """ if not self.was_initialized: maybe_mkdir_p(self.output_folder) if force_load_plans or (self.plans is None): self.load_plans_file() self.process_plans(self.plans) self.setup_DA_params() ################# Here we wrap the loss for deep supervision ############ # we need to know the number of outputs of the network net_numpool = len(self.net_num_pool_op_kernel_sizes) # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases # this gives higher resolution outputs more weight in the loss weights = np.array([1 / (2 ** i) for i in range(net_numpool)]) # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1 mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)]) weights[~mask] = 0 weights = weights / weights.sum() self.ds_loss_weights = weights if self.disable_ds: self.ds_loss_weights[0]=1 self.ds_loss_weights[1:]=0 self.loss = DC_and_CE_loss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False}, {}) else: # now wrap the loss self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights) ################# END ################### self.folder_with_preprocessed_data = join(self.dataset_directory, self.plans['data_identifier'] + "_stage%d" % self.stage) if training: self.dl_tr, self.dl_val = self.get_basic_generators() if self.unpack_data: print("unpacking dataset")
unpack_dataset(self.folder_with_preprocessed_data)
7
2023-10-11 05:19:25+00:00
24k
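
The row above ends here (next_line, gold_snippet_index, created_at, and level are its last fields). As a rough illustration of how those fields fit together, the minimal Python sketch below turns one such row into a next-line completion example: it pulls the snippet selected by gold_snippet_index out of context (index 7 above selects the unpack_dataset snippet, which is exactly the call next_line makes), prepends it together with import_statement to cropped_code, and keeps next_line as the target. The rows.jsonl filename and the prompt layout are assumptions made for this example, not something the dataset itself specifies.

```python
import json


def build_completion_example(row: dict) -> dict:
    """Turn one dataset row into a (prompt, target) next-line completion pair.

    Assumption: the prompt layout (gold context snippet as a commented header,
    then the imports, then the cropped code) is illustrative only; the dataset
    just supplies the raw fields.
    """
    # gold_snippet_index selects the entry in `context` that the target line
    # depends on (index 7 in the row above -> the `unpack_dataset` snippet).
    gold = row["context"][row["gold_snippet_index"]]
    header = f"# context: {gold['path']} ({gold['identifier']})\n{gold['snippet']}"

    prompt = "\n\n".join([header, row["import_statement"], row["cropped_code"]])
    return {
        "repo": row["repo_name"],    # e.g. "AMAAI-Lab/Video2Music" in the following row
        "file": row["file_path"],    # e.g. "train.py"
        "prompt": prompt,
        "target": row["next_line"],  # e.g. "unpack_dataset(self.folder_with_preprocessed_data)"
    }


if __name__ == "__main__":
    # "rows.jsonl" is a hypothetical JSON-Lines export of the rows shown in this dump.
    with open("rows.jsonl") as f:
        for line in f:
            example = build_completion_example(json.loads(line))
            print(example["repo"], example["file"], "->", example["target"])
```
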
AMAAI-Lab/Video2Music
train.py
[ { "identifier": "compute_vevo_accuracy", "path": "dataset/vevo_dataset.py", "snippet": "def compute_vevo_accuracy(out, tgt):\n softmax = nn.Softmax(dim=-1)\n out = torch.argmax(softmax(out), dim=-1)\n\n out = out.flatten()\n tgt = tgt.flatten()\n\n mask = (tgt != CHORD_PAD)\n\n out = out[mask]\n tgt = tgt[mask]\n\n if(len(tgt) == 0):\n return 1.0\n\n num_right = (out == tgt)\n num_right = torch.sum(num_right).type(TORCH_FLOAT)\n\n acc = num_right / len(tgt)\n\n return acc" }, { "identifier": "create_vevo_datasets", "path": "dataset/vevo_dataset.py", "snippet": "def create_vevo_datasets(dataset_root = \"./dataset\", max_seq_chord=300, max_seq_video=300, vis_models=\"2d/clip_l14p\", emo_model=\"6c_l14p\", split_ver=\"v1\", random_seq=True, is_video=True):\n\n train_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"train\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n val_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"val\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n test_dataset = VevoDataset(\n dataset_root = dataset_root, split=\"test\", split_ver=split_ver, \n vis_models=vis_models, emo_model =emo_model, max_seq_chord=max_seq_chord, max_seq_video=max_seq_video, \n random_seq=random_seq, is_video = is_video )\n \n return train_dataset, val_dataset, test_dataset" }, { "identifier": "MusicTransformer", "path": "model/music_transformer.py", "snippet": "class MusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi=2048, max_sequence_chord=300, rpr=False):\n super(MusicTransformer, self).__init__()\n\n self.dummy = DummyDecoder()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n\n # self.embedding_key = nn.Embedding(1, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n\n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy\n )\n # RPR Transformer\n else:\n encoder_norm = LayerNorm(self.d_model)\n encoder_layer = TransformerEncoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n\n encoder = TransformerEncoderRPR(encoder_layer, self.nlayers, encoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=0, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=self.dummy, custom_encoder=encoder\n )\n # Final output is a softmaxed linear layer\n self.Wout = nn.Linear(self.d_model, 
CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n\n # forward\n def forward(self, x, x_root, x_attr, feature_key, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n\n ### Chord + Key (DECODER) ###\n # x = self.embedding(x)\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n xf = self.Linear_chord(x)\n\n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n xf = self.positional_encoding(xf)\n \n ### TRANSFORMER ###\n x_out = self.transformer(src=xf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n \n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n\n # generate\n def generate(self, feature_key=None, primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0):\n assert (not self.training), \"Cannot generate while in training mode\"\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n print(\"Generating sequence of max length:\", target_seq_length)\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n \n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n # gen_seq_batch = gen_seq.clone()\n # y = self.softmax(self.forward(gen_seq[..., :cur_i]))[..., :CHORD_END]\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], feature_key) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n #print(\"next token:\",next_token)\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n 
gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n \n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "VideoMusicTransformer", "path": "model/video_music_transformer.py", "snippet": "class VideoMusicTransformer(nn.Module):\n def __init__(self, n_layers=6, num_heads=8, d_model=512, dim_feedforward=1024,\n dropout=0.1, max_sequence_midi =2048, max_sequence_video=300, max_sequence_chord=300, total_vf_dim = 0, rpr=False):\n super(VideoMusicTransformer, self).__init__()\n self.nlayers = n_layers\n self.nhead = num_heads\n self.d_model = d_model\n self.d_ff = dim_feedforward\n self.dropout = dropout\n self.max_seq_midi = max_sequence_midi\n self.max_seq_video = max_sequence_video\n self.max_seq_chord = max_sequence_chord\n self.rpr = rpr\n\n # Input embedding for video and music features\n self.embedding = nn.Embedding(CHORD_SIZE, self.d_model)\n self.embedding_root = nn.Embedding(CHORD_ROOT_SIZE, self.d_model)\n self.embedding_attr = nn.Embedding(CHORD_ATTR_SIZE, self.d_model)\n \n self.total_vf_dim = total_vf_dim\n self.Linear_vis = nn.Linear(self.total_vf_dim, self.d_model)\n self.Linear_chord = nn.Linear(self.d_model+1, self.d_model)\n \n # Positional encoding\n self.positional_encoding = PositionalEncoding(self.d_model, self.dropout, self.max_seq_chord)\n self.positional_encoding_video = PositionalEncoding(self.d_model, self.dropout, self.max_seq_video)\n\n # Add condition (minor or major)\n self.condition_linear = nn.Linear(1, self.d_model)\n \n # Base transformer\n if(not self.rpr):\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff\n )\n # RPR Transformer\n else:\n decoder_norm = LayerNorm(self.d_model)\n decoder_layer = TransformerDecoderLayerRPR(self.d_model, self.nhead, self.d_ff, self.dropout, er_len=self.max_seq_chord)\n decoder = TransformerDecoderRPR(decoder_layer, self.nlayers, decoder_norm)\n self.transformer = nn.Transformer(\n d_model=self.d_model, nhead=self.nhead, num_encoder_layers=self.nlayers,\n num_decoder_layers=self.nlayers, dropout=self.dropout, # activation=self.ff_activ,\n dim_feedforward=self.d_ff, custom_decoder=decoder\n ) \n \n self.Wout = nn.Linear(self.d_model, CHORD_SIZE)\n self.Wout_root = nn.Linear(self.d_model, CHORD_ROOT_SIZE)\n self.Wout_attr = nn.Linear(self.d_model, CHORD_ATTR_SIZE)\n self.softmax = nn.Softmax(dim=-1)\n \n def forward(self, x, x_root, x_attr, feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion, mask=True):\n if(mask is True):\n mask = self.transformer.generate_square_subsequent_mask(x.shape[1]).to(get_device())\n else:\n mask = None\n \n x_root = self.embedding_root(x_root)\n x_attr = self.embedding_attr(x_attr)\n x = x_root + x_attr\n\n feature_key_padded = torch.full((x.shape[0], x.shape[1], 1), feature_key.item())\n feature_key_padded = 
feature_key_padded.to(get_device())\n x = torch.cat([x, feature_key_padded], dim=-1)\n\n xf = self.Linear_chord(x)\n\n ### Video (SemanticList + SceneOffset + Motion + Emotion) (ENCODER) ###\n vf_concat = feature_semantic_list[0].float()\n\n for i in range(1, len(feature_semantic_list)):\n vf_concat = torch.cat( (vf_concat, feature_semantic_list[i].float()), dim=2) \n \n vf_concat = torch.cat([vf_concat, feature_scene_offset.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_motion.unsqueeze(-1).float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf_concat = torch.cat([vf_concat, feature_emotion.float()], dim=-1) # -> (max_seq_video, batch_size, d_model+1)\n vf = self.Linear_vis(vf_concat)\n \n ### POSITIONAL ENCODING ###\n xf = xf.permute(1,0,2) # -> (max_seq-1, batch_size, d_model)\n vf = vf.permute(1,0,2) # -> (max_seq_video, batch_size, d_model)\n xf = self.positional_encoding(xf)\n vf = self.positional_encoding_video(vf)\n\n ### TRANSFORMER ###\n x_out = self.transformer(src=vf, tgt=xf, tgt_mask=mask)\n x_out = x_out.permute(1,0,2)\n\n if IS_SEPERATED:\n y_root = self.Wout_root(x_out)\n y_attr = self.Wout_attr(x_out)\n del mask\n return y_root, y_attr\n else:\n y = self.Wout(x_out)\n del mask\n return y\n \n def generate(self, feature_semantic_list = [], feature_key=None, feature_scene_offset=None, feature_motion=None, feature_emotion=None,\n primer=None, primer_root=None, primer_attr=None, target_seq_length=300, beam=0, beam_chance=1.0, max_conseq_N = 0, max_conseq_chord = 2):\n \n assert (not self.training), \"Cannot generate while in training mode\"\n print(\"Generating sequence of max length:\", target_seq_length)\n\n with open('dataset/vevo_meta/chord_inv.json') as json_file:\n chordInvDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_root.json') as json_file:\n chordRootDic = json.load(json_file)\n with open('dataset/vevo_meta/chord_attr.json') as json_file:\n chordAttrDic = json.load(json_file)\n\n gen_seq = torch.full((1,target_seq_length), CHORD_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_root = torch.full((1,target_seq_length), CHORD_ROOT_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n gen_seq_attr = torch.full((1,target_seq_length), CHORD_ATTR_PAD, dtype=TORCH_LABEL_TYPE, device=get_device())\n \n num_primer = len(primer)\n gen_seq[..., :num_primer] = primer.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_root[..., :num_primer] = primer_root.type(TORCH_LABEL_TYPE).to(get_device())\n gen_seq_attr[..., :num_primer] = primer_attr.type(TORCH_LABEL_TYPE).to(get_device())\n\n cur_i = num_primer\n while(cur_i < target_seq_length):\n y = self.softmax( self.forward( gen_seq[..., :cur_i], gen_seq_root[..., :cur_i], gen_seq_attr[..., :cur_i], \n feature_semantic_list, feature_key, feature_scene_offset, feature_motion, feature_emotion) )[..., :CHORD_END]\n \n token_probs = y[:, cur_i-1, :]\n if(beam == 0):\n beam_ran = 2.0\n else:\n beam_ran = random.uniform(0,1)\n if(beam_ran <= beam_chance):\n token_probs = token_probs.flatten()\n top_res, top_i = torch.topk(token_probs, beam)\n beam_rows = top_i // CHORD_SIZE\n beam_cols = top_i % CHORD_SIZE\n gen_seq = gen_seq[beam_rows, :]\n gen_seq[..., cur_i] = beam_cols\n else:\n # token_probs.shape : [1, 157] \n # 0: N, 1: C, ... 
, 156: B:maj7\n # 157 chordEnd 158 padding\n if max_conseq_N == 0:\n token_probs[0][0] = 0.0\n isMaxChord = True\n if cur_i >= max_conseq_chord :\n preChord = gen_seq[0][cur_i-1].item() \n for k in range (1, max_conseq_chord):\n if preChord != gen_seq[0][cur_i-1-k].item():\n isMaxChord = False\n else:\n isMaxChord = False\n \n if isMaxChord:\n preChord = gen_seq[0][cur_i-1].item()\n token_probs[0][preChord] = 0.0\n \n distrib = torch.distributions.categorical.Categorical(probs=token_probs)\n next_token = distrib.sample()\n gen_seq[:, cur_i] = next_token\n gen_chord = chordInvDic[ str( next_token.item() ) ]\n \n chord_arr = gen_chord.split(\":\")\n if len(chord_arr) == 1:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = 1\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n elif len(chord_arr) == 2:\n chordRootID = chordRootDic[chord_arr[0]]\n chordAttrID = chordAttrDic[chord_arr[1]]\n chordRootID = torch.tensor([chordRootID]).to(get_device())\n chordAttrID = torch.tensor([chordAttrID]).to(get_device())\n gen_seq_root[:, cur_i] = chordRootID\n gen_seq_attr[:, cur_i] = chordAttrID\n \n # Let the transformer decide to end if it wants to\n if(next_token == CHORD_END):\n print(\"Model called end of sequence at:\", cur_i, \"/\", target_seq_length)\n break\n cur_i += 1\n if(cur_i % 50 == 0):\n print(cur_i, \"/\", target_seq_length)\n return gen_seq[:, :cur_i]" }, { "identifier": "SmoothCrossEntropyLoss", "path": "model/loss.py", "snippet": "class SmoothCrossEntropyLoss(_Loss):\n \"\"\"\n https://arxiv.org/abs/1512.00567\n \"\"\"\n __constants__ = ['label_smoothing', 'vocab_size', 'ignore_index', 'reduction']\n\n def __init__(self, label_smoothing, vocab_size, ignore_index=-100, reduction='mean', is_logits=True):\n assert 0.0 <= label_smoothing <= 1.0\n super().__init__(reduction=reduction)\n\n self.label_smoothing = label_smoothing\n self.vocab_size = vocab_size\n self.ignore_index = ignore_index\n self.input_is_logits = is_logits\n\n def forward(self, input, target):\n \"\"\"\n Args:\n input: [B * T, V]\n target: [B * T]\n Returns:\n cross entropy: [1]\n \"\"\"\n mask = (target == self.ignore_index).unsqueeze(-1)\n q = F.one_hot(target.long(), self.vocab_size).type(torch.float32)\n u = 1.0 / self.vocab_size\n q_prime = (1.0 - self.label_smoothing) * q + self.label_smoothing * u\n q_prime = q_prime.masked_fill(mask, 0)\n\n ce = self.cross_entropy_with_logits(q_prime, input)\n if self.reduction == 'mean':\n lengths = torch.sum(target != self.ignore_index)\n return ce.sum() / lengths\n elif self.reduction == 'sum':\n return ce.sum()\n else:\n raise NotImplementedError\n\n def cross_entropy_with_logits(self, p, q):\n return -torch.sum(p * (q - q.logsumexp(dim=-1, keepdim=True)), dim=-1)" }, { "identifier": "get_device", "path": "utilities/device.py", "snippet": "def get_device():\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Grabs the default device. 
Default device is CUDA if available and use_cuda is not False, CPU otherwise.\n ----------\n \"\"\"\n\n if((not USE_CUDA) or (TORCH_CUDA_DEVICE is None)):\n return TORCH_CPU_DEVICE\n else:\n return TORCH_CUDA_DEVICE" }, { "identifier": "use_cuda", "path": "utilities/device.py", "snippet": "def use_cuda(cuda_bool):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Sets whether to use CUDA (if available), or use the CPU (not recommended)\n ----------\n \"\"\"\n\n global USE_CUDA\n USE_CUDA = cuda_bool" }, { "identifier": "LrStepTracker", "path": "utilities/lr_scheduling.py", "snippet": "class LrStepTracker:\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Class for custom learn rate scheduler (to be used by torch.optim.lr_scheduler.LambdaLR).\n\n Learn rate for each step (batch) given the warmup steps is:\n lr = [ 1/sqrt(d_model) ] * min[ 1/sqrt(step) , step * (warmup_steps)^-1.5 ]\n\n This is from Attention is All you Need (https://arxiv.org/abs/1706.03762)\n ----------\n \"\"\"\n\n def __init__(self, model_dim=512, warmup_steps=4000, init_steps=0):\n # Store Values\n self.warmup_steps = warmup_steps\n self.model_dim = model_dim\n self.init_steps = init_steps\n\n # Begin Calculations\n self.invsqrt_dim = (1 / math.sqrt(model_dim))\n self.invsqrt_warmup = (1 / (warmup_steps * math.sqrt(warmup_steps)))\n\n # step\n def step(self, step):\n \"\"\"\n ----------\n Author: Ryan Marshall\n Modified: Damon Gwinn\n ----------\n Method to pass to LambdaLR. Increments the step and computes the new learn rate.\n ----------\n \"\"\"\n\n step += self.init_steps\n if(step <= self.warmup_steps):\n return self.invsqrt_dim * self.invsqrt_warmup * step\n else:\n invsqrt_step = (1 / math.sqrt(step))\n return self.invsqrt_dim * invsqrt_step" }, { "identifier": "get_lr", "path": "utilities/lr_scheduling.py", "snippet": "def get_lr(optimizer):\n \"\"\"\n ----------\n Author: Damon Gwinn\n ----------\n Hack to get the current learn rate of the model\n ----------\n \"\"\"\n\n for param_group in optimizer.param_groups:\n return param_group['lr']" }, { "identifier": "parse_train_args", "path": "utilities/argument_funcs.py", "snippet": "def parse_train_args():\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"-dataset_dir\", type=str, default=\"./dataset/\", help=\"Folder of VEVO dataset\")\n \n parser.add_argument(\"-input_dir_music\", type=str, default=\"./dataset/vevo_chord/\" + MUSIC_TYPE, help=\"Folder of video CNN feature files\")\n parser.add_argument(\"-input_dir_video\", type=str, default=\"./dataset/vevo_vis\", help=\"Folder of video CNN feature files\")\n\n parser.add_argument(\"-output_dir\", type=str, default=\"./saved_models\", help=\"Folder to save model weights. 
Saves one every epoch\")\n \n parser.add_argument(\"-weight_modulus\", type=int, default=1, help=\"How often to save epoch weights (ex: value of 10 means save every 10 epochs)\")\n parser.add_argument(\"-print_modulus\", type=int, default=1, help=\"How often to print train results for a batch (batch loss, learn rate, etc.)\")\n parser.add_argument(\"-n_workers\", type=int, default=1, help=\"Number of threads for the dataloader\")\n parser.add_argument(\"--force_cpu\", action=\"store_true\", help=\"Forces model to run on a cpu even when gpu is available\")\n parser.add_argument(\"--no_tensorboard\", action=\"store_true\", help=\"Turns off tensorboard result reporting\")\n parser.add_argument(\"-continue_weights\", type=str, default=None, help=\"Model weights to continue training based on\")\n parser.add_argument(\"-continue_epoch\", type=int, default=None, help=\"Epoch the continue_weights model was at\")\n parser.add_argument(\"-lr\", type=float, default=None, help=\"Constant learn rate. Leave as None for a custom scheduler.\")\n parser.add_argument(\"-ce_smoothing\", type=float, default=None, help=\"Smoothing parameter for smoothed cross entropy loss (defaults to no smoothing)\")\n parser.add_argument(\"-batch_size\", type=int, default=1, help=\"Batch size to use\")\n parser.add_argument(\"-epochs\", type=int, default=5, help=\"Number of epochs to use\")\n\n parser.add_argument(\"-max_sequence_midi\", type=int, default=2048, help=\"Maximum midi sequence to consider\")\n parser.add_argument(\"-max_sequence_video\", type=int, default=300, help=\"Maximum video sequence to consider\")\n parser.add_argument(\"-max_sequence_chord\", type=int, default=300, help=\"Maximum video sequence to consider\")\n\n parser.add_argument(\"-n_layers\", type=int, default=6, help=\"Number of decoder layers to use\")\n parser.add_argument(\"-num_heads\", type=int, default=8, help=\"Number of heads to use for multi-head attention\")\n parser.add_argument(\"-d_model\", type=int, default=512, help=\"Dimension of the model (output dim of embedding layers, etc.)\")\n parser.add_argument(\"-dim_feedforward\", type=int, default=1024, help=\"Dimension of the feedforward layer\")\n parser.add_argument(\"-dropout\", type=float, default=0.1, help=\"Dropout rate\")\n\n parser.add_argument(\"-is_video\", type=bool, default=IS_VIDEO, help=\"MusicTransformer or VideoMusicTransformer\")\n\n if IS_VIDEO:\n parser.add_argument(\"-vis_models\", type=str, default=VIS_MODELS_SORTED, help=\"...\")\n else:\n parser.add_argument(\"-vis_models\", type=str, default=\"\", help=\"...\")\n\n parser.add_argument(\"-emo_model\", type=str, default=\"6c_l14p\", help=\"...\")\n parser.add_argument(\"-rpr\", type=bool, default=RPR, help=\"...\")\n return parser.parse_args()" }, { "identifier": "print_train_args", "path": "utilities/argument_funcs.py", "snippet": "def print_train_args(args):\n print(SEPERATOR)\n \n print(\"dataset_dir:\", args.dataset_dir )\n \n print(\"input_dir_music:\", args.input_dir_music)\n print(\"input_dir_video:\", args.input_dir_video)\n\n print(\"output_dir:\", args.output_dir)\n\n print(\"weight_modulus:\", args.weight_modulus)\n print(\"print_modulus:\", args.print_modulus)\n print(\"\")\n print(\"n_workers:\", args.n_workers)\n print(\"force_cpu:\", args.force_cpu)\n print(\"tensorboard:\", not args.no_tensorboard)\n print(\"\")\n print(\"continue_weights:\", args.continue_weights)\n print(\"continue_epoch:\", args.continue_epoch)\n print(\"\")\n print(\"lr:\", args.lr)\n print(\"ce_smoothing:\", args.ce_smoothing)\n 
print(\"batch_size:\", args.batch_size)\n print(\"epochs:\", args.epochs)\n print(\"\")\n print(\"rpr:\", args.rpr)\n\n print(\"max_sequence_midi:\", args.max_sequence_midi)\n print(\"max_sequence_video:\", args.max_sequence_video)\n print(\"max_sequence_chord:\", args.max_sequence_chord)\n \n print(\"n_layers:\", args.n_layers)\n print(\"num_heads:\", args.num_heads)\n print(\"d_model:\", args.d_model)\n print(\"\")\n print(\"dim_feedforward:\", args.dim_feedforward)\n print(\"dropout:\", args.dropout)\n print(\"is_video:\", args.is_video)\n\n print(SEPERATOR)\n print(\"\")" }, { "identifier": "write_model_params", "path": "utilities/argument_funcs.py", "snippet": "def write_model_params(args, output_file):\n o_stream = open(output_file, \"w\")\n\n o_stream.write(\"rpr: \" + str(args.rpr) + \"\\n\")\n o_stream.write(\"lr: \" + str(args.lr) + \"\\n\")\n o_stream.write(\"ce_smoothing: \" + str(args.ce_smoothing) + \"\\n\")\n o_stream.write(\"batch_size: \" + str(args.batch_size) + \"\\n\")\n\n o_stream.write(\"max_sequence_midi: \" + str(args.max_sequence_midi) + \"\\n\")\n o_stream.write(\"max_sequence_video: \" + str(args.max_sequence_video) + \"\\n\")\n o_stream.write(\"max_sequence_chord: \" + str(args.max_sequence_chord) + \"\\n\")\n \n o_stream.write(\"n_layers: \" + str(args.n_layers) + \"\\n\")\n o_stream.write(\"num_heads: \" + str(args.num_heads) + \"\\n\")\n o_stream.write(\"d_model: \" + str(args.d_model) + \"\\n\")\n o_stream.write(\"dim_feedforward: \" + str(args.dim_feedforward) + \"\\n\")\n o_stream.write(\"dropout: \" + str(args.dropout) + \"\\n\")\n\n o_stream.write(\"is_video: \" + str(args.is_video) + \"\\n\")\n o_stream.write(\"vis_models: \" + str(args.vis_models) + \"\\n\")\n o_stream.write(\"input_dir_music: \" + str(args.input_dir_music) + \"\\n\")\n o_stream.write(\"input_dir_video: \" + str(args.input_dir_video) + \"\\n\")\n\n o_stream.close()" }, { "identifier": "train_epoch", "path": "utilities/run_model_vevo.py", "snippet": "def train_epoch(cur_epoch, model, dataloader, \n train_loss_func, train_loss_emotion_func,\n opt, lr_scheduler=None, print_modulus=1, isVideo=True):\n \n loss_chord = -1\n loss_emotion = -1\n model.train()\n for batch_num, batch in enumerate(dataloader):\n time_before = time.time()\n opt.zero_grad()\n\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n\n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n # use VideoMusicTransformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = 
train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = train_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n \n else:\n #videomusic tran nosep\n y = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = train_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n else:\n # music transformer\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = train_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = train_loss_func.forward(y_attr, tgt_attr)\n\n loss_chord = loss_chord_root + loss_chord_attr\n loss_emotion = -1\n \n total_loss = loss_chord\n total_loss.backward()\n opt.step()\n if(lr_scheduler is not None):\n lr_scheduler.step()\n else:\n # use MusicTransformer (no sep)\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n\n loss_chord = train_loss_func.forward(y, tgt)\n loss_emotion = -1\n\n total_loss = loss_chord\n total_loss.backward()\n\n opt.step()\n\n if(lr_scheduler is not None):\n lr_scheduler.step()\n\n time_after = time.time()\n time_took = time_after - time_before\n \n if((batch_num+1) % print_modulus == 0):\n print(SEPERATOR)\n print(\"Epoch\", cur_epoch, \" Batch\", batch_num+1, \"/\", len(dataloader))\n print(\"LR:\", get_lr(opt))\n print(\"Train loss (total):\", float(total_loss))\n print(\"Train loss (chord):\", float(loss_chord))\n print(\"Train loss (emotion):\", float(loss_emotion))\n print(\"\")\n print(\"Time (s):\", time_took)\n print(SEPERATOR)\n print(\"\")\n return" }, { "identifier": "eval_model", "path": "utilities/run_model_vevo.py", "snippet": "def eval_model(model, dataloader, \n eval_loss_func, eval_loss_emotion_func,\n isVideo = True, isGenConfusionMatrix=False):\n model.eval()\n avg_acc = -1\n avg_cor = -1\n avg_acc_cor = -1\n\n avg_h1 = -1\n avg_h3 = -1\n avg_h5 = -1\n \n avg_loss_chord = -1\n avg_loss_emotion = -1\n avg_total_loss = -1\n\n true_labels = []\n true_root_labels = []\n true_attr_labels = []\n \n pred_labels = []\n pred_root_labels = []\n pred_attr_labels = []\n \n with torch.set_grad_enabled(False):\n n_test = len(dataloader)\n n_test_cor = 0 \n\n sum_loss_chord = 0.0\n sum_loss_emotion = 0.0\n sum_total_loss = 0.0\n\n sum_acc = 0.0\n sum_cor = 0.0\n\n sum_h1 = 0.0\n sum_h3 = 0.0\n sum_h5 = 0.0\n \n for batch in dataloader:\n x = batch[\"x\"].to(get_device())\n tgt = batch[\"tgt\"].to(get_device())\n x_root = batch[\"x_root\"].to(get_device())\n tgt_root = 
batch[\"tgt_root\"].to(get_device())\n x_attr = batch[\"x_attr\"].to(get_device())\n tgt_attr = batch[\"tgt_attr\"].to(get_device())\n tgt_emotion = batch[\"tgt_emotion\"].to(get_device())\n tgt_emotion_prob = batch[\"tgt_emotion_prob\"].to(get_device())\n \n feature_semantic_list = [] \n for feature_semantic in batch[\"semanticList\"]:\n feature_semantic_list.append( feature_semantic.to(get_device()) )\n \n feature_key = batch[\"key\"].to(get_device())\n feature_scene_offset = batch[\"scene_offset\"].to(get_device())\n feature_motion = batch[\"motion\"].to(get_device())\n feature_emotion = batch[\"emotion\"].to(get_device())\n\n if isVideo:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n y= model(x,\n x_root,\n x_attr,\n feature_semantic_list, \n feature_key, \n feature_scene_offset,\n feature_motion,\n feature_emotion)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n\n tgt = tgt.flatten()\n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n \n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n if isGenConfusionMatrix:\n pred = y.argmax(dim=1).detach().cpu().numpy()\n pred_root = []\n pred_attr = []\n\n for i in pred:\n if i == 0:\n pred_root.append(0)\n pred_attr.append(0)\n elif i == 157:\n pred_root.append(CHORD_ROOT_END)\n pred_attr.append(CHORD_ATTR_END)\n elif i == 158:\n pred_root.append(CHORD_ROOT_PAD)\n pred_attr.append(CHORD_ATTR_PAD)\n else:\n rootindex = int( (i-1)/13 ) + 1\n attrindex = (i-1)%13 + 1\n pred_root.append(rootindex)\n pred_attr.append(attrindex)\n \n pred_root = 
np.array(pred_root)\n pred_attr = np.array(pred_attr)\n\n true = tgt.detach().cpu().numpy()\n true_root = tgt_root.detach().cpu().numpy()\n true_attr = tgt_attr.detach().cpu().numpy()\n \n pred_labels.extend(pred)\n pred_root_labels.extend(pred_root)\n pred_attr_labels.extend(pred_attr)\n \n true_labels.extend(true)\n true_root_labels.extend(true_root)\n true_attr_labels.extend(true_attr)\n else:\n if IS_SEPERATED:\n y_root, y_attr = model(x,\n x_root,\n x_attr,\n feature_key)\n\n sum_acc += float(compute_vevo_accuracy_root_attr(y_root, y_attr, tgt))\n cor = float(compute_vevo_correspondence_root_attr(y_root, y_attr, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,1))\n sum_h3 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,3))\n sum_h5 += float(compute_hits_k_root_attr(y_root, y_attr, tgt,5))\n \n y_root = y_root.reshape(y_root.shape[0] * y_root.shape[1], -1)\n y_attr = y_attr.reshape(y_attr.shape[0] * y_attr.shape[1], -1)\n \n tgt_root = tgt_root.flatten()\n tgt_attr = tgt_attr.flatten()\n tgt_emotion = tgt_emotion.squeeze()\n\n loss_chord_root = eval_loss_func.forward(y_root, tgt_root)\n loss_chord_attr = eval_loss_func.forward(y_attr, tgt_attr)\n loss_chord = loss_chord_root + loss_chord_attr\n\n first_14 = tgt_emotion[:, :14]\n last_2 = tgt_emotion[:, -2:]\n tgt_emotion_attr = torch.cat((first_14, last_2), dim=1)\n loss_emotion = eval_loss_emotion_func.forward(y_attr, tgt_emotion_attr)\n \n total_loss = LOSS_LAMBDA * loss_chord + (1-LOSS_LAMBDA) * loss_emotion\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n else:\n # use MusicTransformer no sep\n y = model(x,\n x_root,\n x_attr,\n feature_key)\n \n sum_acc += float(compute_vevo_accuracy(y, tgt ))\n cor = float(compute_vevo_correspondence(y, tgt, tgt_emotion, tgt_emotion_prob, EMOTION_THRESHOLD))\n \n if cor >= 0 :\n n_test_cor +=1\n sum_cor += cor\n\n sum_h1 += float(compute_hits_k(y, tgt,1))\n sum_h3 += float(compute_hits_k(y, tgt,3))\n sum_h5 += float(compute_hits_k(y, tgt,5))\n\n tgt_emotion = tgt_emotion.squeeze()\n \n y = y.reshape(y.shape[0] * y.shape[1], -1)\n tgt = tgt.flatten()\n loss_chord = eval_loss_func.forward(y, tgt)\n loss_emotion = eval_loss_emotion_func.forward(y, tgt_emotion)\n total_loss = loss_chord\n\n sum_loss_chord += float(loss_chord)\n sum_loss_emotion += float(loss_emotion)\n sum_total_loss += float(total_loss)\n\n avg_loss_chord = sum_loss_chord / n_test\n avg_loss_emotion = sum_loss_emotion / n_test\n avg_total_loss = sum_total_loss / n_test\n\n avg_acc = sum_acc / n_test\n avg_cor = sum_cor / n_test_cor\n \n avg_h1 = sum_h1 / n_test\n avg_h3 = sum_h3 / n_test\n avg_h5 = sum_h5 / n_test\n \n avg_acc_cor = (avg_acc + avg_cor)/ 2.0\n\n if isGenConfusionMatrix:\n chordInvDicPath = \"./dataset/vevo_meta/chord_inv.json\"\n chordRootInvDicPath = \"./dataset/vevo_meta/chord_root_inv.json\"\n chordAttrInvDicPath = \"./dataset/vevo_meta/chord_attr_inv.json\"\n \n with open(chordInvDicPath) as json_file:\n chordInvDic = json.load(json_file)\n with open(chordRootInvDicPath) as json_file:\n chordRootInvDic = json.load(json_file)\n with open(chordAttrInvDicPath) as json_file:\n chordAttrInvDic = json.load(json_file)\n\n # Confusion matrix (CHORD)\n topChordList = []\n with open(\"./dataset/vevo_meta/top_chord.txt\", encoding = 'utf-8') as f:\n for line in f:\n line = line.strip()\n line_arr = line.split(\" \")\n if 
len(line_arr) == 3 :\n chordID = line_arr[1]\n topChordList.append( int(chordID) )\n topChordList = np.array(topChordList)\n topChordList = topChordList[:10]\n mask = np.isin(true_labels, topChordList)\n true_labels = np.array(true_labels)[mask]\n pred_labels = np.array(pred_labels)[mask]\n\n conf_matrix = confusion_matrix(true_labels, pred_labels, labels=topChordList)\n label_names = [ chordInvDic[str(label_id)] for label_id in topChordList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix\")\n plt.colorbar()\n tick_marks = np.arange(len(topChordList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix.png\")\n plt.show()\n\n # Confusion matrix (CHORD ROOT) \n chordRootList = np.arange(1, 13)\n conf_matrix = confusion_matrix(true_root_labels, pred_root_labels, labels= chordRootList )\n \n label_names = [ chordRootInvDic[str(label_id)] for label_id in chordRootList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord root)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordRootList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_root.png\")\n plt.show()\n\n # Confusion matrix (CHORD ATTR)\n chordAttrList = np.arange(1, 14)\n conf_matrix = confusion_matrix(true_attr_labels, pred_attr_labels, labels= chordAttrList )\n \n label_names = [ chordAttrInvDic[str(label_id)] for label_id in chordAttrList ]\n \n plt.figure(figsize=(8, 6))\n plt.imshow(conf_matrix, cmap=plt.cm.Blues)\n plt.title(\"Confusion Matrix (Chord quality)\")\n plt.colorbar()\n tick_marks = np.arange(len(chordAttrList))\n plt.xticks(tick_marks, label_names, rotation=45)\n plt.yticks(tick_marks, label_names)\n thresh = conf_matrix.max() / 2.0\n for i in range(conf_matrix.shape[0]):\n for j in range(conf_matrix.shape[1]):\n plt.text(j, i, format(conf_matrix[i, j], 'd'),\n ha=\"center\", va=\"center\",\n color=\"white\" if conf_matrix[i, j] > thresh else \"black\")\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n plt.savefig(\"confusion_matrix_quality.png\")\n plt.show()\n\n return { \"avg_total_loss\" : avg_total_loss, \n \"avg_loss_chord\" : avg_loss_chord, \n \"avg_loss_emotion\": avg_loss_emotion, \n \"avg_acc\" : avg_acc, \n \"avg_cor\" : avg_cor, \n \"avg_acc_cor\" : avg_acc_cor, \n \"avg_h1\" : avg_h1, \n \"avg_h3\" : avg_h3,\n \"avg_h5\" : avg_h5 }" } ]
import os import csv import shutil import torch import torch.nn as nn from torch.optim.lr_scheduler import LambdaLR from torch.utils.data import DataLoader from torch.optim import Adam from dataset.vevo_dataset import compute_vevo_accuracy, create_vevo_datasets from model.music_transformer import MusicTransformer from model.video_music_transformer import VideoMusicTransformer from model.loss import SmoothCrossEntropyLoss from utilities.constants import * from utilities.device import get_device, use_cuda from utilities.lr_scheduling import LrStepTracker, get_lr from utilities.argument_funcs import parse_train_args, print_train_args, write_model_params from utilities.run_model_vevo import train_epoch, eval_model from torch.utils.tensorboard import SummaryWriter
14,509
if args.is_video: for vf in train_dataset[0]["semanticList"]: total_vf_dim += vf.shape[1] total_vf_dim += 1 # Scene_offset total_vf_dim += 1 # Motion # Emotion if args.emo_model.startswith("6c"): total_vf_dim += 6 else: total_vf_dim += 5 train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) if args.is_video: model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video, max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim, rpr=args.rpr).to(get_device()) else: model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord, rpr=args.rpr).to(get_device()) start_epoch = BASELINE_EPOCH if(args.continue_weights is not None): if(args.continue_epoch is None): print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") assert(False) else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif(args.continue_epoch is not None): print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") assert(False) ##### Lr Scheduler vs static lr ##### if(args.lr is None): if(args.continue_epoch is None): init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=CHORD_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if(args.ce_smoothing is None): train_loss_func = eval_loss_func else: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, CHORD_SIZE, ignore_index=CHORD_PAD) eval_loss_emotion_func = nn.BCEWithLogitsLoss() train_loss_emotion_func = eval_loss_emotion_func ##### Optimizer ##### opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) if(args.lr is None): lr_scheduler = LambdaLR(opt, lr_stepper.step) else: lr_scheduler = None ##### Tracking best evaluation loss ##### best_eval_loss = float("inf") best_eval_loss_epoch = -1 ##### Results reporting ##### if(not os.path.isfile(results_file)): with open(results_file, "w", newline="") as o_stream: writer = csv.writer(o_stream) writer.writerow(CSV_HEADER) ##### TRAIN LOOP ##### for epoch in range(start_epoch, args.epochs): if(epoch > BASELINE_EPOCH): print(SEPERATOR) print("NEW EPOCH:", epoch+1) print(SEPERATOR) print("") # Train train_epoch(epoch+1, model, train_loader, train_loss_func, train_loss_emotion_func, opt, lr_scheduler, args.print_modulus, isVideo= args.is_video) print(SEPERATOR) print("Evaluating:") else: print(SEPERATOR) print("Baseline model evaluation (Epoch 0):") train_metric_dict = eval_model(model, train_loader, train_loss_func, train_loss_emotion_func, isVideo= args.is_video) train_total_loss = train_metric_dict["avg_total_loss"] train_loss_chord = train_metric_dict["avg_loss_chord"] train_loss_emotion = train_metric_dict["avg_loss_emotion"] train_h1 = train_metric_dict["avg_h1"] train_h3 = train_metric_dict["avg_h3"] train_h5 = 
train_metric_dict["avg_h5"] eval_metric_dict = eval_model(model, val_loader, eval_loss_func, eval_loss_emotion_func, isVideo= args.is_video) eval_total_loss = eval_metric_dict["avg_total_loss"] eval_loss_chord = eval_metric_dict["avg_loss_chord"] eval_loss_emotion = eval_metric_dict["avg_loss_emotion"] eval_h1 = eval_metric_dict["avg_h1"] eval_h3 = eval_metric_dict["avg_h3"] eval_h5 = eval_metric_dict["avg_h5"]
CSV_HEADER = ["Epoch", "Learn rate", "Avg Train loss (total)", "Avg Train loss (chord)", "Avg Train loss (emotion)", "Avg Eval loss (total)", "Avg Eval loss (chord)", "Avg Eval loss (emotion)"] BASELINE_EPOCH = -1 version = VERSION split_ver = SPLIT_VER split_path = "split_" + split_ver VIS_MODELS_ARR = [ "2d/clip_l14p" ] # main def main( vm = "" , isPrintArgs = True ): args = parse_train_args() if isPrintArgs: print_train_args(args) if vm != "": args.vis_models = vm if args.is_video: vis_arr = args.vis_models.split(" ") vis_arr.sort() vis_abbr_path = "" for v in vis_arr: vis_abbr_path = vis_abbr_path + "_" + VIS_ABBR_DIC[v] vis_abbr_path = vis_abbr_path[1:] else: vis_abbr_path = "no_video" if(args.force_cpu): use_cuda(False) print("WARNING: Forced CPU usage, expect model to perform slower") print("") os.makedirs( args.output_dir, exist_ok=True) os.makedirs( os.path.join( args.output_dir, version), exist_ok=True) ##### Output prep ##### params_file = os.path.join(args.output_dir, version, "model_params.txt") write_model_params(args, params_file) weights_folder = os.path.join(args.output_dir, version, "weights") os.makedirs(weights_folder, exist_ok=True) results_folder = os.path.join(args.output_dir, version) os.makedirs(results_folder, exist_ok=True) results_file = os.path.join(results_folder, "results.csv") best_loss_file = os.path.join(results_folder, "best_loss_weights.pickle") best_text = os.path.join(results_folder, "best_epochs.txt") ##### Tensorboard ##### if(args.no_tensorboard): tensorboard_summary = None else: tensorboad_dir = os.path.join(args.output_dir, version, "tensorboard") tensorboard_summary = SummaryWriter(log_dir=tensorboad_dir) train_dataset, val_dataset, _ = create_vevo_datasets( dataset_root = "./dataset/", max_seq_chord = args.max_sequence_chord, max_seq_video = args.max_sequence_video, vis_models = args.vis_models, emo_model = args.emo_model, split_ver = SPLIT_VER, random_seq = True, is_video = args.is_video) total_vf_dim = 0 if args.is_video: for vf in train_dataset[0]["semanticList"]: total_vf_dim += vf.shape[1] total_vf_dim += 1 # Scene_offset total_vf_dim += 1 # Motion # Emotion if args.emo_model.startswith("6c"): total_vf_dim += 6 else: total_vf_dim += 5 train_loader = DataLoader(train_dataset, batch_size=args.batch_size, num_workers=args.n_workers, shuffle=True) val_loader = DataLoader(val_dataset, batch_size=args.batch_size, num_workers=args.n_workers) if args.is_video: model = VideoMusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_video=args.max_sequence_video, max_sequence_chord=args.max_sequence_chord, total_vf_dim=total_vf_dim, rpr=args.rpr).to(get_device()) else: model = MusicTransformer(n_layers=args.n_layers, num_heads=args.num_heads, d_model=args.d_model, dim_feedforward=args.dim_feedforward, dropout=args.dropout, max_sequence_midi=args.max_sequence_midi, max_sequence_chord=args.max_sequence_chord, rpr=args.rpr).to(get_device()) start_epoch = BASELINE_EPOCH if(args.continue_weights is not None): if(args.continue_epoch is None): print("ERROR: Need epoch number to continue from (-continue_epoch) when using continue_weights") assert(False) else: model.load_state_dict(torch.load(args.continue_weights)) start_epoch = args.continue_epoch elif(args.continue_epoch is not None): print("ERROR: Need continue weights (-continue_weights) when using continue_epoch") assert(False) ##### Lr Scheduler vs static lr 
##### if(args.lr is None): if(args.continue_epoch is None): init_step = 0 else: init_step = args.continue_epoch * len(train_loader) lr = LR_DEFAULT_START lr_stepper = LrStepTracker(args.d_model, SCHEDULER_WARMUP_STEPS, init_step) else: lr = args.lr ##### Not smoothing evaluation loss ##### eval_loss_func = nn.CrossEntropyLoss(ignore_index=CHORD_PAD) ##### SmoothCrossEntropyLoss or CrossEntropyLoss for training ##### if(args.ce_smoothing is None): train_loss_func = eval_loss_func else: train_loss_func = SmoothCrossEntropyLoss(args.ce_smoothing, CHORD_SIZE, ignore_index=CHORD_PAD) eval_loss_emotion_func = nn.BCEWithLogitsLoss() train_loss_emotion_func = eval_loss_emotion_func ##### Optimizer ##### opt = Adam(model.parameters(), lr=lr, betas=(ADAM_BETA_1, ADAM_BETA_2), eps=ADAM_EPSILON) if(args.lr is None): lr_scheduler = LambdaLR(opt, lr_stepper.step) else: lr_scheduler = None ##### Tracking best evaluation loss ##### best_eval_loss = float("inf") best_eval_loss_epoch = -1 ##### Results reporting ##### if(not os.path.isfile(results_file)): with open(results_file, "w", newline="") as o_stream: writer = csv.writer(o_stream) writer.writerow(CSV_HEADER) ##### TRAIN LOOP ##### for epoch in range(start_epoch, args.epochs): if(epoch > BASELINE_EPOCH): print(SEPERATOR) print("NEW EPOCH:", epoch+1) print(SEPERATOR) print("") # Train train_epoch(epoch+1, model, train_loader, train_loss_func, train_loss_emotion_func, opt, lr_scheduler, args.print_modulus, isVideo= args.is_video) print(SEPERATOR) print("Evaluating:") else: print(SEPERATOR) print("Baseline model evaluation (Epoch 0):") train_metric_dict = eval_model(model, train_loader, train_loss_func, train_loss_emotion_func, isVideo= args.is_video) train_total_loss = train_metric_dict["avg_total_loss"] train_loss_chord = train_metric_dict["avg_loss_chord"] train_loss_emotion = train_metric_dict["avg_loss_emotion"] train_h1 = train_metric_dict["avg_h1"] train_h3 = train_metric_dict["avg_h3"] train_h5 = train_metric_dict["avg_h5"] eval_metric_dict = eval_model(model, val_loader, eval_loss_func, eval_loss_emotion_func, isVideo= args.is_video) eval_total_loss = eval_metric_dict["avg_total_loss"] eval_loss_chord = eval_metric_dict["avg_loss_chord"] eval_loss_emotion = eval_metric_dict["avg_loss_emotion"] eval_h1 = eval_metric_dict["avg_h1"] eval_h3 = eval_metric_dict["avg_h3"] eval_h5 = eval_metric_dict["avg_h5"]
lr = get_lr(opt)
8
2023-10-13 09:06:24+00:00
24k
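The record above is one complete example row (repo AMAAI-Lab/Video2Music, file train.py, target line "lr = get_lr(opt)"). As a rough illustration only, and not part of the dataset itself, the sketch below shows one way rows like this could be loaded and inspected in Python. The file name "examples.jsonl" and the exact field names used in the code (repo_name, file_path, context, import_statement, cropped_code, next_line, gold_snippet_index) are assumptions for this sketch; they mirror the order in which the values appear in the record above and may not match the dataset's actual storage format.

import json

# Minimal sketch (assumed layout: one JSON object per line, field names as noted above).
with open("examples.jsonl", "r", encoding="utf-8") as f:  # hypothetical path
    for line in f:
        row = json.loads(line)
        # Prompt a completion model would see: the file's imports plus the truncated code.
        prompt = row["import_statement"] + "\n" + row["cropped_code"]
        # The single line the model is expected to produce next.
        target = row["next_line"]
        # "context" is a list of {"identifier", "path", "snippet"} dicts from the same repo;
        # gold_snippet_index presumably marks the entry needed to predict the target line
        # (in the record above, index 8 is get_lr, matching the target "lr = get_lr(opt)").
        gold = row["context"][row["gold_snippet_index"]]
        print(row["repo_name"], row["file_path"], "->", target, "| gold:", gold["identifier"])

Note that the interpretation of gold_snippet_index as an index into the context list is an inference from the record above, not a documented guarantee.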
eai-lab/On-NAS
cifar_search.py
[ { "identifier": "genotypes", "path": "utils/genotypes.py", "snippet": "PRIMITIVES = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"sep_conv_3x3\",\n \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n \"dil_conv_5x5\",\n \"none\",\n]\nPRIMITIVES_FEWSHOT = [\n \"max_pool_3x3\",\n \"avg_pool_3x3\",\n \"skip_connect\", # identity\n \"conv_1x5_5x1\",\n \"conv_3x3\",\n \"sep_conv_3x3\",\n # \"sep_conv_5x5\",\n \"dil_conv_3x3\",\n # \"dil_conv_5x5\",\n # \"none\",\n]\ndef to_dag(C_in, gene, reduction):\ndef from_str(s):\ndef parse(alpha, k, primitives=PRIMITIVES_FEWSHOT):\ndef parse_pairwise(alpha, alpha_pairwise, primitives=PRIMITIVES_FEWSHOT): # deprecated" }, { "identifier": "SearchCNNController", "path": "models/search_cnn.py", "snippet": "class SearchCNNController(nn.Module):\n \"\"\" SearchCNN controller supporting multi-gpu \"\"\"\n def __init__(\n self,\n \n C_in,\n C,\n n_classes,\n n_layers,\n config,\n n_nodes=4,\n reduction_layers=[],\n stem_multiplier=3,\n device_ids=None,\n normalizer=dict(),\n PRIMITIVES=None,\n feature_scale_rate=2,\n use_hierarchical_alphas=False, # deprecated\n use_pairwise_input_alphas=False,\n alpha_prune_threshold=0.0,\n ):\n super().__init__()\n self.n_nodes = n_nodes\n self.criterion = nn.CrossEntropyLoss()\n self.use_pairwise_input_alphas = use_pairwise_input_alphas\n self.use_hierarchical_alphas = use_hierarchical_alphas\n self.alpha_prune_threshold = alpha_prune_threshold\n \n if \"name\" not in normalizer.keys():\n normalizer[\"func\"] = SoftMax\n normalizer[\"params\"] = dict()\n normalizer[\"params\"][\"temp_anneal_mode\"] = None\n elif normalizer[\"name\"] == \"softmax\":\n normalizer[\"func\"] = SoftMax\n elif normalizer[\"name\"] == \"relusoftmax\":\n normalizer[\"func\"] = ReLUSoftMax\n elif normalizer[\"name\"] == \"gumbel_softmax\":\n normalizer[\"func\"] = GumbelSoftMax\n else:\n raise RuntimeError(f\"Unknown normalizer {normalizer['name']}\")\n self.normalizer = normalizer\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n self.device_ids = device_ids\n\n \n \n # initialize architect parameters: alphas\n if PRIMITIVES is None:\n PRIMITIVES = gt.PRIMITIVES\n\n self.primitives = PRIMITIVES\n n_ops = len(PRIMITIVES)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n \n for i in range(n_nodes):\n # create alpha parameters over parallel operations\n self.alpha_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n self.alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n \n \n\n \n assert not (\n use_hierarchical_alphas and use_pairwise_input_alphas\n ), \"Hierarchical and pairwise alphas exclude each other.\"\n\n self.alpha_pw_normal = None\n self.alpha_pw_reduce = None\n self.alpha_in_normal = None\n self.alpha_in_reduce = None\n if use_hierarchical_alphas: # deprecated\n # create alpha parameters the different input nodes for a cell, i.e. 
for each node in a\n # cell an additional distribution over the input nodes is introduced\n print(\"Using hierarchical alphas.\")\n\n self.alpha_in_normal = nn.ParameterList()\n self.alpha_in_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n self.alpha_in_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n self.alpha_in_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n\n elif use_pairwise_input_alphas:\n print(\"Using pairwise input alphas.\")\n\n self.alpha_pw_normal = nn.ParameterList()\n self.alpha_pw_reduce = nn.ParameterList()\n\n \n for i in range(n_nodes):\n num_comb = int(scipy.special.binom(i + 2, 2))\n self.alpha_pw_normal.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n self.alpha_pw_reduce.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n \n \n\n # setup alphas list\n self._alphas = []\n \n for n, p in self.named_parameters():\n if \"alpha\" in n:\n self._alphas.append((n, p))\n\n \n \n self.net = SearchCNN(\n \n C_in,\n C,\n n_classes,\n n_layers,\n config,\n n_nodes,\n reduction_layers,\n stem_multiplier,\n PRIMITIVES=self.primitives,\n feature_scale_rate=feature_scale_rate,\n )\n\n \n\n def apply_normalizer(self, alpha):\n return self.normalizer[\"func\"](alpha, self.normalizer[\"params\"])\n\n def _get_normalized_alphas(self):\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = None\n weights_pw_reduce = None\n weights_in_normal = None\n weights_in_reduce = None\n if self.alpha_in_normal is not None:\n weights_in_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_normal\n ]\n weights_in_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_reduce\n ]\n elif self.alpha_pw_normal is not None:\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n return (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n )\n\n def prune_alphas(self, prune_threshold=0.0, val=-10e8):\n \"\"\"Set the alphas with probability below prune_threshold to a large negative value\n\n Note:\n The prune_threshold applies to the alpha probabilities (after the softmax is\n applied) while `val` corresponds to the logit values (thus a large negative value\n corresponds to a low probability).\n \"\"\"\n\n # reset temperature for prunning\n model_has_normalizer = hasattr(self, \"normalizer\")\n if model_has_normalizer:\n curr_step_backup = self.normalizer[\"params\"][\"curr_step\"]\n self.normalizer[\"params\"][\"curr_step\"] = (\n self.normalizer[\"params\"][\"max_steps\"] - 1\n )\n\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n for idx in range(len(weights_normal)):\n # need to modify data because alphas are leaf variables\n self.alpha_normal[idx].data[weights_normal[idx] < prune_threshold] = val\n self.alpha_reduce[idx].data[weights_reduce[idx] < prune_threshold] = val\n\n # set curr_step back to original value\n self.normalizer[\"params\"][\"curr_step\"] = curr_step_backup\n\n def get_sparse_alphas_pw(self, alpha_prune_threshold=0.0):\n\n \"\"\"\n Convert alphas to zero-one-vectors under consideration of pairwise alphas\n\n\n :param alpha_prune_threshold: threshold for pruning\n\n :return: binary 
tensors with shape like alpha_normal and alpha_reduce, indicating whether an op is included in the\n sparsified one shot model\n \"\"\"\n\n assert (\n self.alpha_pw_normal is not None\n ), \"Error: function only availaible for pw models\"\n\n weights_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_normal\n ] # get normalized weights\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n weights_normal_sparse = list()\n\n # get all the pairs of inputs\n for node_idx, node_weights in enumerate(weights_normal):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_normal[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_normal[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_normal_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n ### same for reduction\n\n weights_reduce_sparse = list()\n\n for node_idx, node_weights in enumerate(weights_reduce):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_reduce[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_reduce[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_reduce_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n return weights_normal_sparse, weights_reduce_sparse\n\n def get_sparse_num_params(self, alpha_prune_threshold=0.0):\n \"\"\"Get number of parameters for sparse one-shot-model\n\n Returns:\n A torch tensor\n \"\"\"\n\n weights_normal, weights_reduce = self.get_sparse_alphas_pw(\n alpha_prune_threshold\n )\n # this returns tensors with only 0's and 1's depending on whether an op is used in the sparsified model\n\n # get none active ops/layer names\n\n # for normal cell\n none_active_ops_normal = list()\n for node_idx, node in enumerate(weights_normal):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_normal.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n # and for reduction cell\n none_active_ops_reduce = list()\n for 
node_idx, node in enumerate(weights_reduce):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_reduce.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n all_params = sum(\n p.numel() for p in self.net.parameters()\n ) # params of one-shot model\n\n # get normal and reduction layers\n normal_cells = list()\n red_cells = list()\n for lyr, cell in enumerate(self.net.cells):\n if cell.reduction:\n red_cells.append(lyr)\n else:\n normal_cells.append(lyr)\n\n # count params of non-active ops\n\n none_active_params = 0\n for layer_name, layer_weights in self.named_parameters():\n # check if layer is part of normal or reduction cell\n if \"net.cells.\" in layer_name: # layer part of cells at all?\n for cell in normal_cells: # normal cell?\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_normal\n\n # else reduction cell\n for cell in red_cells:\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_reduce\n\n if any(\n [none_active_op in layer_name for none_active_op in none_active_ops]\n ): # check if layer is part of none-active ops\n none_active_params += layer_weights.numel()\n\n active_params = all_params - none_active_params\n\n return active_params\n\n def drop_path_prob(self, p):\n \"\"\" Set drop path probability \"\"\"\n for module in self.net.modules():\n if isinstance(module, ops.DropPath_):\n module.p = p\n def forward(self, x, sparsify_input_alphas=None):\n \"\"\"Forward pass through the network\n\n Args:\n x: The input tensor\n sparsify_input_alphas: Whether to sparsify the alphas over the input nodes. Use `None`\n to not sparsify input alphas.\n For hierarchical alphas, `sparsify_input_alphas` should be a (float) threshold on\n the probability (i.e. between 0 and 1). 
Alphas above the threshold (and thus the\n corresponding input nodes) are kept.\n For pairwise alphas, if `sparsify_input_alphas` is larger than 0, then only the\n largest alpha is kept.\n Note that the sparsification is not be differentiable and thus cannot be used during\n training.\n\n Returns:\n The network output\n \"\"\"\n (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n ) = self._get_normalized_alphas()\n\n \n if len(self.device_ids) == 1 :\n output= self.net(\n x,\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n sparsify_input_alphas=sparsify_input_alphas,\n alpha_prune_threshold=self.alpha_prune_threshold,\n )\n return output\n\n \n # scatter x\n xs = nn.parallel.scatter(x, self.device_ids)\n # broadcast weights\n wnormal_copies = broadcast_list(weights_normal, self.device_ids)\n wreduce_copies = broadcast_list(weights_reduce, self.device_ids)\n if weights_in_normal is not None:\n wnormal_in_copies = broadcast_list(weights_in_normal, self.device_ids)\n wreduce_in_copies = broadcast_list(weights_in_reduce, self.device_ids)\n else:\n \n wnormal_in_copies = None\n wreduce_in_copies = None\n\n if weights_pw_normal is not None:\n wnormal_pw_copies = broadcast_list(weights_pw_normal, self.device_ids)\n wreduce_pw_copies = broadcast_list(weights_pw_reduce, self.device_ids)\n else:\n wnormal_pw_copies = None\n wreduce_pw_copies = None\n\n # replicate modules\n replicas = nn.parallel.replicate(self.net, self.device_ids)\n outputs = nn.parallel.parallel_apply(\n replicas,\n list(\n zip(\n xs,\n wnormal_copies,\n wreduce_copies,\n # wnormal_in_copies,\n # wreduce_in_copies,\n # wnormal_pw_copies,\n # wreduce_pw_copies,\n )\n ),\n devices=self.device_ids,\n )\n return nn.parallel.gather(outputs, self.device_ids[0])\n\n def loss(self, X, y):\n logits = self.forward(X)\n return self.criterion(logits, y)\n\n def print_alphas(self, logger):\n # remove formats\n org_formatters = []\n for handler in logger.handlers:\n org_formatters.append(handler.formatter)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n normalizer = self.get_normalizer(deterministic=True)\n logger.info(\"####### ALPHA #######\")\n logger.info(\"# Alpha - normal\")\n for alpha in self.alpha_normal:\n logger.info(normalizer(alpha))\n\n logger.info(\"\\n# Alpha - reduce\")\n for alpha in self.alpha_reduce:\n logger.info(normalizer(alpha))\n logger.info(\"#####################\")\n\n # restore formats\n for handler, formatter in zip(logger.handlers, org_formatters):\n handler.setFormatter(formatter)\n\n def genotype(self):\n if self.use_pairwise_input_alphas:\n\n weights_pw_normal = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_reduce\n ]\n\n gene_normal = gt.parse_pairwise(\n self.alpha_normal, weights_pw_normal, primitives=self.primitives\n )\n gene_reduce = gt.parse_pairwise(\n self.alpha_reduce, weights_pw_reduce, primitives=self.primitives\n )\n\n elif self.use_hierarchical_alphas:\n raise NotImplementedError\n else:\n\n gene_normal = gt.parse(self.alpha_normal, k=2, primitives=self.primitives)\n gene_reduce = gt.parse(self.alpha_reduce, k=2, primitives=self.primitives)\n\n concat = range(2, 2 + self.n_nodes) # concat all intermediate nodes\n\n return gt.Genotype(\n normal=gene_normal,\n normal_concat=concat,\n reduce=gene_reduce,\n reduce_concat=concat,\n )\n\n def 
weights(self):\n return self.net.parameters()\n\n def named_weights(self):\n return self.net.named_parameters()\n\n def named_weights_with_net(self):\n return self.named_parameters()\n\n def alphas(self):\n for n, p in self._alphas:\n yield p\n\n def named_alphas(self):\n for n, p in self._alphas:\n yield n, p" }, { "identifier": "SearchCNNControllerPC", "path": "models/search_cnn_PC.py", "snippet": "class SearchCNNControllerPC(nn.Module):\n \"\"\" SearchCNN controller supporting multi-gpu \"\"\"\n\n def __init__(\n self,\n C_in,\n C,\n n_classes,\n n_layers,\n n_nodes=4,\n reduction_layers=[],\n stem_multiplier=3,\n device_ids=None,\n normalizer=dict(),\n PRIMITIVES=None,\n feature_scale_rate=2,\n use_hierarchical_alphas=False, # deprecated\n use_pairwise_input_alphas=False,\n use_pc_adaptation=False,\n alpha_prune_threshold=0.0,\n ):\n super().__init__()\n self.n_nodes = n_nodes\n self.criterion = nn.CrossEntropyLoss()\n self.use_pairwise_input_alphas = use_pairwise_input_alphas\n self.use_hierarchical_alphas = use_hierarchical_alphas\n self.alpha_prune_threshold = alpha_prune_threshold\n self.use_pc_adaptation = use_pc_adaptation\n if \"name\" not in normalizer.keys():\n normalizer[\"func\"] = SoftMax\n normalizer[\"params\"] = dict()\n normalizer[\"params\"][\"temp_anneal_mode\"] = None\n elif normalizer[\"name\"] == \"softmax\":\n normalizer[\"func\"] = SoftMax\n elif normalizer[\"name\"] == \"relusoftmax\":\n normalizer[\"func\"] = ReLUSoftMax\n elif normalizer[\"name\"] == \"gumbel_softmax\":\n normalizer[\"func\"] = GumbelSoftMax\n else:\n raise RuntimeError(f\"Unknown normalizer {normalizer['name']}\")\n self.normalizer = normalizer\n\n if device_ids is None:\n device_ids = list(range(torch.cuda.device_count()))\n self.device_ids = device_ids\n\n # initialize architect parameters: alphas\n if PRIMITIVES is None:\n PRIMITIVES = gt.PRIMITIVES\n\n self.primitives = PRIMITIVES\n n_ops = len(PRIMITIVES)\n\n self.alpha_normal = nn.ParameterList()\n self.alpha_reduce = nn.ParameterList()\n\n\n self.pc_beta_normal = nn.ParameterList()\n self.pc_beta_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n # create alpha parameters over parallel operations\n self.alpha_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n self.alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2, n_ops)))\n\n assert not (\n use_hierarchical_alphas and use_pairwise_input_alphas\n ), \"Hierarchical and pairwise alphas exclude each other.\"\n\n self.alpha_pw_normal = None\n self.alpha_pw_reduce = None\n self.alpha_in_normal = None\n self.alpha_in_reduce = None\n self.pc_alpha_normal = None\n self.pc_alpha_reduce = None \n\n if use_hierarchical_alphas: # deprecated\n # create alpha parameters the different input nodes for a cell, i.e. 
for each node in a\n # cell an additional distribution over the input nodes is introduced\n print(\"Using hierarchical alphas.\")\n\n self.alpha_in_normal = nn.ParameterList()\n self.alpha_in_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n self.alpha_in_normal.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n self.alpha_in_reduce.append(nn.Parameter(1e-3 * torch.randn(i + 2)))\n\n elif use_pairwise_input_alphas:\n print(\"Using pairwise input alphas.\")\n\n self.alpha_pw_normal = nn.ParameterList()\n self.alpha_pw_reduce = nn.ParameterList()\n\n for i in range(n_nodes):\n num_comb = int(scipy.special.binom(i + 2, 2))\n self.alpha_pw_normal.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n self.alpha_pw_reduce.append(nn.Parameter(1e-3 * torch.randn(num_comb)))\n \n if use_pc_adaptation:\n # initialize pc_beta here\n # beta have to be [[2],[3],[4]]\n self.pc_alpha_normal = nn.ParameterList()\n self.pc_alpha_reduce = nn.ParameterList()\n for i in range(n_nodes):\n num_edges = i + 2\n self.pc_alpha_normal.append(nn.Parameter(1e-3 * torch.randn(num_edges)))\n self.pc_alpha_reduce.append(nn.Parameter(1e-3 * torch.randn(num_edges)))\n\n\n # setup alphas list\n self._alphas = []\n for n, p in self.named_parameters():\n if \"alpha\" in n:\n self._alphas.append((n, p))\n\n self.net = SearchCNNPC(\n C_in,\n C,\n n_classes,\n n_layers,\n n_nodes,\n reduction_layers,\n stem_multiplier,\n PRIMITIVES=self.primitives,\n feature_scale_rate=feature_scale_rate,\n )\n\n def apply_normalizer(self, alpha):\n return self.normalizer[\"func\"](alpha, self.normalizer[\"params\"])\n\n def _get_normalized_alphas(self):\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = None\n weights_pw_reduce = None\n weights_in_normal = None\n weights_in_reduce = None\n weights_pc_normal = None\n weights_pc_reduce = None\n\n if self.alpha_in_normal is not None:\n weights_in_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_normal\n ]\n weights_in_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_in_reduce\n ]\n elif self.alpha_pw_normal is not None:\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n if self.pc_alpha_normal is not None:\n weights_pc_normal = [\n self.apply_normalizer(alpha) for alpha in self.pc_alpha_normal\n ]\n weights_pc_reduce = [\n self.apply_normalizer(alpha) for alpha in self.pc_alpha_reduce\n ]\n return (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n )\n\n def prune_alphas(self, prune_threshold=0.0, val=-10e8):\n \"\"\"Set the alphas with probability below prune_threshold to a large negative value\n\n Note:\n The prune_threshold applies to the alpha probabilities (after the softmax is\n applied) while `val` corresponds to the logit values (thus a large negative value\n corresponds to a low probability).\n \"\"\"\n\n # reset temperature for prunning\n model_has_normalizer = hasattr(self, \"normalizer\")\n if model_has_normalizer:\n curr_step_backup = self.normalizer[\"params\"][\"curr_step\"]\n self.normalizer[\"params\"][\"curr_step\"] = (\n self.normalizer[\"params\"][\"max_steps\"] - 1\n )\n\n weights_normal = [self.apply_normalizer(alpha) for alpha in self.alpha_normal]\n 
weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n for idx in range(len(weights_normal)):\n # need to modify data because alphas are leaf variables\n self.alpha_normal[idx].data[weights_normal[idx] < prune_threshold] = val\n self.alpha_reduce[idx].data[weights_reduce[idx] < prune_threshold] = val\n\n # set curr_step back to original value\n self.normalizer[\"params\"][\"curr_step\"] = curr_step_backup\n\n def get_sparse_alphas_pw(self, alpha_prune_threshold=0.0):\n\n \"\"\"\n Convert alphas to zero-one-vectors under consideration of pairwise alphas\n\n\n :param alpha_prune_threshold: threshold for pruning\n\n :return: binary tensors with shape like alpha_normal and alpha_reduce, indicating whether an op is included in the\n sparsified one shot model\n \"\"\"\n\n assert (\n self.alpha_pw_normal is not None\n ), \"Error: function only availaible for pw models\"\n\n weights_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_normal\n ] # get normalized weights\n weights_reduce = [self.apply_normalizer(alpha) for alpha in self.alpha_reduce]\n\n weights_pw_normal = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_normal\n ]\n\n weights_pw_reduce = [\n self.apply_normalizer(alpha) for alpha in self.alpha_pw_reduce\n ]\n\n weights_normal_sparse = list()\n\n # get all the pairs of inputs\n for node_idx, node_weights in enumerate(weights_normal):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_normal[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_normal[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_normal_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n ### same for reduction\n\n weights_reduce_sparse = list()\n\n for node_idx, node_weights in enumerate(weights_reduce):\n input_pairs = list()\n\n # get pairs of inputs correspeonding to indices in alpha_pw\n for input_1 in range(len(node_weights)):\n for input_2 in range(input_1 + 1, len(node_weights)):\n input_pairs.append([input_1, input_2])\n\n assert len(input_pairs) == len(\n weights_pw_reduce[node_idx]\n ), \"error: pairwise alpha length does not match pairwise terms length\"\n\n keep_inputs = list() # list of input nodes that are kept\n\n for input_pair_idx in range(len(input_pairs)):\n if (\n weights_pw_reduce[node_idx][input_pair_idx] >= alpha_prune_threshold\n ): # if pw weight larger than threshold keep input\n keep_inputs.extend(input_pairs[input_pair_idx])\n\n weights_reduce_sparse.append(\n torch.stack(\n [\n (weight >= alpha_prune_threshold).type(torch.float)\n if weight_idx in keep_inputs\n else torch.zeros_like(weight)\n for weight_idx, weight in enumerate(node_weights)\n ]\n )\n )\n\n return weights_normal_sparse, weights_reduce_sparse\n\n def get_sparse_num_params(self, alpha_prune_threshold=0.0):\n \"\"\"Get number of parameters for sparse one-shot-model\n\n Returns:\n A torch tensor\n \"\"\"\n\n 
weights_normal, weights_reduce = self.get_sparse_alphas_pw(\n alpha_prune_threshold\n )\n # this returns tensors with only 0's and 1's depending on whether an op is used in the sparsified model\n\n # get none active ops/layer names\n\n # for normal cell\n none_active_ops_normal = list()\n for node_idx, node in enumerate(weights_normal):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_normal.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n # and for reduction cell\n none_active_ops_reduce = list()\n for node_idx, node in enumerate(weights_reduce):\n for mixed_op_idx, mixed_op in enumerate(node):\n none_active_ops_idx = (mixed_op == 0.0).nonzero()\n for op in none_active_ops_idx:\n none_active_ops_reduce.append(\n str(node_idx)\n + \".\"\n + str(mixed_op_idx)\n + \"._ops.\"\n + str(int(op))\n )\n\n all_params = sum(\n p.numel() for p in self.net.parameters()\n ) # params of one-shot model\n\n # get normal and reduction layers\n normal_cells = list()\n red_cells = list()\n for lyr, cell in enumerate(self.net.cells):\n if cell.reduction:\n red_cells.append(lyr)\n else:\n normal_cells.append(lyr)\n\n # count params of non-active ops\n\n none_active_params = 0\n for layer_name, layer_weights in self.named_parameters():\n # check if layer is part of normal or reduction cell\n if \"net.cells.\" in layer_name: # layer part of cells at all?\n for cell in normal_cells: # normal cell?\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_normal\n\n # else reduction cell\n for cell in red_cells:\n if \"net.cells.\" + str(cell) in layer_name: # normal cell\n none_active_ops = none_active_ops_reduce\n\n if any(\n [none_active_op in layer_name for none_active_op in none_active_ops]\n ): # check if layer is part of none-active ops\n none_active_params += layer_weights.numel()\n\n active_params = all_params - none_active_params\n\n return active_params\n\n def drop_path_prob(self, p):\n \"\"\" Set drop path probability \"\"\"\n for module in self.net.modules():\n if isinstance(module, ops_7c.DropPath_):\n module.p = p\n\n def forward(self, x, sparsify_input_alphas=None):\n \"\"\"Forward pass through the network\n\n Args:\n x: The input tensor\n sparsify_input_alphas: Whether to sparsify the alphas over the input nodes. Use `None`\n to not sparsify input alphas.\n For hierarchical alphas, `sparsify_input_alphas` should be a (float) threshold on\n the probability (i.e. between 0 and 1). 
Alphas above the threshold (and thus the\n corresponding input nodes) are kept.\n For pairwise alphas, if `sparsify_input_alphas` is larger than 0, then only the\n largest alpha is kept.\n Note that the sparsification is not be differentiable and thus cannot be used during\n training.\n\n Returns:\n The network output\n \"\"\"\n\n (\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n ) = self._get_normalized_alphas()\n\n if len(self.device_ids) == 1:\n return self.net(\n x,\n weights_normal,\n weights_reduce,\n weights_in_normal,\n weights_in_reduce,\n weights_pw_normal,\n weights_pw_reduce,\n weights_pc_normal,\n weights_pc_reduce,\n sparsify_input_alphas=sparsify_input_alphas,\n alpha_prune_threshold=self.alpha_prune_threshold,\n )\n\n # scatter x\n xs = nn.parallel.scatter(x, self.device_ids)\n # broadcast weights\n wnormal_copies = broadcast_list(weights_normal, self.device_ids)\n wreduce_copies = broadcast_list(weights_reduce, self.device_ids)\n\n if weights_in_normal is not None:\n wnormal_in_copies = broadcast_list(weights_in_normal, self.device_ids)\n wreduce_in_copies = broadcast_list(weights_in_reduce, self.device_ids)\n else:\n wnormal_in_copies = None\n wreduce_in_copies = None\n\n if weights_pw_normal is not None:\n wnormal_pw_copies = broadcast_list(weights_pw_normal, self.device_ids)\n wreduce_pw_copies = broadcast_list(weights_pw_reduce, self.device_ids)\n else:\n wnormal_pw_copies = None\n wreduce_pw_copies = None\n\n # replicate modules\n replicas = nn.parallel.replicate(self.net, self.device_ids)\n outputs = nn.parallel.parallel_apply(\n replicas,\n list(\n zip(\n xs,\n wnormal_copies,\n wreduce_copies,\n wnormal_in_copies,\n wreduce_in_copies,\n wnormal_pw_copies,\n wreduce_pw_copies,\n )\n ),\n devices=self.device_ids,\n )\n return nn.parallel.gather(outputs, self.device_ids[0])\n\n def loss(self, X, y):\n logits = self.forward(X)\n return self.criterion(logits, y)\n\n def print_alphas(self, logger):\n # remove formats\n org_formatters = []\n for handler in logger.handlers:\n org_formatters.append(handler.formatter)\n handler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n normalizer = self.get_normalizer(deterministic=True)\n logger.info(\"####### ALPHA #######\")\n logger.info(\"# Alpha - normal\")\n for alpha in self.alpha_normal:\n logger.info(normalizer(alpha))\n\n logger.info(\"\\n# Alpha - reduce\")\n for alpha in self.alpha_reduce:\n logger.info(normalizer(alpha))\n logger.info(\"#####################\")\n\n # restore formats\n for handler, formatter in zip(logger.handlers, org_formatters):\n handler.setFormatter(formatter)\n\n def genotype(self):\n if self.use_pairwise_input_alphas:\n\n weights_pw_normal = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_normal\n ]\n weights_pw_reduce = [\n F.softmax(alpha, dim=-1) for alpha in self.alpha_pw_reduce\n ]\n\n gene_normal = gt.parse_pairwise(\n self.alpha_normal, weights_pw_normal, primitives=self.primitives\n )\n gene_reduce = gt.parse_pairwise(\n self.alpha_reduce, weights_pw_reduce, primitives=self.primitives\n )\n\n elif self.use_hierarchical_alphas:\n raise NotImplementedError\n else:\n\n gene_normal = gt.parse(self.alpha_normal, k=2, primitives=self.primitives)\n gene_reduce = gt.parse(self.alpha_reduce, k=2, primitives=self.primitives)\n\n concat = range(2, 2 + self.n_nodes) # concat all intermediate nodes\n\n return gt.Genotype(\n normal=gene_normal,\n normal_concat=concat,\n 
reduce=gene_reduce,\n reduce_concat=concat,\n )\n\n def weights(self):\n return self.net.parameters()\n\n def named_weights(self):\n return self.net.named_parameters()\n\n def named_weights_with_net(self):\n return self.named_parameters()\n\n def alphas(self):\n for n, p in self._alphas:\n yield p\n\n def named_alphas(self):\n for n, p in self._alphas:\n yield n, p" }, { "identifier": "Darts", "path": "task_optimizer/darts.py", "snippet": "class Darts:\n def __init__(self, model, config, do_schedule_lr=False):\n\n self.config = config\n self.config.logger = None\n self.model = model\n self.do_schedule_lr = do_schedule_lr\n self.task_train_steps = config.task_train_steps\n self.test_task_train_steps = config.test_task_train_steps\n self.warm_up_epochs = config.warm_up_epochs\n self.eval_switch = 0\n self.pprevious_grads = 0\n # weights optimizer\n\n self.w_optim = torch.optim.Adam(\n self.model.weights(),\n lr=self.config.w_lr,\n betas=(0.0, 0.999), # config.w_momentum,\n weight_decay=self.config.w_weight_decay,\n ) #\n\n # architecture optimizer\n self.a_optim = torch.optim.Adam(\n model.alphas(),\n self.config.alpha_lr,\n betas=(0.0, 0.999),\n weight_decay=self.config.alpha_weight_decay,\n )\n self.architect = Architect(\n self.model,\n self.config.w_momentum,\n self.config.w_weight_decay,\n self.config.use_first_order_darts,\n )\n def step(\n self,\n task,\n epoch,\n global_progress=\"\",\n test_phase=False,\n alpha_logger=None,\n sparsify_input_alphas=None,\n ):\n \n\n\n log_alphas = False\n\n if test_phase:\n top1_logger = self.config.top1_logger_test\n losses_logger = self.config.losses_logger_test\n train_steps = self.config.test_task_train_steps\n arch_adap_steps = int(train_steps * self.config.test_adapt_steps)\n \n if alpha_logger is not None:\n log_alphas = True\n\n else:\n top1_logger = self.config.top1_logger\n losses_logger = self.config.losses_logger\n train_steps = self.config.task_train_steps\n arch_adap_steps = train_steps\n \n\n \n\n lr = self.config.w_lr\n\n if self.config.w_task_anneal:\n for group in self.w_optim.param_groups:\n group[\"lr\"] = self.config.w_lr\n\n w_task_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.w_optim, train_steps, eta_min=0.0\n )\n else:\n w_task_lr_scheduler = None\n\n if self.config.a_task_anneal:\n for group in self.a_optim.param_groups:\n group[\"lr\"] = self.config.alpha_lr\n\n a_task_lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(\n self.a_optim, arch_adap_steps, eta_min=0.0\n )\n\n else:\n a_task_lr_scheduler = None\n\n model_has_normalizer = hasattr(self.model, \"normalizer\")\n if model_has_normalizer:\n self.model.normalizer[\"params\"][\"curr_step\"] = 0.0\n self.architect.v_net.normalizer[\"params\"][\"curr_step\"] = 0.0\n self.model.normalizer[\"params\"][\"max_steps\"] = float(arch_adap_steps)\n self.architect.v_net.normalizer[\"params\"][\"max_steps\"] = float(\n arch_adap_steps\n )\n from tqdm import tqdm\n if self.config.drop_path_prob > 0.0:\n if not test_phase or self.config.use_drop_path_in_meta_testing:\n self.model.drop_path_prob(self.config.drop_path_prob)\n\n p_bar = tqdm(range(train_steps))\n self.config.total_steps = train_steps * len(task.train_loader)\n \n\n\n for train_step in p_bar: # task train_steps = epochs per task\n warm_up = (\n epoch < self.warm_up_epochs\n ) # if epoch < warm_up_epochs, do warm up\n if (\n train_step >= arch_adap_steps\n ): # no architecture adap after arch_adap_steps steps\n warm_up = 1\n\n if w_task_lr_scheduler is not None:\n w_task_lr_scheduler.step()\n\n 
if a_task_lr_scheduler is not None:\n a_task_lr_scheduler.step()\n torch.cuda.reset_peak_memory_stats(device=0)\n \n task_specific_model = train( \n task,\n self.model,\n self.architect,\n self.w_optim,\n self.a_optim,\n lr,\n global_progress,\n self.config,\n warm_up,\n test_phase\n )\n mem = torch.cuda.memory_stats(0)['allocated_bytes.all.peak']/(1024**2)\n p_bar.set_postfix({\"Memory\" : f\"{mem : .2f}\",\"Task average\":f\"{self.config.top1_logger_test.avg:.1%}\"})\n if train_step == 9:\n self.config.memory_snap = mem\n if (\n model_has_normalizer\n and train_step < (arch_adap_steps - 1)\n and not warm_up\n ): \n self.model.normalizer[\"params\"][\"curr_step\"] += 1\n self.architect.v_net.normalizer[\"params\"][\"curr_step\"] += 1\n\n w_task = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_weight)\n for layer_name, layer_weight in self.model.named_weights()\n # if layer_weight.grad is not None\n }\n )\n a_task = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_alpha)\n for layer_name, layer_alpha in self.model.named_alphas()\n # if layer_alpha.grad is not None\n }\n )\n\n \n w_task_bot = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_weight)\n for layer_name, layer_weight in task_specific_model.named_weights()\n \n }\n )\n a_task_bot = OrderedDict(\n {\n layer_name: copy.deepcopy(layer_alpha)\n for layer_name, layer_alpha in task_specific_model.named_alphas()\n \n }\n )\n # Log genotype\n genotype = self.model.genotype()\n\n if log_alphas:\n alpha_logger[\"normal_relaxed\"].append(\n copy.deepcopy(self.model.alpha_normal)\n )\n alpha_logger[\"reduced_relaxed\"].append(\n copy.deepcopy(self.model.alpha_reduce)\n )\n alpha_logger[\"all_alphas\"].append(a_task)\n alpha_logger[\"normal_hierarchical\"].append(\n copy.deepcopy(self.model.alpha_in_normal)\n )\n alpha_logger[\"reduced_hierarchical\"].append(\n copy.deepcopy(self.model.alpha_in_reduce)\n )\n alpha_logger[\"normal_pairwise\"].append(\n copy.deepcopy(self.model.alpha_pw_normal)\n )\n alpha_logger[\"reduced_pairwise\"].append(\n copy.deepcopy(self.model.alpha_pw_reduce)\n )\n\n # for test data evaluation, turn off drop path\n if self.config.drop_path_prob > 0.0:\n self.model.drop_path_prob(0.0)\n little_switch = 0\n\n if self.config.naivenaive:\n little_switch = 1\n with torch.no_grad():\n self.config.naivenaive = 1\n self.config.eval_switch = 1\n self.config.cell_phase = 3\n\n for batch_idx, batch in enumerate(task.test_loader):\n \n x_test, y_test = batch\n x_test = x_test.to(self.config.device, non_blocking=True)\n y_test = y_test.to(self.config.device, non_blocking=True)\n if isinstance(self.model, SearchCNNController):\n logits = self.model(\n x_test, sparsify_input_alphas=sparsify_input_alphas\n )\n else:\n logits = self.model(x_test)\n loss = self.model.criterion(logits, y_test)\n\n y_test_pred = logits.softmax(dim=1)\n now = time.strftime('%c', time.localtime(time.time()))\n prec1, prec5 = utils.accuracy(logits, y_test, self.config, topk=(1, 5))\n losses_logger.update(loss.item(), 1)\n top1_logger.update(prec1.item(), 1)\n \n self.config.naivenaive = 0 \n self.config.eval_switch = 0\n self.config.cell_phase = 3 \n\n if little_switch == 1:\n self.config.naivenaive = 1\n \n task_info = namedtuple(\n \"task_info\",\n [\n \"genotype\",\n \"top1\",\n \"w_task\",\n \"a_task\",\n \"loss\",\n \"y_test_pred\",\n \"sparse_num_params\",\n \"w_task_bot\",\n \"a_task_bot\"\n ],\n )\n task_info.w_task = w_task\n task_info.a_task = a_task\n task_info.loss = loss\n y_test_pred = y_test_pred\n task_info.y_test_pred = 
y_test_pred\n task_info.genotype = genotype\n # task_info.top1 = top1\n\n # task_info.sparse_num_params = self.model.get_sparse_num_params(\n # self.model.alpha_prune_threshold\n # )\n task_info.w_task_bot = w_task_bot\n task_info.a_task_bot = a_task_bot\n\n return task_info" }, { "identifier": "Architect", "path": "task_optimizer/darts.py", "snippet": "class Architect:\n \"\"\" Compute gradients of alphas \"\"\"\n\n def __init__(self, net, w_momentum, w_weight_decay, use_first_order_darts):\n \"\"\"\n Args:\n net\n w_momentum: weights momentum\n \"\"\"\n self.net = net\n self.v_net = copy.deepcopy(net)\n self.w_momentum = w_momentum\n self.w_weight_decay = w_weight_decay\n self.use_first_order_darts = use_first_order_darts\n self.pprevious_grads = list()\n \n\n def virtual_step(self, train_X, train_y, xi, w_optim):\n \"\"\"\n Compute unrolled weight w' (virtual step)\n\n Step process:\n 1) forward\n 2) calc loss\n 3) compute gradient (by backprop)\n 4) update gradient\n\n Args:\n xi: learning rate for virtual gradient step (same as weights lr)\n w_optim: weights optimizer\n \"\"\"\n # forward & calc loss\n loss = self.net.loss(train_X, train_y) # L_train(w)\n\n # compute gradient\n gradients = torch.autograd.grad(loss, self.net.weights())\n\n \n \n\n\n\n \n # do virtual step (update gradient)\n # below operations do not need gradient tracking\n with torch.no_grad():\n # dict key is not the value, but the pointer. So original network weight have to\n # be iterated also.\n for w, vw, g in zip(self.net.weights(), self.v_net.weights(), gradients):\n m = w_optim.state[w].get(\"momentum_buffer\", 0.0) * self.w_momentum\n vw.copy_(w - xi * (m + g + self.w_weight_decay * w))\n\n # synchronize alphas\n for a, va in zip(self.net.alphas(), self.v_net.alphas()):\n va.copy_(a)\n\n def backward(self, train_X, train_y, val_X, val_y, xi, w_optim):\n \"\"\"Compute loss and backward its gradients\n Args:\n xi: learning rate for virtual gradient step (same as net lr)\n w_optim: weights optimizer - for virtual step\n \"\"\"\n # calc unrolled loss\n loss = self.v_net.loss(val_X, val_y) # L_val(w`)\n # compute gradient\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n v_grads = torch.autograd.grad(loss, v_alphas + v_weights, allow_unused=True)\n dalpha = v_grads[: len(v_alphas)]\n dw = v_grads[len(v_alphas) :]\n\n \n\n if self.use_first_order_darts: # use first oder approximation for darts\n \n with torch.no_grad():\n for alpha, da in zip(self.net.alphas(), dalpha):\n alpha.grad = da\n \n\n else: # 2nd order DARTS\n\n hessian = self.compute_hessian(dw, train_X, train_y)\n\n # update final gradient = dalpha - xi*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - xi * h\n\n\n\n\n def partial_alpha_backward(self,config, train_X, train_y, val_X, val_y, xi, w_optim):\n \"\"\"Compute loss and backward its gradients\n Args:\n \n xi: learning rate for virtual gradient step (same as net lr)\n w_optim: weights optimizer - for virtual step\n \"\"\"\n # compute gradient\n grad_output_sum = copy.deepcopy(self.v_net.net.config.alpha_previous_grad)\n \n if config.residual_flag == 1:\n pprevious_grad = copy.deepcopy(self.v_net.net.config.alpha_pprevious_grad)\n self.pprevious_grads.append(pprevious_grad) \n \n latent = self.v_net(val_X)\n\n\n v_alphas = tuple(self.v_net.alphas())\n v_weights = tuple(self.v_net.weights())\n\n if config.residual_flag == 1:\n try:\n if self.v_net.net.config.cell_phase == 1:\n grad_output_sum = 
torch.add(self.pprevious_grads[0],grad_output_sum)\n\n elif self.v_net.net.config.cell_phase == 0:\n grad_output_sum = torch.add(self.pprevious_grads[1],grad_output_sum)\n except:\n print(f\"Shape error,{grad_output_sum.shape} was the desired shape but you got {self.pprevious_grads[0].shape} or {self.pprevious_grads[1].shape}.\")\n print(\"Bypassing residual flag.\")\n\n v_grads = torch.autograd.grad(latent, v_alphas + v_weights, grad_outputs=grad_output_sum, allow_unused=True) \n dalpha = v_grads[: len(v_alphas)]\n dw = v_grads[len(v_alphas) :]\n \n \n\n if self.use_first_order_darts: # use first oder approximation for darts\n \n with torch.no_grad():\n for alpha, da in zip(self.net.alphas(), dalpha):\n if alpha.grad is not None and da is not None:\n alpha.grad.data.add_(da)\n else:\n alpha.grad= da\n\n else: # 2nd order DARTS\n\n hessian = self.compute_hessian(dw, train_X, train_y)\n\n # update final gradient = dalpha - xi*hessian\n with torch.no_grad():\n for alpha, da, h in zip(self.net.alphas(), dalpha, hessian):\n alpha.grad = da - xi * h\n\n def compute_hessian(self, dw, train_X, train_y):\n \"\"\"\n dw = dw` { L_val(w`, alpha) }\n w+ = w + eps * dw\n w- = w - eps * dw\n hessian = (dalpha { L_train(w+, alpha) } - dalpha { L_train(w-, alpha) }) / (2*eps)\n eps = 0.01 / ||dw||\n \"\"\"\n norm = torch.cat([w.view(-1) for w in dw]).norm()\n eps = 0.01 / norm\n \n # w+ = w + eps*dw`\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p += eps * d\n\n # dalpha { L_train(w+) }\n loss = self.net.loss(train_X, train_y)\n dalpha_pos = torch.autograd.grad(loss, self.net.alphas())\n\n # w- = w - eps*dw`\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p -= 2.0 * eps * d\n\n # dalpha { L_train(w-) }\n loss = self.net.loss(train_X, train_y)\n dalpha_neg = torch.autograd.grad(loss, self.net.alphas())\n\n # recover w\n with torch.no_grad():\n for p, d in zip(self.net.weights(), dw):\n p += eps * d\n\n hessian = [(p - n) / 2.0 * eps for p, n in zip(dalpha_pos, dalpha_neg)]\n return hessian" }, { "identifier": "train", "path": "task_optimizer/darts.py", "snippet": "def train(\n task,\n model,\n architect,\n w_optim,\n alpha_optim,\n lr,\n global_progress,\n config,\n warm_up=False,\n test_phase = False\n):\n model.train()\n pprevious_grads = list()\n initial_model = copy.deepcopy(model)\n \n p_bar_monitor = (enumerate(zip(task.train_loader, task.valid_loader)))#\n for step, ((train_X, train_y), (val_X, val_y)) in p_bar_monitor:\n\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n \n train_X, train_y = train_X.to(config.device), train_y.to(config.device)\n val_X, val_y = val_X.to(config.device), val_y.to(config.device)\n N = train_X.size(0)\n initial_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n \n if config.light_exp == 1:\n\n if config.meta_model != \"pc_adaptation\" and config.meta_model != \"pure_darts\" and config.dataset != \"cifar10\" and config.dataset != \"cifar100\":\n config.cell_phase = config.layers -1\n architect.v_net.net.config.cell_phase = config.layers -1\n # phase 2. 
architect step (alpha)\n prohibited_list = config.prohibited_list\n if config.naivenaive != 1 and config.eval_switch != 1 and config.meta_model != \"pc_adaptation\" and config.meta_model != \"pure_darts\" and config.dataset not in prohibited_list:\n\n w_optim.zero_grad()\n alpha_optim.zero_grad()\n train_X, train_y = train_X.chunk(config.split_num), train_y.chunk(config.split_num)\n val_X,val_y = val_X.chunk(config.split_num), val_y.chunk(config.split_num)\n \n for (train_X_chunk, train_y_chunk) ,(val_X_chunk,val_y_chunk) in zip(zip(train_X,train_y),zip(val_X,val_y)):\n config.cell_phase = config.layers -1\n architect.v_net.net.config.cell_phase = config.layers -1\n for phase in range(config.layers):\n \n if not warm_up: # only update alphas outside warm up phase\n if config.do_unrolled_architecture_steps:\n architect.virtual_step(train_X_chunk, train_y_chunk, lr, w_optim) # (calc w`)\n \n if config.cell_phase == config.layers -1:\n architect.v_net.net.cells[config.cell_phase].alpha_switch = 1 \n architect.backward(train_X_chunk, train_y_chunk, val_X_chunk, val_y_chunk, lr, w_optim)\n \n \n else:\n architect.v_net.net.cells[config.cell_phase].alpha_switch = 1\n architect.partial_alpha_backward(config, train_X_chunk, train_y_chunk, val_X_chunk, val_y_chunk, lr, w_optim) \n \n \n model.net.alpha_switch = 0\n architect.v_net.net.alpha_switch = 0\n\n # phase 1. child network step (w)\n if config.cell_phase == config.layers -1:\n w_optim.zero_grad()\n logits = model(train_X_chunk)\n loss = model.criterion(logits, train_y_chunk)\n loss_monitor = loss.item()\n loss.backward()\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip) \n w_optim.step()\n\n\n else:\n w_optim.zero_grad()\n output_grad_sum = copy.deepcopy(config.previous_grad)\n pprevious_grad = copy.deepcopy(config.pprevious_grad)\n pprevious_grads.append(pprevious_grad)\n\n if config.residual_flag == 1:\n if config.cell_phase == 1:\n if pprevious_grads[0].shape != output_grad_sum.shape:\n output_grad_sum = output_grad_sum\n else:\n output_grad_sum = torch.add(pprevious_grads[0],output_grad_sum)\n elif config.cell_phase == 0:\n if pprevious_grads[1].shape != output_grad_sum.shape:\n output_grad_sum = output_grad_sum\n else:\n output_grad_sum = torch.add(pprevious_grads[1],output_grad_sum)\n latent = model(train_X_chunk)\n\n\n \n try:\n latent.backward(output_grad_sum)\n \n except:\n if output_grad_sum is not None:\n print(\"batch passed,\",output_grad_sum.shape, \" was the shape of grad saved\")\n print(\"what we had to save was this shape, \", latent.shape )\n print(f\"And this was the phase.{config.cell_phase} what can be the problem here ? \")\n else:\n print(\"output was none. 
Why?\")\n pass\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)\n \n\n \n config.cell_phase -= 1\n architect.v_net.net.config.cell_phase -= 1\n alpha_optim.step() \n w_optim.step()\n \n\n \n \n \n\n else:\n if not warm_up: # only update alphas outside warm up phase\n alpha_optim.zero_grad()\n \n if config.do_unrolled_architecture_steps:\n architect.virtual_step(train_X, train_y, lr, w_optim) # (calc w`)\n \n architect.backward(train_X, train_y, val_X, val_y, lr, w_optim)\n alpha_optim.step()\n \n\n \n w_optim.zero_grad()\n \n logits = model(train_X)\n \n loss = model.criterion(logits, train_y)\n loss.backward()\n nn.utils.clip_grad_norm_(model.weights(), config.w_grad_clip)\n w_optim.step()\n\n \n \n\n\n end.record()\n torch.cuda.synchronize()\n config.computing_time += start.elapsed_time(end)\n \n config.total_steps -= 1\n pprevious_grads = list()\n architect.pprevious_grads = list()\n \n if config.alpha_expect and config.meta_model != 'pc_adaptation':\n if len(config.alpha_grad_footprints) <= 5:\n\n learnt_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n alpha_grad = _alpha_subtract(initial_alpha,learnt_alpha)\n config.alpha_grad_footprints.append(alpha_grad) \n\n\n else:\n \n learnt_alpha = [copy.deepcopy(x).detach().cpu() for x in model.alphas()]\n alpha_grad = _alpha_subtract(initial_alpha,learnt_alpha)\n \n config.alpha_grad_footprints.pop(0) \n config.alpha_grad_footprints.append(alpha_grad) \n\n config.alpha_sample_metrics = _exp_alpha_metric(initial_alpha,config)\n architect.v_net.net.config.alpha_sample_metrics = config.alpha_sample_metrics\n\n ###################################################################################\n\n\n task_specific_model = copy.deepcopy(model)\n task_specific_model = get_diff_for_const_bottom(initial_model,task_specific_model)\n \n return task_specific_model" } ]
import os
import torch
import torch.nn as nn
import numpy as np
import utils.utils as utils
import random
import time
import pandas as pd
import copy
import argparse
from utils import genotypes as gt
from models.search_cnn import SearchCNNController
from models.search_cnn_PC import SearchCNNControllerPC
from task_optimizer.darts import Darts, Architect
from task_optimizer.darts import train as d_train
from tqdm import tqdm
16,121
def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation") model = SearchCNNControllerPC( 3, config.init_channels, config.k, config.layers, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, use_pc_adaptation=True, alpha_prune_threshold=config.alpha_prune_threshold ) ############################################################ model = model.to(device) # weights optimizer w_optim = torch.optim.Adam(model.weights(), config.w_lr, betas=(0.0, 0.999), weight_decay=config.w_weight_decay) # alphas optimizer alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.0, 0.999), weight_decay=config.alpha_weight_decay) # split data to train/validation n_train = len(train_data) split = n_train // 2 # changed here indices = list(range(n_train)) train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) #and order of these valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=train_sampler, num_workers=config.workers, pin_memory=True) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=valid_sampler, num_workers=config.workers, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_data,batch_size=config.batch_size, shuffle=True, num_workers=config.workers, pin_memory=True) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=0.0)
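cropped_code above halves the training set into two disjoint subsets, one feeding the weight optimizer and one feeding the architecture optimizer, via SubsetRandomSampler. A self-contained sketch of that split pattern follows; the toy TensorDataset and batch size are placeholders for illustration only:

# Standalone sketch of the 50/50 weight/architecture data split used above.
import torch
from torch.utils.data import DataLoader, TensorDataset, SubsetRandomSampler

train_data = TensorDataset(torch.randn(100, 3, 32, 32), torch.randint(0, 10, (100,)))

n_train = len(train_data)
split = n_train // 2
indices = list(range(n_train))

# second half of the indices drives the weight steps, first half the alpha steps
train_sampler = SubsetRandomSampler(indices[split:])
valid_sampler = SubsetRandomSampler(indices[:split])

train_loader = DataLoader(train_data, batch_size=16, sampler=train_sampler)
valid_loader = DataLoader(train_data, batch_size=16, sampler=valid_sampler)

for (x_w, y_w), (x_a, y_a) in zip(train_loader, valid_loader):
    pass  # one batch for the weight update, one for the architecture update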
""" Search cell """ ''' Based on https://github.com/boschresearch/metanas which is licensed under GNU Affero General Public License, ''' device = torch.device("cuda") # tensorboard def _init_alpha_normalizer(name, task_train_steps, t_max, t_min, temp_anneal_mode): normalizer = dict() normalizer["name"] = name normalizer["params"] = dict() normalizer["params"]["curr_step"] = 0.0 # current step for scheduling normalizer normalizer["params"]["max_steps"] = float( task_train_steps ) # for scheduling normalizer normalizer["params"]["t_max"] = t_max normalizer["params"]["t_min"] = t_min normalizer["params"]["temp_anneal_mode"] = temp_anneal_mode # temperature annealing return normalizer def main(config): # set default gpu device id torch.cuda.set_device(config.gpus[0]) # set seed np.random.seed(config.seed) torch.manual_seed(config.seed) torch.cuda.manual_seed_all(config.seed) random.seed(config.seed) torch.backends.cudnn.benchmark = True # get data with meta info input_size, input_channels, n_classes, train_data = utils.get_data( config.dataset, config.data_path, cutout_length=0, validation=False) _,_,_,_,test_data = utils.get_data(config.dataset, config.data_path, cutout_length=0, validation=True) # input my model architecture here normalizer = _init_alpha_normalizer( config.normalizer, config.task_train_steps, config.normalizer_t_max, config.normalizer_t_min, config.normalizer_temp_anneal_mode, ) net_crit = nn.CrossEntropyLoss().to(device) model = SearchCNNController( 3, config.init_channels, config.k, config.layers, config, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, alpha_prune_threshold=config.alpha_prune_threshold, ) if config.meta_model == 'pc_adaptation': print("model created as PC adaptation") model = SearchCNNControllerPC( 3, config.init_channels, config.k, config.layers, n_nodes=config.nodes, reduction_layers=config.reduction_layers, device_ids=config.gpus, normalizer=normalizer, PRIMITIVES=gt.PRIMITIVES, feature_scale_rate=1, use_hierarchical_alphas=config.use_hierarchical_alphas, use_pairwise_input_alphas=config.use_pairwise_input_alphas, use_pc_adaptation=True, alpha_prune_threshold=config.alpha_prune_threshold ) ############################################################ model = model.to(device) # weights optimizer w_optim = torch.optim.Adam(model.weights(), config.w_lr, betas=(0.0, 0.999), weight_decay=config.w_weight_decay) # alphas optimizer alpha_optim = torch.optim.Adam(model.alphas(), config.alpha_lr, betas=(0.0, 0.999), weight_decay=config.alpha_weight_decay) # split data to train/validation n_train = len(train_data) split = n_train // 2 # changed here indices = list(range(n_train)) train_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[split:]) #and order of these valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(indices[:split]) train_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=train_sampler, num_workers=config.workers, pin_memory=True) valid_loader = torch.utils.data.DataLoader(train_data, batch_size=config.batch_size, sampler=valid_sampler, num_workers=config.workers, pin_memory=True) test_loader = torch.utils.data.DataLoader(test_data,batch_size=config.batch_size, shuffle=True, num_workers=config.workers, pin_memory=True) lr_scheduler = 
torch.optim.lr_scheduler.CosineAnnealingLR(w_optim, config.epochs, eta_min=0.0)
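_init_alpha_normalizer in all_code only assembles the parameter dictionary (curr_step, max_steps, t_max, t_min, temp_anneal_mode); the SoftMax/GumbelSoftMax callables the controllers dispatch to are not shown in this record. The sketch below is therefore an assumption about how a temperature-annealed softmax could consume those parameters; the "linear" mode name and the function body are illustrative guesses, not the repository's implementation:

# Hypothetical temperature-annealed softmax normalizer, shaped to accept the
# params dict built by _init_alpha_normalizer (assumed implementation).
import torch
import torch.nn.functional as F

def annealed_softmax(alpha: torch.Tensor, params: dict) -> torch.Tensor:
    t_max = params.get("t_max", 1.0)
    t_min = params.get("t_min", 1.0)
    if params.get("temp_anneal_mode") == "linear":        # assumed mode name
        frac = params["curr_step"] / max(params["max_steps"], 1.0)
        temp = t_max + (t_min - t_max) * min(frac, 1.0)   # anneal t_max -> t_min
    else:
        temp = 1.0                                        # no annealing configured
    return F.softmax(alpha / temp, dim=-1)

alpha = 1e-3 * torch.randn(3, 8)
params = {"curr_step": 5.0, "max_steps": 10.0, "t_max": 5.0, "t_min": 0.1,
          "temp_anneal_mode": "linear"}
weights = annealed_softmax(alpha, params)  # each row sums to 1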
architect = Architect(model, config.w_momentum, config.w_weight_decay, use_first_order_darts=True)
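With use_first_order_darts=True, Architect.backward in the snippet above skips the Hessian correction entirely and writes the validation-loss gradients straight into the architecture parameters before alpha_optim.step(). The toy model below strips that first-order update to its essentials; the mixed-operation stand-in and all tensor shapes are invented for illustration, whereas the real code routes the step through the copied v_net:

# First-order DARTS alpha step, reduced to its essentials (illustrative toy setup).
import torch
import torch.nn as nn

w = nn.Parameter(torch.randn(4, 4))          # stands in for the network weights
alpha = nn.Parameter(1e-3 * torch.randn(3))  # stands in for one alpha tensor
alpha_optim = torch.optim.Adam([alpha], lr=3e-4, betas=(0.0, 0.999))

x_val, y_val = torch.randn(8, 4), torch.randn(8, 4)
mix = torch.softmax(alpha, dim=-1)
# pretend the mixed op just scales one linear map by each normalized alpha weight
val_loss = sum(m * ((x_val @ w) - y_val).pow(2).mean() for m in mix)

dalpha = torch.autograd.grad(val_loss, [alpha])  # gradients w.r.t. the alphas only
with torch.no_grad():
    for a, da in zip([alpha], dalpha):
        a.grad = da        # first-order DARTS: use dalpha as-is, no hessian term
alpha_optim.step()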
4
2023-10-08 02:42:27+00:00
24k
LukeForeverYoung/UReader
serve/model_worker.py
[ { "identifier": "IO", "path": "serve/io_utils.py", "snippet": "class IO:\n @staticmethod\n def register(options):\n pass\n\n def open(self, path: str, mode: str):\n raise NotImplementedError\n\n def exists(self, path: str) -> bool:\n raise NotImplementedError\n\n def move(self, src: str, dst: str):\n raise NotImplementedError\n\n def copy(self, src: str, dst: str):\n raise NotImplementedError\n\n def makedirs(self, path: str, exist_ok=True):\n raise NotImplementedError\n\n def remove(self, path: str):\n raise NotImplementedError\n\n def listdir(self, path: str, recursive=False, full_path=False, contains=None):\n raise NotImplementedError\n\n def isdir(self, path: str) -> bool:\n raise NotImplementedError\n\n def isfile(self, path: str) -> bool:\n raise NotImplementedError\n\n def abspath(self, path: str) -> str:\n raise NotImplementedError\n\n def last_modified(self, path: str) -> datetime:\n raise NotImplementedError\n\n def md5(self, path: str) -> str:\n hash_md5 = hashlib.md5()\n with self.open(path, 'rb') as f:\n for chunk in iter(lambda: f.read(4096), b''):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n re_remote = re.compile(r'(oss|https?)://')\n\n def islocal(self, path: str) -> bool:\n return not self.re_remote.match(path.lstrip())" }, { "identifier": "DefaultIO", "path": "serve/io_utils.py", "snippet": "class DefaultIO(IO):\n __name__ = 'DefaultIO'\n\n def _check_path(self, path):\n if not self.islocal(path):\n raise RuntimeError(\n 'Credentials must be provided to use oss path. '\n 'Make sure you have created \"user/modules/oss_credentials.py\" according to ReadMe.')\n\n def open(self, path, mode='r'):\n self._check_path(path)\n path = self.abspath(path)\n return open(path, mode=mode)\n\n def exists(self, path):\n self._check_path(path)\n path = self.abspath(path)\n return os.path.exists(path)\n\n def move(self, src, dst):\n self._check_path(src)\n self._check_path(dst)\n src = self.abspath(src)\n dst = self.abspath(dst)\n shutil.move(src, dst)\n\n def copy(self, src, dst):\n self._check_path(src)\n self._check_path(dst)\n src = self.abspath(src)\n dst = self.abspath(dst)\n try:\n shutil.copyfile(src, dst)\n except shutil.SameFileError:\n pass\n\n def makedirs(self, path, exist_ok=True):\n self._check_path(path)\n path = self.abspath(path)\n os.makedirs(path, exist_ok=exist_ok)\n\n def remove(self, path):\n self._check_path(path)\n path = self.abspath(path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n\n def listdir(self, path, recursive=False, full_path=False, contains=None):\n self._check_path(path)\n path = self.abspath(path)\n contains = contains or ''\n if recursive:\n files = (os.path.join(dp, f) if full_path else f for dp, dn, fn in os.walk(path) for f in fn)\n files = [file for file in files if contains in file]\n else:\n files = os.listdir(path)\n if full_path:\n files = [os.path.join(path, file) for file in files if contains in file]\n return files\n\n def isdir(self, path):\n return os.path.isdir(path)\n\n def isfile(self, path):\n return os.path.isfile(path)\n\n def abspath(self, path):\n return os.path.abspath(path)\n\n def last_modified(self, path):\n return datetime.fromtimestamp(os.path.getmtime(path))" }, { "identifier": "OSS", "path": "serve/io_utils.py", "snippet": "class OSS(DefaultIO):\n \"Mixed IO module to support both system-level and OSS IO methods\"\n __name__ = 'OSS'\n\n def __init__(self, access_key_id: str, access_key_secret: str, region_bucket: List[List[str]]):\n \"\"\"\n the value of \"region_bucket\" 
should be something like [[\"cn-hangzhou\", \"<yourBucketName>\"], [\"cn-zhangjiakou\", \"<yourBucketName>\"]],\n specifying your buckets and corresponding regions\n \"\"\"\n from oss2 import Auth, Bucket, ObjectIterator\n super().__init__()\n self.ObjectIterator = ObjectIterator\n self.auth = Auth(access_key_id, access_key_secret)\n self.buckets = {\n bucket_name: Bucket(self.auth, f'http://oss-{region}.aliyuncs.com', bucket_name)\n for region, bucket_name in region_bucket\n }\n self.oss_pattern = re.compile(r'oss://([^/]+)/(.+)')\n\n def _split_name(self, path):\n m = self.oss_pattern.match(path)\n if not m:\n raise IOError(f'invalid oss path: \"{path}\", should be \"oss://<bucket_name>/path\"')\n bucket_name, path = m.groups()\n return bucket_name, path\n\n def _split(self, path):\n bucket_name, path = self._split_name(path)\n try:\n bucket = self.buckets[bucket_name]\n except KeyError:\n raise IOError(f'Bucket {bucket_name} not registered in oss_credentials.py')\n return bucket, path\n\n def open(self, full_path, mode='r'):\n if not full_path.startswith('oss://'):\n return super().open(full_path, mode)\n\n bucket, path = self._split(full_path)\n with mute_stderr():\n path_exists = bucket.object_exists(path)\n if 'w' in mode:\n if path_exists:\n bucket.delete_object(path)\n if 'b' in mode:\n return BinaryOSSFile(bucket, path)\n return OSSFile(bucket, path)\n elif mode == 'a':\n position = bucket.head_object(path).content_length if path_exists else 0\n return OSSFile(bucket, path, position=position)\n else:\n if not path_exists:\n raise FileNotFoundError(full_path)\n obj = bucket.get_object(path)\n # auto cache large files to avoid memory issues\n # if obj.content_length > 30 * 1024 ** 2: # 30M\n # from da.utils import cache_file\n # path = cache_file(full_path)\n # return super().open(path, mode)\n if mode == 'rb':\n # TODO for a large file, this will load the whole file into memory\n return NullContextWrapper(BytesIO(obj.read()))\n else:\n assert mode == 'r'\n return NullContextWrapper(StringIO(obj.read().decode()))\n\n def exists(self, path):\n if not path.startswith('oss://'):\n return super().exists(path)\n\n bucket, _path = self._split(path)\n # if file exists\n exists = self._file_exists(bucket, _path)\n # if directory exists\n if not exists:\n try:\n self.listdir(path)\n exists = True\n except FileNotFoundError:\n pass\n return exists\n\n def _file_exists(self, bucket, path):\n with mute_stderr():\n return bucket.object_exists(path)\n\n def move(self, src, dst):\n if not src.startswith('oss://') and not dst.startswith('oss://'):\n return super().move(src, dst)\n self.copy(src, dst)\n self.remove(src)\n\n def copy(self, src, dst):\n cloud_src = src.startswith('oss://')\n cloud_dst = dst.startswith('oss://')\n if not cloud_src and not cloud_dst:\n return super().copy(src, dst)\n\n # download\n if cloud_src and not cloud_dst:\n bucket, src = self._split(src)\n obj = bucket.get_object(src)\n if obj.content_length > 100 * 1024 ** 2: # 100M\n from tqdm import tqdm\n progress = None\n\n def callback(i, n):\n nonlocal progress\n if progress is None:\n progress = tqdm(total=n, unit='B', unit_scale=True, unit_divisor=1024, leave=False,\n desc=f'downloading')\n progress.update(i - progress.n)\n\n bucket.get_object_to_file(src, dst, progress_callback=callback)\n if progress is not None:\n progress.close()\n else:\n bucket.get_object_to_file(src, dst)\n return\n bucket, dst = self._split(dst)\n # upload\n if cloud_dst and not cloud_src:\n bucket.put_object_from_file(dst, src)\n return\n # 
copy between oss paths\n if src != dst:\n src_bucket_name, src = self._split_name(src)\n bucket.copy_object(src_bucket_name, src, dst)\n # TODO: support large file copy\n # https://help.aliyun.com/document_detail/88465.html?spm=a2c4g.11174283.6.882.4d157da2mgp3xc\n\n def listdir(self, path, recursive=False, full_path=False, contains=None):\n if not path.startswith('oss://'):\n return super().listdir(path, recursive, full_path, contains)\n\n bucket, path = self._split(path)\n path = path.rstrip('/') + '/'\n files = [obj.key for obj in self.ObjectIterator(bucket, prefix=path, delimiter='' if recursive else '/')]\n try:\n files.remove(path)\n except ValueError:\n pass\n if full_path:\n files = [f'oss://{bucket.bucket_name}/{file}' for file in files]\n else:\n files = [file[len(path):] for file in files]\n if not files:\n raise FileNotFoundError(f'No such directory: oss://{bucket.bucket_name}/{path}')\n files = [file for file in files if (contains or '') in file]\n return files\n\n def remove(self, path):\n if not path.startswith('oss://'):\n return super().remove(path)\n\n if self.isfile(path):\n paths = [path]\n else:\n paths = self.listdir(path, recursive=True, full_path=True)\n for path in paths:\n bucket, path = self._split(path)\n bucket.delete_object(path)\n\n def makedirs(self, path, exist_ok=True):\n # there is no need to create directory in oss\n if not path.startswith('oss://'):\n return super().makedirs(path)\n\n def isdir(self, path):\n if not path.startswith('oss://'):\n return super().isdir(path)\n return self.exists(path.rstrip('/') + '/')\n\n def isfile(self, path):\n if not path.startswith('oss://'):\n return super().isdir(path)\n return self.exists(path) and not self.isdir(path)\n\n def abspath(self, path):\n if not path.startswith('oss://'):\n return super().abspath(path)\n return path\n\n def authorize(self, path):\n if not path.startswith('oss://'):\n raise ValueError('Only oss path can use \"authorize\"')\n import oss2\n bucket, path = self._split(path)\n bucket.put_object_acl(path, oss2.OBJECT_ACL_PUBLIC_READ)\n\n def last_modified(self, path):\n if not path.startswith('oss://'):\n return super().last_modified(path)\n bucket, path = self._split(path)\n return datetime.strptime(\n bucket.get_object_meta(path).headers['Last-Modified'],\n r'%a, %d %b %Y %H:%M:%S %Z'\n ) + timedelta(hours=8)" }, { "identifier": "MplugOwlProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlProcessor(ProcessorMixin):\n attributes = []\n tokenizer_class = (\"MplugOwlTokenizer\")\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n super().__init__(**kwargs)\n self.tokens_to_generate = 0\n self.image_processor = image_processor\n self.tokenizer = tokenizer\n self.add_BOS = True\n\n def __call__(self, text=None, images=None, return_tensors=None, **kwargs):\n args = get_args()\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.\")\n\n if images is not None:\n if not isinstance(images, list):\n images = [images]\n # image_features, = self.image_processor(images, return_tensors=return_tensors, **kwargs)\n process_results = [self.image_processor(image=image, text=None) for image in images]\n if len(process_results)>0 and len(process_results[0][0].shape) == 4:\n # 图片被切分成了多块 默认是doc场景\n text_list = text.split('<image>')\n images = []\n patch_positions = []\n text = text_list[0]\n for ri, (image_input, text_input, patch_position) in enumerate(process_results):\n images.append(image_input)\n patch_positions.append(patch_position)\n if args.patch_pos_embed_type == 'pre':\n # 对于pre处理 v2t最终输出的是一张图的token\n text += '<image>'\n else:\n # 对于post处理 v2t最终输出的是多图\n text += '<image>'*image_input.shape[0]\n text += text_list[ri+1]\n images = torch.cat(images, dim=0)\n patch_positions = torch.cat(patch_positions, dim=0)\n else:\n # 如果没有切片 则正常stack 并创建patch position = num_image (0,0)的patch id以保持一致\n images = [_[0] for _ in process_results]\n images = torch.stack(images, dim=0)\n patch_positions = torch.zeros(images.shape[0],2).long()\n text = text\n if text is not None:\n encoding = tokenize_prompts(\n prompts=[text],\n tokens_to_generate=self.tokens_to_generate,\n add_BOS=self.add_BOS,\n tokenizer=self.tokenizer,\n ignore_dist=True,\n **kwargs,\n )\n # encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n\n \n if text is not None and images is not None:\n encoding[\"pixel_values\"] = images\n encoding[\"patch_positions\"] = patch_position\n return BatchEncoding(data=encoding)\n elif text is not None:\n return BatchEncoding(data=encoding)\n else:\n return BatchEncoding(data=dict(pixel_values=images, patch_position=patch_position), tensor_type=return_tensors)\n\n def batch_decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)\n\n def decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)" }, { "identifier": "MplugOwlImageProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlImageProcessor(CLIPImageProcessor):\n pass" }, { "identifier": "MplugOwlForConditionalGeneration", "path": "mplug_owl/modeling_mplug_owl.py", "snippet": "class MplugOwlForConditionalGeneration(MplugOwlPreTrainedModel):\n config_class = MplugOwlConfig\n main_input_name = \"pixel_values\"\n\n def __init__(self, config: MplugOwlConfig):\n super().__init__(config)\n\n self.vision_model = MplugOwlVisionModel(config.vision_config)\n\n self.query_tokens = nn.Parameter(\n torch.zeros(1, config.num_query_tokens, config.visual_abstractor_config.hidden_size)\n )\n self.num_queries = config.num_query_tokens\n self.abstractor = MplugOwlVisualAbstractorModel(\n config.visual_abstractor_config, config.text_config.hidden_size\n )\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n self.language_model = language_model\n\n # Initialize weights and apply final processing\n self.post_init()\n self.main_input_name = \"input_ids\"\n from transformers import GenerationConfig\n\n self.generation_config = GenerationConfig(\n max_length=512, do_sample=True, top_k=3, pad_token_id=0, unk_token_id=0, bos_token_id=1, eos_token_id=2\n )\n\n def get_input_embeddings(self):\n return self.language_model.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.language_model.set_input_embeddings(value)\n\n def set_output_embeddings(self, new_embeddings):\n self.language_model.set_output_embeddings(new_embeddings)\n\n def get_output_embeddings(self) -> nn.Module:\n return self.language_model.get_output_embeddings()\n\n def get_encoder(self):\n return self.language_model.get_encoder()\n\n def get_decoder(self):\n return self.language_model.get_decoder()\n\n def _tie_weights(self):\n if not self.config.use_decoder_only_language_model:\n self.language_model.encoder.embed_tokens = self.language_model.shared\n self.language_model.decoder.embed_tokens = self.language_model.shared\n\n def _preprocess_accelerate(self):\n r\"\"\"\n Some pre-processing hacks to make the model `accelerate` compatible. Check\n https://github.com/huggingface/transformers/pull/21707 for more details.\n \"\"\"\n hf_device_map = self.hf_device_map\n\n if len(hf_device_map) > 1 and \"language_model\" not in hf_device_map and torch.cuda.device_count() > 1:\n # warn users about unexpected behavior when using multi-GPU + mPLUG-Owl + `accelerate`.\n logger.warning(\n \"The `language_model` is not in the `hf_device_map` dictionary and you are running your script\"\n \" in a multi-GPU environment. 
this may lead to unexpected behavior when using `accelerate`.\"\n \" Please pass a `device_map` that contains `language_model` to remove this warning.\"\n \" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for\"\n \" more details on creating a `device_map` for large models.\",\n )\n\n if hasattr(self.language_model, \"_hf_hook\"):\n self.language_model._hf_hook.io_same_device = True # For `generate` compatibility\n\n @add_start_docstrings_to_model_forward(MPLUG_OWL_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=MplugOwlForConditionalGenerationModelOutput, config_class=MplugOwlVisionConfig\n )\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n input_ids: torch.FloatTensor,\n num_images,\n non_padding_mask: Optional[torch.LongTensor] = None,\n non_media_mask: Optional[torch.LongTensor] = None,\n prompt_mask: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n patch_positions=None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MplugOwlForConditionalGenerationModelOutput]:\n r\"\"\"\n Returns:\n\n SFT example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import MplugOwlProcessor, MplugOwlForConditionalGeneration\n >>> import torch\n\n >>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n >>> processor = MplugOwlProcessor.from_pretrained(\"MAGAer13/mplug-owl-llama-7b\")\n >>> model = MplugOwlForConditionalGeneration.from_pretrained(\n ... \"MAGAer13/mplug-owl-llama-7b\", torch_dtype=torch.float16\n ... )\n >>> model.to(device) # doctest: +IGNORE_RESULT\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> prompt = [\n ... \"The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\\nHuman: <image>\\nHuman: how many cats are there?\\nAI: \"\n ... 
]\n >>> inputs = processor(images=[image], text=prompt, return_tensors=\"pt\").to(device, torch.float16)\n\n >>> generated_ids = model.generate(**inputs)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n >>> print(generated_text)\n There are two cats in the image.\n ```\"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # get text embedding\n text_tokens_ = input_ids.clone()\n batch_size = input_ids.shape[0]\n # labels = text_tokens_[:, 1:].clone().contiguous()\n\n media_token_indices = [\n # [:-1] since we would not use the last token for embedding\n get_media_indices(text_tokens_[i][:-1], self.num_queries)\n for i in range(batch_size)\n ]\n text_tokens_[text_tokens_ < 0] = 1 # Not used\n # text_tokens = text_tokens_[:, :-1].contiguous()\n text_embeds = self.get_input_embeddings()(text_tokens_) # Temporally Embedding\n\n if pixel_values is not None:\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n\n image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n\n query_features = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n )[\"last_hidden_state\"]\n torch.ones(query_features.size()[:-1], dtype=torch.long).to(query_features.device)\n img_seq_length = query_features.shape[1]\n\n num_images_per_sample = num_images.long().cpu().tolist()\n\n text_chunk_embeds = []\n img_idx = 0\n for b in range(batch_size):\n start = 0\n result = []\n if len(media_token_indices[b]) > 0:\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(text_embeds[b, start:pos])\n result.append(query_features[img_idx + i])\n start = pos + img_seq_length\n if start < text_embeds.shape[1]:\n result.append(text_embeds[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n\n # Actual Input Embeddings\n input_embeds = torch.stack(text_chunk_embeds, dim=0)\n\n # Create causal mask and position ids\n _, loss_mask, position_ids = get_ltor_masks_and_position_ids_from_embeddings(input_embeds)\n\n # Calculate the loss_mask\n non_padding_mask = non_padding_mask.long()\n non_media_mask = non_media_mask.long()\n prompt_mask = prompt_mask.long() # TODO How to deal with prompt mask\n # from icecream import ic\n # non_padding_mask = non_padding_mask[:,:-1]\n # non_media_mask = non_media_mask[:,:-1]\n # prompt_mask = prompt_mask[:,:-1]\n # attention_mask = attention_mask[:,:-1]\n loss_mask = loss_mask[:, :-1]\n\n loss_mask = loss_mask * non_padding_mask * non_media_mask * prompt_mask\n labels[:, 1:][loss_mask != 1] = -100\n # Forward into GPT\n outputs = self.language_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n labels=labels,\n return_dict=return_dict,\n output_attentions=self.config.output_attentions,\n )\n outputs.loss = (outputs.loss * loss_mask.view(-1)\n ).sum()/loss_mask.sum()\n return outputs\n\n @torch.no_grad()\n def generate(\n self,\n pixel_values: torch.FloatTensor = None,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n patch_positions=None,\n 
isdecoder=True,\n **generate_kwargs,\n ) -> torch.LongTensor:\n \"\"\"\n Overrides `generate` function to be able to use the model as a conditional generator.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\n Input images to be processed.\n input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n The sequence used as a prompt for the generation.\n attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n Mask to avoid performing attention on padding token indices\n\n Returns:\n captions (list): A list of strings of length batch_size * num_captions.\n \"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n if input_ids is None:\n return self.language_model.generate(attention_mask=attention_mask, **generate_kwargs)\n\n if attention_mask is None:\n attention_mask = input_ids.new_ones(*input_ids.shape)\n\n batch_size = input_ids.size(0)\n media_token_indices = [get_media_indices(input_ids[i], self.num_queries) for i in range(batch_size)]\n input_ids = input_ids.clone() # prevent inplace modify\n input_ids[input_ids < 0] = 0 # Not used\n\n if hasattr(self, \"hf_device_map\"):\n # preprocess for `accelerate`\n self._preprocess_accelerate()\n batch_size = input_ids.shape[0]\n # get text embedding\n inputs_embeds = self.get_input_embeddings()(input_ids)\n # get visual embedding\n if pixel_values is not None:\n pixel_values = pixel_values.to(input_ids.device)\n with torch.no_grad():\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n image_attention_mask = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device\n )\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_outputs = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n return_dict=True,\n )\n query_output = query_outputs[\"last_hidden_state\"]\n image_embeds = query_output\n img_seq_length = image_embeds.shape[1]\n\n # ===================\n # Get actual input embeddings\n # ===================\n text_chunk_embeds = []\n text_chunk_attns = []\n img_idx = 0\n\n for b in range(batch_size):\n start = 0\n result = []\n result_attn = []\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(inputs_embeds[b, start:pos])\n result_attn.append(attention_mask[b, start:pos])\n result.append(image_embeds[img_idx + i])\n result_attn.append(torch.ones(image_embeds[img_idx + i].shape[0], device=inputs_embeds.device))\n start = pos + img_seq_length\n if start < inputs_embeds.shape[1]:\n result.append(inputs_embeds[b, start:])\n result_attn.append(attention_mask[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n text_chunk_attns.append(torch.cat(result_attn, dim=0))\n inputs_embeds = torch.stack(text_chunk_embeds, dim=0)\n attention_mask = torch.stack(text_chunk_attns, dim=0)\n\n outputs = self.language_model.generate(\n inputs_embeds=inputs_embeds,\n # input_ids=input_ids,\n attention_mask=attention_mask,\n **generate_kwargs,\n )\n\n return outputs\n\n def prepare_inputs_for_generation(\n self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, **model_kwargs\n ):\n input_shape = input_ids.shape\n # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # # cut decoder_input_ids if past_key_values is used\n # if past_key_values is not None:\n # input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"pixel_values\": pixel_values,\n \"attention_mask\": attention_mask,\n \"is_decoder\": True,\n }" }, { "identifier": "MplugOwlConfig", "path": "mplug_owl/configuration_mplug_owl.py", "snippet": "class MplugOwlConfig(PretrainedConfig):\n r\"\"\"\n [`MplugOwlConfig`] is the configuration class to store the configuration of a [`MplugOwlForConditionalGeneration`].\n It is used to instantiate a mPLUG-Owl model according to the specified arguments, defining the vision model,\n Q-Former model and language model configs. Instantiating a configuration with the defaults will yield a similar\n configuration to that of the mPLUG-Owl [x-plug/x_plug-llama-7b](https://huggingface.co/x-plug/x_plug-llama-7b)\n architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n Args:\n vision_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisionConfig`].\n visual_abstractor_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize [`MplugOwlVisualAbstractorConfig`].\n text_config (`dict`, *optional*):\n Dictionary of configuration options used to initialize any [`PretrainedConfig`].\n num_query_tokens (`int`, *optional*, defaults to 32):\n The number of query tokens passed through the Transformer.\n\n kwargs (*optional*):\n Dictionary of keyword arguments.\n\n Example:\n\n ```python\n >>> from transformers import (\n ... MplugOwlVisionConfig,\n ... MplugOwlVisualAbstractorConfig,\n ... OPTConfig,\n ... MplugOwlConfig,\n ... MplugOwlForConditionalGeneration,\n ... )\n\n >>> # Initializing a MplugOwlConfig with x-plug/x_plug-llama-7b style configuration\n >>> configuration = MplugOwlConfig()\n\n >>> # Initializing a MplugOwlForConditionalGeneration (with random weights) from the x-plug/x_plug-llama-7b style configuration\n >>> model = MplugOwlForConditionalGeneration(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n\n >>> # We can also initialize a MplugOwlConfig from a MplugOwlVisionConfig, MplugOwlVisualAbstractorConfig and any PretrainedConfig\n\n >>> # Initializing mPLUG-Owl vision, mPLUG-Owl Q-Former and language model configurations\n >>> vision_config = MplugOwlVisionConfig()\n >>> visual_abstractor_config = MplugOwlVisualAbstractorConfig()\n >>> text_config = OPTConfig()\n\n >>> config = MplugOwlConfig.from_text_vision_configs(vision_config, visual_abstractor_config, text_config)\n ```\"\"\"\n model_type = \"mplug-owl\"\n is_composition = True\n\n def __init__(\n self, vision_config=None, visual_abstractor_config=None, text_config=None, num_query_tokens=64, **kwargs\n ):\n super().__init__(**kwargs)\n if vision_config is None:\n vision_config = MplugOwlVisionConfig().to_dict()\n logger.info(\"vision_config is None.\")\n\n if visual_abstractor_config is None:\n visual_abstractor_config = {}\n logger.info(\"abstractor_config is None. 
\")\n\n if text_config is None:\n # we use LLAMA 7b by default\n from transformers.llama.configuration_llama import LlamaConfig\n\n text_config = LlamaConfig(pad_token_id=2).to_dict()\n logger.info(\"text_config is None.\")\n\n self.vision_config = MplugOwlVisionConfig(**vision_config)\n self.visual_abstractor_config = MplugOwlVisualAbstractorConfig(**visual_abstractor_config)\n # self.visual_abstractor_config.layer_norm_eps = 1e-6\n text_model_type = text_config[\"model_type\"] if \"model_type\" in text_config else \"llama\"\n self.text_config = CONFIG_MAPPING[text_model_type](**text_config)\n\n self.tie_word_embeddings = self.text_config.tie_word_embeddings\n self.is_encoder_decoder = self.text_config.is_encoder_decoder\n\n self.num_query_tokens = num_query_tokens\n # self.visual_abstractor_config.encoder_hidden_size = self.vision_config.hidden_size\n self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES\n self.initializer_factor = 1.0\n self.initializer_range = 0.02\n\n for attr in dir(self.text_config):\n if not hasattr(self, attr):\n setattr(self, attr, getattr(self.text_config, attr))\n\n @classmethod\n def from_vision_visual_abstractor_text_configs(\n cls,\n vision_config: MplugOwlVisionConfig,\n visual_abstractor_config: MplugOwlVisualAbstractorConfig,\n text_config: PretrainedConfig,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a [`MplugOwlConfig`] (or a derived class) from a mPLUG-Owl vision model, Q-Former and language\n model configurations.\n\n Returns:\n [`MplugOwlConfig`]: An instance of a configuration object\n \"\"\"\n\n return cls(\n vision_config=vision_config.to_dict(),\n visual_abstractor_config=visual_abstractor_config.to_dict(),\n text_config=text_config.to_dict(),\n **kwargs,\n )\n\n def to_dict(self):\n \"\"\"\n Serializes this instance to a Python dictionary. 
Override the default [`~PretrainedConfig.to_dict`].\n\n Returns:\n `Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,\n \"\"\"\n output = copy.deepcopy(self.__dict__)\n output[\"vision_config\"] = self.vision_config.to_dict()\n output[\"visual_abstractor_config\"] = self.visual_abstractor_config.to_dict()\n output[\"text_config\"] = self.text_config.to_dict()\n output[\"model_type\"] = self.__class__.model_type\n return output" }, { "identifier": "MplugOwlTokenizer", "path": "mplug_owl/tokenization_mplug_owl.py", "snippet": "class MplugOwlTokenizer(LlamaTokenizer):\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=\"<unk>\",\n sp_model_kwargs=None,\n add_bos_token=False,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file,\n unk_token,\n bos_token,\n eos_token,\n pad_token,\n sp_model_kwargs,\n add_bos_token,\n add_eos_token,\n clean_up_tokenization_spaces,\n **kwargs,\n )\n self.eod_id = self.eos_token_id" }, { "identifier": "post_process_output", "path": "serve/model_utils.py", "snippet": "def post_process_output(text):\n text = text.strip()\n pattern = re.compile(\n r\"<unk>|<pad>|<s>|</s>|\\[PAD\\]|<\\|endoftext\\|>|\\[UNK\\]|\\[CLS\\]|\\[MASK\\]|<\\|startofpiece\\|>|<\\|endofpiece\\|>|\\[gMASK\\]|\\[sMASK\\]\"\n )\n text = pattern.sub(\"\", text.strip()).strip()\n return text" }, { "identifier": "Stream", "path": "serve/model_utils.py", "snippet": "class Stream(transformers.StoppingCriteria):\n def __init__(self, callback_func=None):\n self.callback_func = callback_func\n\n def __call__(self, input_ids, scores) -> bool:\n if self.callback_func is not None:\n self.callback_func(input_ids[0])\n return False" }, { "identifier": "Iteratorize", "path": "serve/model_utils.py", "snippet": "class Iteratorize:\n\n \"\"\"\n Transforms a function that takes a callback\n into a lazy iterator (generator).\n \"\"\"\n\n def __init__(self, func, kwargs={}, callback=None):\n self.mfunc = func\n self.c_callback = callback\n self.q = Queue()\n self.sentinel = object()\n self.kwargs = kwargs\n self.stop_now = False\n\n def _callback(val):\n if self.stop_now:\n raise ValueError\n self.q.put(val)\n\n def gentask():\n try:\n ret = self.mfunc(callback=_callback, **self.kwargs)\n except ValueError:\n pass\n except:\n traceback.print_exc()\n pass\n\n self.q.put(self.sentinel)\n if self.c_callback:\n self.c_callback(ret)\n\n self.thread = Thread(target=gentask)\n self.thread.start()\n\n def __iter__(self):\n return self\n\n def __next__(self):\n obj = self.q.get(True, None)\n if obj is self.sentinel:\n raise StopIteration\n else:\n return obj\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n self.stop_now = True" }, { "identifier": "MplugOwlProcessor", "path": "mplug_owl/processing_mplug_owl.py", "snippet": "class MplugOwlProcessor(ProcessorMixin):\n attributes = []\n tokenizer_class = (\"MplugOwlTokenizer\")\n\n def __init__(self, image_processor=None, tokenizer=None, **kwargs):\n super().__init__(**kwargs)\n self.tokens_to_generate = 0\n self.image_processor = image_processor\n self.tokenizer = tokenizer\n self.add_BOS = True\n\n def __call__(self, text=None, images=None, return_tensors=None, **kwargs):\n args = get_args()\n if text is None and images is None:\n raise ValueError(\"You have to specify either text or images. 
Both cannot be none.\")\n\n if images is not None:\n if not isinstance(images, list):\n images = [images]\n # image_features, = self.image_processor(images, return_tensors=return_tensors, **kwargs)\n process_results = [self.image_processor(image=image, text=None) for image in images]\n if len(process_results)>0 and len(process_results[0][0].shape) == 4:\n # 图片被切分成了多块 默认是doc场景\n text_list = text.split('<image>')\n images = []\n patch_positions = []\n text = text_list[0]\n for ri, (image_input, text_input, patch_position) in enumerate(process_results):\n images.append(image_input)\n patch_positions.append(patch_position)\n if args.patch_pos_embed_type == 'pre':\n # 对于pre处理 v2t最终输出的是一张图的token\n text += '<image>'\n else:\n # 对于post处理 v2t最终输出的是多图\n text += '<image>'*image_input.shape[0]\n text += text_list[ri+1]\n images = torch.cat(images, dim=0)\n patch_positions = torch.cat(patch_positions, dim=0)\n else:\n # 如果没有切片 则正常stack 并创建patch position = num_image (0,0)的patch id以保持一致\n images = [_[0] for _ in process_results]\n images = torch.stack(images, dim=0)\n patch_positions = torch.zeros(images.shape[0],2).long()\n text = text\n if text is not None:\n encoding = tokenize_prompts(\n prompts=[text],\n tokens_to_generate=self.tokens_to_generate,\n add_BOS=self.add_BOS,\n tokenizer=self.tokenizer,\n ignore_dist=True,\n **kwargs,\n )\n # encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)\n\n \n if text is not None and images is not None:\n encoding[\"pixel_values\"] = images\n encoding[\"patch_positions\"] = patch_position\n return BatchEncoding(data=encoding)\n elif text is not None:\n return BatchEncoding(data=encoding)\n else:\n return BatchEncoding(data=dict(pixel_values=images, patch_position=patch_position), tensor_type=return_tensors)\n\n def batch_decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please\n refer to the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.batch_decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)\n\n def decode(self, skip_special_tokens=True, *args, **kwargs):\n \"\"\"\n This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. 
Please refer to\n the docstring of this method for more information.\n \"\"\"\n return self.tokenizer.decode(*args, skip_special_tokens=skip_special_tokens, **kwargs)" }, { "identifier": "MplugOwlForConditionalGeneration", "path": "mplug_owl/modeling_mplug_owl.py", "snippet": "class MplugOwlForConditionalGeneration(MplugOwlPreTrainedModel):\n config_class = MplugOwlConfig\n main_input_name = \"pixel_values\"\n\n def __init__(self, config: MplugOwlConfig):\n super().__init__(config)\n\n self.vision_model = MplugOwlVisionModel(config.vision_config)\n\n self.query_tokens = nn.Parameter(\n torch.zeros(1, config.num_query_tokens, config.visual_abstractor_config.hidden_size)\n )\n self.num_queries = config.num_query_tokens\n self.abstractor = MplugOwlVisualAbstractorModel(\n config.visual_abstractor_config, config.text_config.hidden_size\n )\n language_model = AutoModelForCausalLM.from_config(config.text_config)\n self.language_model = language_model\n\n # Initialize weights and apply final processing\n self.post_init()\n self.main_input_name = \"input_ids\"\n from transformers import GenerationConfig\n\n self.generation_config = GenerationConfig(\n max_length=512, do_sample=True, top_k=3, pad_token_id=0, unk_token_id=0, bos_token_id=1, eos_token_id=2\n )\n\n def get_input_embeddings(self):\n return self.language_model.get_input_embeddings()\n\n def set_input_embeddings(self, value):\n self.language_model.set_input_embeddings(value)\n\n def set_output_embeddings(self, new_embeddings):\n self.language_model.set_output_embeddings(new_embeddings)\n\n def get_output_embeddings(self) -> nn.Module:\n return self.language_model.get_output_embeddings()\n\n def get_encoder(self):\n return self.language_model.get_encoder()\n\n def get_decoder(self):\n return self.language_model.get_decoder()\n\n def _tie_weights(self):\n if not self.config.use_decoder_only_language_model:\n self.language_model.encoder.embed_tokens = self.language_model.shared\n self.language_model.decoder.embed_tokens = self.language_model.shared\n\n def _preprocess_accelerate(self):\n r\"\"\"\n Some pre-processing hacks to make the model `accelerate` compatible. Check\n https://github.com/huggingface/transformers/pull/21707 for more details.\n \"\"\"\n hf_device_map = self.hf_device_map\n\n if len(hf_device_map) > 1 and \"language_model\" not in hf_device_map and torch.cuda.device_count() > 1:\n # warn users about unexpected behavior when using multi-GPU + mPLUG-Owl + `accelerate`.\n logger.warning(\n \"The `language_model` is not in the `hf_device_map` dictionary and you are running your script\"\n \" in a multi-GPU environment. 
this may lead to unexpected behavior when using `accelerate`.\"\n \" Please pass a `device_map` that contains `language_model` to remove this warning.\"\n \" Please refer to https://github.com/huggingface/blog/blob/main/accelerate-large-models.md for\"\n \" more details on creating a `device_map` for large models.\",\n )\n\n if hasattr(self.language_model, \"_hf_hook\"):\n self.language_model._hf_hook.io_same_device = True # For `generate` compatibility\n\n @add_start_docstrings_to_model_forward(MPLUG_OWL_INPUTS_DOCSTRING)\n @replace_return_docstrings(\n output_type=MplugOwlForConditionalGenerationModelOutput, config_class=MplugOwlVisionConfig\n )\n def forward(\n self,\n pixel_values: torch.FloatTensor,\n input_ids: torch.FloatTensor,\n num_images,\n non_padding_mask: Optional[torch.LongTensor] = None,\n non_media_mask: Optional[torch.LongTensor] = None,\n prompt_mask: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.LongTensor] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n labels: Optional[torch.LongTensor] = None,\n patch_positions=None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, MplugOwlForConditionalGenerationModelOutput]:\n r\"\"\"\n Returns:\n\n SFT example:\n\n ```python\n >>> from PIL import Image\n >>> import requests\n >>> from transformers import MplugOwlProcessor, MplugOwlForConditionalGeneration\n >>> import torch\n\n >>> device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n >>> processor = MplugOwlProcessor.from_pretrained(\"MAGAer13/mplug-owl-llama-7b\")\n >>> model = MplugOwlForConditionalGeneration.from_pretrained(\n ... \"MAGAer13/mplug-owl-llama-7b\", torch_dtype=torch.float16\n ... )\n >>> model.to(device) # doctest: +IGNORE_RESULT\n\n >>> url = \"http://images.cocodataset.org/val2017/000000039769.jpg\"\n >>> image = Image.open(requests.get(url, stream=True).raw)\n\n >>> prompt = [\n ... \"The following is a conversation between a curious human and AI assistant. The assistant gives helpful, detailed, and polite answers to the user's questions.\\nHuman: <image>\\nHuman: how many cats are there?\\nAI: \"\n ... 
]\n >>> inputs = processor(images=[image], text=prompt, return_tensors=\"pt\").to(device, torch.float16)\n\n >>> generated_ids = model.generate(**inputs)\n >>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()\n >>> print(generated_text)\n There are two cats in the image.\n ```\"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # get text embedding\n text_tokens_ = input_ids.clone()\n batch_size = input_ids.shape[0]\n # labels = text_tokens_[:, 1:].clone().contiguous()\n\n media_token_indices = [\n # [:-1] since we would not use the last token for embedding\n get_media_indices(text_tokens_[i][:-1], self.num_queries)\n for i in range(batch_size)\n ]\n text_tokens_[text_tokens_ < 0] = 1 # Not used\n # text_tokens = text_tokens_[:, :-1].contiguous()\n text_embeds = self.get_input_embeddings()(text_tokens_) # Temporally Embedding\n\n if pixel_values is not None:\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n\n image_attention_mask = torch.ones(image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n\n query_features = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n )[\"last_hidden_state\"]\n torch.ones(query_features.size()[:-1], dtype=torch.long).to(query_features.device)\n img_seq_length = query_features.shape[1]\n\n num_images_per_sample = num_images.long().cpu().tolist()\n\n text_chunk_embeds = []\n img_idx = 0\n for b in range(batch_size):\n start = 0\n result = []\n if len(media_token_indices[b]) > 0:\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(text_embeds[b, start:pos])\n result.append(query_features[img_idx + i])\n start = pos + img_seq_length\n if start < text_embeds.shape[1]:\n result.append(text_embeds[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n\n # Actual Input Embeddings\n input_embeds = torch.stack(text_chunk_embeds, dim=0)\n\n # Create causal mask and position ids\n _, loss_mask, position_ids = get_ltor_masks_and_position_ids_from_embeddings(input_embeds)\n\n # Calculate the loss_mask\n non_padding_mask = non_padding_mask.long()\n non_media_mask = non_media_mask.long()\n prompt_mask = prompt_mask.long() # TODO How to deal with prompt mask\n # from icecream import ic\n # non_padding_mask = non_padding_mask[:,:-1]\n # non_media_mask = non_media_mask[:,:-1]\n # prompt_mask = prompt_mask[:,:-1]\n # attention_mask = attention_mask[:,:-1]\n loss_mask = loss_mask[:, :-1]\n\n loss_mask = loss_mask * non_padding_mask * non_media_mask * prompt_mask\n labels[:, 1:][loss_mask != 1] = -100\n # Forward into GPT\n outputs = self.language_model(\n inputs_embeds=input_embeds,\n attention_mask=attention_mask,\n labels=labels,\n return_dict=return_dict,\n output_attentions=self.config.output_attentions,\n )\n outputs.loss = (outputs.loss * loss_mask.view(-1)\n ).sum()/loss_mask.sum()\n return outputs\n\n @torch.no_grad()\n def generate(\n self,\n pixel_values: torch.FloatTensor = None,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n patch_positions=None,\n 
isdecoder=True,\n **generate_kwargs,\n ) -> torch.LongTensor:\n \"\"\"\n Overrides `generate` function to be able to use the model as a conditional generator.\n\n Args:\n pixel_values (`torch.FloatTensor` of shape (batch_size, num_channels, height, width)):\n Input images to be processed.\n input_ids (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n The sequence used as a prompt for the generation.\n attention_mask (`torch.LongTensor` of shape (batch_size, sequence_length), *optional*):\n Mask to avoid performing attention on padding token indices\n\n Returns:\n captions (list): A list of strings of length batch_size * num_captions.\n \"\"\"\n if pixel_values is not None:\n pixel_values = pixel_values.to(self.vision_model.embeddings.cls_token.data.dtype)\n if input_ids is None:\n return self.language_model.generate(attention_mask=attention_mask, **generate_kwargs)\n\n if attention_mask is None:\n attention_mask = input_ids.new_ones(*input_ids.shape)\n\n batch_size = input_ids.size(0)\n media_token_indices = [get_media_indices(input_ids[i], self.num_queries) for i in range(batch_size)]\n input_ids = input_ids.clone() # prevent inplace modify\n input_ids[input_ids < 0] = 0 # Not used\n\n if hasattr(self, \"hf_device_map\"):\n # preprocess for `accelerate`\n self._preprocess_accelerate()\n batch_size = input_ids.shape[0]\n # get text embedding\n inputs_embeds = self.get_input_embeddings()(input_ids)\n # get visual embedding\n if pixel_values is not None:\n pixel_values = pixel_values.to(input_ids.device)\n with torch.no_grad():\n image_embeds = self.vision_model(pixel_values, patch_positions=patch_positions, return_dict=True).last_hidden_state\n image_attention_mask = torch.ones(\n image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device\n )\n query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)\n query_outputs = self.abstractor(\n query_embeds=query_tokens,\n encoder_hidden_states=image_embeds,\n encoder_attention_mask=image_attention_mask,\n patch_positions=patch_positions,\n return_dict=True,\n )\n query_output = query_outputs[\"last_hidden_state\"]\n image_embeds = query_output\n img_seq_length = image_embeds.shape[1]\n\n # ===================\n # Get actual input embeddings\n # ===================\n text_chunk_embeds = []\n text_chunk_attns = []\n img_idx = 0\n\n for b in range(batch_size):\n start = 0\n result = []\n result_attn = []\n for i, pos in enumerate(media_token_indices[b][0]):\n if pos > start:\n result.append(inputs_embeds[b, start:pos])\n result_attn.append(attention_mask[b, start:pos])\n result.append(image_embeds[img_idx + i])\n result_attn.append(torch.ones(image_embeds[img_idx + i].shape[0], device=inputs_embeds.device))\n start = pos + img_seq_length\n if start < inputs_embeds.shape[1]:\n result.append(inputs_embeds[b, start:])\n result_attn.append(attention_mask[b, start:])\n\n img_idx += media_token_indices[b][1]\n text_chunk_embeds.append(torch.cat(result, dim=0))\n text_chunk_attns.append(torch.cat(result_attn, dim=0))\n inputs_embeds = torch.stack(text_chunk_embeds, dim=0)\n attention_mask = torch.stack(text_chunk_attns, dim=0)\n\n outputs = self.language_model.generate(\n inputs_embeds=inputs_embeds,\n # input_ids=input_ids,\n attention_mask=attention_mask,\n **generate_kwargs,\n )\n\n return outputs\n\n def prepare_inputs_for_generation(\n self, input_ids, pixel_values=None, past_key_values=None, attention_mask=None, **model_kwargs\n ):\n input_shape = input_ids.shape\n # if model is used as a decoder in 
encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # # cut decoder_input_ids if past_key_values is used\n # if past_key_values is not None:\n # input_ids = input_ids[:, -1:]\n\n return {\n \"input_ids\": input_ids,\n \"pixel_values\": pixel_values,\n \"attention_mask\": attention_mask,\n \"is_decoder\": True,\n }" }, { "identifier": "build_processors", "path": "pipeline/data_utils/processors/builder.py", "snippet": "def build_processors(processors_cfg):\n processors = dict()\n for task, processor in processors_cfg.items():\n processors[task] = build_from_cfg(processor, PROCESSORS)\n ic(type(processors[task]))\n return processors" } ]
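The `generate` snippet in the context entries above splices visual query features into the text embedding sequence wherever the prompt carries expanded `<image>` placeholders. As an illustration only, here is a minimal, self-contained sketch of that splicing idea; the helper name `splice_visual_features`, the placeholder convention (negative ids), and all shapes are assumptions made for this sketch, not the repository's actual API.

# Minimal sketch (not the repository's code) of the interleaving idea used in
# MplugOwlForConditionalGeneration.generate above: runs of negative placeholder ids
# in the prompt are replaced, position for position, by the visual query features.
import torch

def splice_visual_features(token_ids, text_embeds, query_features):
    """token_ids: (seq,) LongTensor; negative ids mark image-placeholder slots.
    text_embeds: (seq, dim) embeddings of the text tokens.
    query_features: (num_images, img_seq, dim) visual tokens, one block per image.
    Returns (seq, dim): text embeddings with each placeholder run overwritten."""
    img_seq = query_features.shape[1]
    # start positions of each placeholder run (first negative id of every block)
    starts = [i for i in range(len(token_ids))
              if token_ids[i] < 0 and (i == 0 or token_ids[i - 1] >= 0)]
    chunks, cursor = [], 0
    for img_idx, pos in enumerate(starts):
        chunks.append(text_embeds[cursor:pos])   # plain text before the image
        chunks.append(query_features[img_idx])   # the image's query tokens
        cursor = pos + img_seq                   # skip over the placeholder run
    chunks.append(text_embeds[cursor:])          # trailing text
    return torch.cat(chunks, dim=0)

# toy usage: 10 positions, one image whose placeholder occupies positions 3..6
dim, img_seq = 8, 4
ids = torch.tensor([1, 2, 3, -1, -1, -1, -1, 4, 5, 6])
text = torch.randn(len(ids), dim)
visual = torch.randn(1, img_seq, dim)
print(splice_visual_features(ids, text, visual).shape)  # torch.Size([10, 8])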
from PIL import Image from io import BytesIO from .io_utils import IO, DefaultIO, OSS from mplug_owl.processing_mplug_owl import MplugOwlProcessor, MplugOwlImageProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from mplug_owl.configuration_mplug_owl import MplugOwlConfig from mplug_owl.tokenization_mplug_owl import MplugOwlTokenizer from transformers import GenerationConfig from .model_utils import post_process_output, Stream, Iteratorize from pathlib import Path from mplug_owl.processing_mplug_owl import MplugOwlProcessor from mplug_owl.modeling_mplug_owl import MplugOwlForConditionalGeneration from pipeline.data_utils.processors.builder import build_processors from pipeline.data_utils.processors import * from transformers.models.llama.tokenization_llama import LlamaTokenizer from icecream import ic import torch import gradio as gr import logging import sys import os import json import requests import datetime import uuid import base64 import time import sys import transformers
15,300
# text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model) self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer) self.model = MplugOwlForConditionalGeneration.from_pretrained( base_model, torch_dtype=torch.float, ) ckpt = {} for cf in Path(base_model).iterdir(): if 'pytorch_model' in cf.name and cf.name.endswith('.bin'): ckpt.update(torch.load(cf, map_location='cpu')) msg = self.model.load_state_dict(ckpt, strict=False) print(msg) del ckpt self.bf16 = bf16 self.load_in_8bit = load_in_8bit if not load_in_8bit: if bf16: self.model.bfloat16() else: self.model.half() self.model.cuda() self.model.eval() self.io = io def evaluate( self, pixel_values=None, patch_positions=None, input_ids=None, temperature=1.0, top_p=0.9, top_k=5, num_beams=3, max_new_tokens=256, stream_output=True, length_penalty=1.0, no_repeat_ngram_size=2, do_sample=False, early_stopping=True, **kwargs ): generation_config = dict( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=no_repeat_ngram_size, do_sample=do_sample, early_stopping=early_stopping, length_penalty=length_penalty, ) generate_params = { "pixel_values": pixel_values, "patch_positions": patch_positions, "input_ids": input_ids, "return_dict_in_generate": True, "output_scores": True, "max_new_tokens": max_new_tokens, } generate_params.update(generation_config) if stream_output: # Stream the reply 1 token at a time. # This is based on the trick of using 'stopping_criteria' to create an iterator, # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243. def generate_with_callback(callback=None, **kwargs): kwargs.setdefault( "stopping_criteria", transformers.StoppingCriteriaList() ) kwargs["stopping_criteria"].append(Stream(callback_func=callback)) with torch.no_grad(): self.model.generate(**kwargs) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**generate_params) as generator: for output in generator: # new_tokens = len(output) - len(input_ids[0]) decoded_output = self.tokenizer.decode(output) if output[-1] in [self.tokenizer.eos_token_id]: break
sys.path.append("..") server_error_msg = "**NETWORK ERROR DUE TO HIGH TRAFFIC. PLEASE REGENERATE OR REFRESH THIS PAGE.**" # from pipeline.data_utils.xgpt3_dataset import ImageIO # class ImageProcessor(object): # def __init__(self, resolution=224, tokenizer=None): # normalize = transforms.Normalize((0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711)) # # self.transform = transforms.Compose([ # # transforms.Resize((resolution, resolution),interpolation=Image.BICUBIC), # # transforms.ToTensor(), # # normalize, # # ]) # from megatron.data.processors import doc_processor # processor_class = os.environ.get('DocProcessor','DocSFTProcessor') # self.transform = getattr(doc_processor,processor_class)() # self.image_io = ImageIO() # self.tokenizer=tokenizer # def __call__(self, image_paths, prompts): # if isinstance(image_paths, str): # image_paths = [image_paths] # images = [] # images = self.image_io._load_img(image_paths) # images = [self.transform(image, None) for image in images] # image_input, text_input, patch_position # patch_position = [_[2] for _ in images] # images = [_[0] for _ in images] # text_list = prompts[0].split('<image>') # text = text_list[0] # for ri, image in enumerate(images): # if args.patch_pos_embed_type == 'pre': # # 对于pre处理 v2t最终输出的是一张图的token # text += '<image>' # else: # # 对于post处理 v2t最终输出的是多图 # text += '<image>'*image.shape[0] # text += text_list[ri+1] # images = torch.cat(images, dim=0) # patch_position = torch.cat(patch_position, dim=0) # print(text) # ic(images.shape) # ic(patch_position.shape) # from mplug_owl.processing_mplug_owl import tokenize_prompts # input_ids = tokenize_prompts(text, tokenizer=self.tokenizer, return_tensors='pt') # return { # "pixel_values": images, # 'patch_position': patch_position, # "input_ids": input_ids # } class mPLUG_Owl_Server: def __init__( self, base_model='MAGAer13/mplug-owl-llama-7b', log_dir='./', load_in_8bit=False, bf16=True, device="cuda", io=None, config=None, ): self.log_dir = log_dir self.config = config self.image_processor = build_processors(config['valid_processors'])['sft'] self.tokenizer = LlamaTokenizer.from_pretrained(base_model) self.processor = MplugOwlProcessor(self.image_processor, self.tokenizer) self.model = MplugOwlForConditionalGeneration.from_pretrained( base_model, torch_dtype=torch.float, ) ckpt = {} for cf in Path(base_model).iterdir(): if 'pytorch_model' in cf.name and cf.name.endswith('.bin'): ckpt.update(torch.load(cf, map_location='cpu')) msg = self.model.load_state_dict(ckpt, strict=False) print(msg) del ckpt self.bf16 = bf16 self.load_in_8bit = load_in_8bit if not load_in_8bit: if bf16: self.model.bfloat16() else: self.model.half() self.model.cuda() self.model.eval() self.io = io def evaluate( self, pixel_values=None, patch_positions=None, input_ids=None, temperature=1.0, top_p=0.9, top_k=5, num_beams=3, max_new_tokens=256, stream_output=True, length_penalty=1.0, no_repeat_ngram_size=2, do_sample=False, early_stopping=True, **kwargs ): generation_config = dict( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, no_repeat_ngram_size=no_repeat_ngram_size, do_sample=do_sample, early_stopping=early_stopping, length_penalty=length_penalty, ) generate_params = { "pixel_values": pixel_values, "patch_positions": patch_positions, "input_ids": input_ids, "return_dict_in_generate": True, "output_scores": True, "max_new_tokens": max_new_tokens, } generate_params.update(generation_config) if stream_output: # Stream the reply 1 token at a time. 
# This is based on the trick of using 'stopping_criteria' to create an iterator, # from https://github.com/oobabooga/text-generation-webui/blob/ad37f396fc8bcbab90e11ecf17c56c97bfbd4a9c/modules/text_generation.py#L216-L243. def generate_with_callback(callback=None, **kwargs): kwargs.setdefault( "stopping_criteria", transformers.StoppingCriteriaList() ) kwargs["stopping_criteria"].append(Stream(callback_func=callback)) with torch.no_grad(): self.model.generate(**kwargs) def generate_with_streaming(**kwargs): return Iteratorize(generate_with_callback, kwargs, callback=None) with generate_with_streaming(**generate_params) as generator: for output in generator: # new_tokens = len(output) - len(input_ids[0]) decoded_output = self.tokenizer.decode(output) if output[-1] in [self.tokenizer.eos_token_id]: break
yield post_process_output(decoded_output)
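The `evaluate` method in the code above streams partial generations by registering a `StoppingCriteria` callback (`Stream`) and wrapping the blocking `generate` call in an iterator (`Iteratorize`). Below is a stripped-down, standard-library-only sketch of that callback-to-generator pattern; the stand-in `slow_generate` function and all other names are assumptions for illustration, not the project's code.

# Sketch of the callback -> iterator trick used by Stream/Iteratorize above:
# a blocking producer that only exposes a per-step callback runs on a worker
# thread, and each callback value is pushed through a Queue so the caller can
# simply iterate over the partial results as they arrive.
import threading
import queue
import time

def slow_generate(prompt, callback):
    """Stand-in for model.generate: emits one token at a time via the callback."""
    for token in prompt.split():
        time.sleep(0.01)            # pretend decoding work
        callback(token)

def iterate_stream(func, *args):
    q, sentinel = queue.Queue(), object()

    def worker():
        try:
            func(*args, callback=q.put)   # every callback value becomes a queue item
        finally:
            q.put(sentinel)               # signal end of stream, even on errors

    threading.Thread(target=worker, daemon=True).start()
    while True:
        item = q.get()
        if item is sentinel:
            break
        yield item

for partial in iterate_stream(slow_generate, "streamed output one token at a time"):
    print(partial)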
8
2023-10-08 06:29:02+00:00
24k
LeapLabTHU/Rank-DETR
projects/rank_detr/configs/models/rank_detr_r50.py
[ { "identifier": "HungarianMatcher", "path": "detrex/modeling/matcher/matcher.py", "snippet": "class HungarianMatcher(nn.Module):\n \"\"\"HungarianMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" }, { "identifier": "ChannelMapper", "path": "detrex/modeling/neck/channel_mapper.py", "snippet": "class ChannelMapper(nn.Module):\n \"\"\"Channel Mapper for reduce/increase channels of backbone features. Modified\n from `mmdet <https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/channel_mapper.py>`_.\n\n This is used to reduce/increase the channels of backbone features.\n\n Args:\n input_shape (Dict[str, ShapeSpec]): A dict which contains the backbone features meta infomation,\n e.g. 
``input_shape = {\"res5\": ShapeSpec(channels=2048)}``.\n in_features (List[str]): A list contains the keys which maps the features output from the backbone,\n e.g. ``in_features = [\"res\"]``.\n out_channels (int): Number of output channels for each scale.\n kernel_size (int, optional): Size of the convolving kernel for each scale.\n Default: 3.\n stride (int, optional): Stride of convolution for each scale. Default: 1.\n bias (bool, optional): If True, adds a learnable bias to the output of each scale.\n Default: True.\n groups (int, optional): Number of blocked connections from input channels to\n output channels for each scale. Default: 1.\n dilation (int, optional): Spacing between kernel elements for each scale.\n Default: 1.\n norm_layer (nn.Module, optional): The norm layer used for each scale. Default: None.\n activation (nn.Module, optional): The activation layer used for each scale. Default: None.\n num_outs (int, optional): Number of output feature maps. There will be ``extra_convs`` when\n ``num_outs`` is larger than the length of ``in_features``. Default: None.\n\n Examples:\n >>> import torch\n >>> import torch.nn as nn\n >>> from detrex.modeling import ChannelMapper\n >>> from detectron2.modeling import ShapeSpec\n >>> input_features = {\n ... \"p0\": torch.randn(1, 128, 128, 128),\n ... \"p1\": torch.randn(1, 256, 64, 64),\n ... \"p2\": torch.randn(1, 512, 32, 32),\n ... \"p3\": torch.randn(1, 1024, 16, 16),\n ... }\n >>> input_shapes = {\n ... \"p0\": ShapeSpec(channels=128),\n ... \"p1\": ShapeSpec(channels=256),\n ... \"p2\": ShapeSpec(channels=512),\n ... \"p3\": ShapeSpec(channels=1024),\n ... }\n >>> in_features = [\"p0\", \"p1\", \"p2\", \"p3\"]\n >>> neck = ChannelMapper(\n ... input_shapes=input_shapes,\n ... in_features=in_features,\n ... out_channels=256,\n ... norm_layer=nn.GroupNorm(num_groups=32, num_channels=256)\n >>> outputs = neck(input_features)\n >>> for i in range(len(outputs)):\n ... 
print(f\"output[{i}].shape = {outputs[i].shape}\")\n output[0].shape = torch.Size([1, 256, 128, 128])\n output[1].shape = torch.Size([1, 256, 64, 64])\n output[2].shape = torch.Size([1, 256, 32, 32])\n output[3].shape = torch.Size([1, 256, 16, 16])\n \"\"\"\n\n def __init__(\n self,\n input_shapes: Dict[str, ShapeSpec],\n in_features: List[str],\n out_channels: int,\n kernel_size: int = 3,\n stride: int = 1,\n bias: bool = True,\n groups: int = 1,\n dilation: int = 1,\n norm_layer: nn.Module = None,\n activation: nn.Module = None,\n num_outs: int = None,\n **kwargs,\n ):\n super(ChannelMapper, self).__init__()\n self.extra_convs = None\n\n in_channels_per_feature = [input_shapes[f].channels for f in in_features]\n\n if num_outs is None:\n num_outs = len(input_shapes)\n\n self.convs = nn.ModuleList()\n for in_channel in in_channels_per_feature:\n self.convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=(kernel_size - 1) // 2,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n if num_outs > len(in_channels_per_feature):\n self.extra_convs = nn.ModuleList()\n for i in range(len(in_channels_per_feature), num_outs):\n if i == len(in_channels_per_feature):\n in_channel = in_channels_per_feature[-1]\n else:\n in_channel = out_channels\n self.extra_convs.append(\n ConvNormAct(\n in_channels=in_channel,\n out_channels=out_channels,\n kernel_size=3,\n stride=2,\n padding=1,\n bias=bias,\n groups=groups,\n dilation=dilation,\n norm_layer=copy.deepcopy(norm_layer),\n activation=copy.deepcopy(activation),\n )\n )\n\n self.input_shapes = input_shapes\n self.in_features = in_features\n self.out_channels = out_channels\n\n def forward(self, inputs):\n \"\"\"Forward function for ChannelMapper\n\n Args:\n inputs (Dict[str, torch.Tensor]): The backbone feature maps.\n\n Return:\n tuple(torch.Tensor): A tuple of the processed features.\n \"\"\"\n assert len(inputs) == len(self.convs)\n outs = [self.convs[i](inputs[self.in_features[i]]) for i in range(len(inputs))]\n if self.extra_convs:\n for i in range(len(self.extra_convs)):\n if i == 0:\n outs.append(self.extra_convs[0](inputs[self.in_features[-1]]))\n else:\n outs.append(self.extra_convs[i](outs[-1]))\n return tuple(outs)" }, { "identifier": "PositionEmbeddingSine", "path": "detrex/layers/position_embedding.py", "snippet": "class PositionEmbeddingSine(nn.Module):\n \"\"\"Sinusoidal position embedding used in DETR model.\n\n Please see `End-to-End Object Detection with Transformers\n <https://arxiv.org/pdf/2005.12872>`_ for more details.\n\n Args:\n num_pos_feats (int): The feature dimension for each position along\n x-axis or y-axis. The final returned dimension for each position\n is 2 times of the input value.\n temperature (int, optional): The temperature used for scaling\n the position embedding. Default: 10000.\n scale (float, optional): A scale factor that scales the position\n embedding. The scale will be used only when `normalize` is True.\n Default: 2*pi.\n eps (float, optional): A value added to the denominator for numerical\n stability. 
Default: 1e-6.\n offset (float): An offset added to embed when doing normalization.\n normalize (bool, optional): Whether to normalize the position embedding.\n Default: False.\n \"\"\"\n\n def __init__(\n self,\n num_pos_feats: int = 64,\n temperature: int = 10000,\n scale: float = 2 * math.pi,\n eps: float = 1e-6,\n offset: float = 0.0,\n normalize: bool = False,\n ):\n super().__init__()\n if normalize:\n assert isinstance(scale, (float, int)), (\n \"when normalize is set,\"\n \"scale should be provided and in float or int type, \"\n f\"found {type(scale)}\"\n )\n self.num_pos_feats = num_pos_feats\n self.temperature = temperature\n self.normalize = normalize\n self.scale = scale\n self.eps = eps\n self.offset = offset\n\n def forward(self, mask: torch.Tensor, **kwargs) -> torch.Tensor:\n \"\"\"Forward function for `PositionEmbeddingSine`.\n\n Args:\n mask (torch.Tensor): ByteTensor mask. Non-zero values representing\n ignored positions, while zero values means valid positions\n for the input tensor. Shape as `(bs, h, w)`.\n\n Returns:\n torch.Tensor: Returned position embedding with\n shape `(bs, num_pos_feats * 2, h, w)`\n \"\"\"\n assert mask is not None\n not_mask = ~mask\n y_embed = not_mask.cumsum(1, dtype=torch.float32)\n x_embed = not_mask.cumsum(2, dtype=torch.float32)\n if self.normalize:\n y_embed = (y_embed + self.offset) / (y_embed[:, -1:, :] + self.eps) * self.scale\n x_embed = (x_embed + self.offset) / (x_embed[:, :, -1:] + self.eps) * self.scale\n dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=mask.device)\n dim_t = self.temperature ** (\n 2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / self.num_pos_feats\n )\n pos_x = x_embed[:, :, :, None] / dim_t\n pos_y = y_embed[:, :, :, None] / dim_t\n\n # use view as mmdet instead of flatten for dynamically exporting to ONNX\n B, H, W = mask.size()\n pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4).view(\n B, H, W, -1\n )\n pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)\n return pos" }, { "identifier": "RankDetrTransformerEncoder", "path": "projects/rank_detr/modeling/rank_transformer.py", "snippet": "class RankDetrTransformerEncoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n post_norm: bool = False,\n num_feature_levels: int = 4,\n use_checkpoint: bool = True,\n ):\n super(RankDetrTransformerEncoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n num_fcs=2,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\"self_attn\", \"norm\", \"ffn\", \"norm\"),\n ),\n num_layers=num_layers,\n )\n self.embed_dim = self.layers[0].embed_dim\n self.pre_norm = self.layers[0].pre_norm\n\n if post_norm:\n self.post_norm_layer = nn.LayerNorm(self.embed_dim)\n else:\n self.post_norm_layer = None\n\n # use encoder checkpoint\n if use_checkpoint:\n for layer in self.layers:\n layer = checkpoint_wrapper(layer)\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n 
attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n **kwargs,\n ):\n\n for layer in self.layers:\n query = layer(\n query,\n key,\n value,\n query_pos=query_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n **kwargs,\n )\n\n if self.post_norm_layer is not None:\n query = self.post_norm_layer(query)\n return query" }, { "identifier": "RankDetrTransformerDecoder", "path": "projects/rank_detr/modeling/rank_transformer.py", "snippet": "class RankDetrTransformerDecoder(TransformerLayerSequence):\n def __init__(\n self,\n embed_dim: int = 256,\n num_heads: int = 8,\n feedforward_dim: int = 1024,\n attn_dropout: float = 0.1,\n ffn_dropout: float = 0.1,\n num_layers: int = 6,\n return_intermediate: bool = True,\n num_feature_levels: int = 4,\n use_checkpoint: bool = True,\n look_forward_twice=True,\n num_queries_one2one=300,\n num_queries_one2many=1500,\n two_stage_num_proposals=300,\n rank_adaptive_classhead=True,\n query_rank_layer=True,\n ):\n super(RankDetrTransformerDecoder, self).__init__(\n transformer_layers=BaseTransformerLayer(\n attn=[\n MultiheadAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n attn_drop=attn_dropout,\n batch_first=True,\n ),\n MultiScaleDeformableAttention(\n embed_dim=embed_dim,\n num_heads=num_heads,\n dropout=attn_dropout,\n batch_first=True,\n num_levels=num_feature_levels,\n ),\n ],\n ffn=FFN(\n embed_dim=embed_dim,\n feedforward_dim=feedforward_dim,\n output_dim=embed_dim,\n ffn_drop=ffn_dropout,\n ),\n norm=nn.LayerNorm(embed_dim),\n operation_order=(\n \"self_attn\",\n \"norm\",\n \"cross_attn\",\n \"norm\",\n \"ffn\",\n \"norm\",\n ),\n ),\n num_layers=num_layers,\n )\n self.return_intermediate = return_intermediate\n\n self.bbox_embed = None\n self.class_embed = None\n self.look_forward_twice = look_forward_twice\n\n # Rank-adaptive Classification Head\n self.rank_adaptive_classhead = rank_adaptive_classhead\n\n # query rank layer\n self.query_rank_layer = query_rank_layer\n self.num_queries_one2one = num_queries_one2one\n self.num_queries_one2many = num_queries_one2many\n if self.query_rank_layer:\n self.rank_aware_content_query = nn.ModuleList([\n copy.deepcopy(nn.Embedding(two_stage_num_proposals, embed_dim))\n for _ in range(num_layers - 1)\n ])\n for m in self.rank_aware_content_query.parameters():\n nn.init.zeros_(m)\n\n self.pre_racq_trans = nn.ModuleList([\n copy.deepcopy(nn.Linear(embed_dim, embed_dim))\n for _ in range(num_layers - 1)\n ])\n self.post_racq_trans = nn.ModuleList([\n copy.deepcopy(nn.Linear(embed_dim * 2, embed_dim))\n for _ in range(num_layers - 1)\n ])\n\n # decoder checkpoint\n if use_checkpoint:\n for layer in self.layers:\n layer = checkpoint_wrapper(layer)\n\n def forward(\n self,\n query,\n key,\n value,\n query_pos=None,\n key_pos=None,\n attn_masks=None,\n query_key_padding_mask=None,\n key_padding_mask=None,\n reference_points=None,\n valid_ratios=None,\n **kwargs,\n ):\n output = query\n\n intermediate = []\n intermediate_reference_points = []\n for layer_idx, layer in enumerate(self.layers):\n\n # query rank layer\n if layer_idx >= 1:\n if self.query_rank_layer:\n output = torch.gather(\n output, 1, rank_indices.unsqueeze(-1).repeat(1, 1, output.shape[-1])\n )\n concat_term = self.pre_racq_trans[layer_idx - 1](\n self.rank_aware_content_query[layer_idx - 1].weight[:output.shape[1]].unsqueeze(0).expand(output.shape[0], -1, -1)\n )\n output = torch.cat((output, concat_term), dim=2)\n output = self.post_racq_trans[layer_idx - 
1](output)\n query_pos = torch.gather(\n query_pos, 1, rank_indices.unsqueeze(-1).repeat(1, 1, query_pos.shape[-1])\n )\n if (not self.query_rank_layer) and (self.rank_adaptive_classhead):\n output = torch.gather(\n output, 1, rank_indices.unsqueeze(-1).repeat(1, 1, output.shape[-1])\n )\n query_pos = torch.gather(\n query_pos, 1, rank_indices.unsqueeze(-1).repeat(1, 1, query_pos.shape[-1])\n )\n\n if reference_points.shape[-1] == 4:\n reference_points_input = (\n reference_points[:, :, None]\n * torch.cat([valid_ratios, valid_ratios], -1)[:, None]\n )\n else:\n assert reference_points.shape[-1] == 2\n reference_points_input = reference_points[:, :, None] * valid_ratios[:, None]\n\n output = layer(\n output,\n key,\n value,\n query_pos=query_pos,\n key_pos=key_pos,\n attn_masks=attn_masks,\n query_key_padding_mask=query_key_padding_mask,\n key_padding_mask=key_padding_mask,\n reference_points=reference_points_input,\n **kwargs,\n )\n\n if self.bbox_embed is not None:\n tmp = self.bbox_embed[layer_idx](output)\n if reference_points.shape[-1] == 4:\n new_reference_points = tmp + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n else:\n assert reference_points.shape[-1] == 2\n new_reference_points = tmp\n new_reference_points[..., :2] = tmp[..., :2] + inverse_sigmoid(reference_points)\n new_reference_points = new_reference_points.sigmoid()\n reference_points = new_reference_points.detach()\n\n if self.return_intermediate:\n\n if (layer_idx >= 0) and (self.query_rank_layer or self.rank_adaptive_classhead):\n # generate rank indices\n outputs_class_tmp = self.class_embed[layer_idx](output) # [bs, num_queries, embed_dim] -> [bs, num_queries, num_classes]\n rank_basis = outputs_class_tmp.sigmoid().max(dim=2, keepdim=False)[0] # tensor shape: [bs, num_queries]\n if self.training:\n rank_indices_one2one = torch.argsort(rank_basis[:, : self.num_queries_one2one], dim=1, descending=True) # tensor shape: [bs, num_queries_one2one]\n rank_indices_one2many = torch.argsort(rank_basis[:, self.num_queries_one2one :], dim=1, descending=True) # tensor shape: [bs, num_queries_one2many]\n rank_indices = torch.cat(\n (\n rank_indices_one2one,\n rank_indices_one2many + torch.ones_like(rank_indices_one2many) * self.num_queries_one2one\n ),\n dim=1,\n ) # tensor shape: [bs, num_queries_one2one+num_queries_one2many]\n else:\n rank_indices = torch.argsort(rank_basis[:, : self.num_queries_one2one], dim=1, descending=True)\n rank_indices = rank_indices.detach()\n # rank the reference points\n reference_points = torch.gather(\n reference_points, 1, rank_indices.unsqueeze(-1).repeat(1, 1, reference_points.shape[-1]))\n new_reference_points = torch.gather(\n new_reference_points, 1, rank_indices.unsqueeze(-1).repeat(1, 1, new_reference_points.shape[-1]))\n\n intermediate.append(output)\n intermediate_reference_points.append(\n new_reference_points if self.look_forward_twice else reference_points\n )\n\n if self.return_intermediate:\n return torch.stack(intermediate), torch.stack(intermediate_reference_points)\n\n return output, reference_points" }, { "identifier": "RankDetrTransformer", "path": "projects/rank_detr/modeling/rank_transformer.py", "snippet": "class RankDetrTransformer(nn.Module):\n \"\"\"Transformer module for Deformable DETR\n\n Args:\n encoder (nn.Module): encoder module.\n decoder (nn.Module): decoder module.\n as_two_stage (bool): whether to use two-stage transformer. Default False.\n num_feature_levels (int): number of feature levels. 
Default 4.\n two_stage_num_proposals (int): number of proposals in two-stage transformer. Default 300.\n Only used when as_two_stage is True.\n \"\"\"\n\n def __init__(\n self,\n encoder=None,\n decoder=None,\n num_feature_levels=4,\n as_two_stage=False,\n num_queries_one2one=300,\n num_queries_one2many=1500,\n two_stage_num_proposals=300,\n mixed_selection=True,\n rank_adaptive_classhead=True,\n ):\n super(RankDetrTransformer, self).__init__()\n self.encoder = encoder\n self.decoder = decoder\n self.num_feature_levels = num_feature_levels\n self.as_two_stage = as_two_stage\n self.two_stage_num_proposals = two_stage_num_proposals\n\n self.embed_dim = self.encoder.embed_dim\n\n self.level_embeds = nn.Parameter(torch.Tensor(self.num_feature_levels, self.embed_dim))\n\n if self.as_two_stage:\n self.enc_output = nn.Linear(self.embed_dim, self.embed_dim)\n self.enc_output_norm = nn.LayerNorm(self.embed_dim)\n self.pos_trans = nn.Linear(self.embed_dim * 2, self.embed_dim * 2)\n self.pos_trans_norm = nn.LayerNorm(self.embed_dim * 2)\n else:\n self.reference_points = nn.Linear(self.embed_dim, 2)\n\n self.mixed_selection = mixed_selection\n\n self.init_weights()\n\n def init_weights(self):\n for p in self.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)\n for m in self.modules():\n if isinstance(m, MultiScaleDeformableAttention):\n m.init_weights()\n if not self.as_two_stage:\n nn.init.xavier_normal_(self.reference_points.weight.data, gain=1.0)\n nn.init.constant_(self.reference_points.bias.data, 0.0)\n nn.init.normal_(self.level_embeds)\n\n def gen_encoder_output_proposals(self, memory, memory_padding_mask, spatial_shapes):\n N, S, C = memory.shape\n proposals = []\n _cur = 0\n for lvl, (H, W) in enumerate(spatial_shapes):\n mask_flatten_ = memory_padding_mask[:, _cur : (_cur + H * W)].view(N, H, W, 1)\n valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)\n valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)\n\n grid_y, grid_x = torch.meshgrid(\n torch.linspace(0, H - 1, H, dtype=torch.float32, device=memory.device),\n torch.linspace(0, W - 1, W, dtype=torch.float32, device=memory.device),\n )\n grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)\n\n scale = torch.cat([valid_W.unsqueeze(-1), valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)\n grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale\n wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)\n proposal = torch.cat((grid, wh), -1).view(N, -1, 4)\n proposals.append(proposal)\n _cur += H * W\n\n output_proposals = torch.cat(proposals, 1)\n output_proposals_valid = ((output_proposals > 0.01) & (output_proposals < 0.99)).all(\n -1, keepdim=True\n )\n output_proposals = torch.log(output_proposals / (1 - output_proposals))\n output_proposals = output_proposals.masked_fill(\n memory_padding_mask.unsqueeze(-1), float(\"inf\")\n )\n output_proposals = output_proposals.masked_fill(~output_proposals_valid, float(\"inf\"))\n\n output_memory = memory\n output_memory = output_memory.masked_fill(memory_padding_mask.unsqueeze(-1), float(0))\n output_memory = output_memory.masked_fill(~output_proposals_valid, float(0))\n output_memory = self.enc_output_norm(self.enc_output(output_memory))\n return output_memory, output_proposals\n\n @staticmethod\n def get_reference_points(spatial_shapes, valid_ratios, device):\n \"\"\"Get the reference points used in decoder.\n\n Args:\n spatial_shapes (Tensor): The shape of all\n feature maps, has shape (num_level, 2).\n valid_ratios (Tensor): The radios of valid\n points on the feature map, has 
shape\n (bs, num_levels, 2)\n device (obj:`device`): The device where\n reference_points should be.\n\n Returns:\n Tensor: reference points used in decoder, has \\\n shape (bs, num_keys, num_levels, 2).\n \"\"\"\n reference_points_list = []\n for lvl, (H, W) in enumerate(spatial_shapes):\n # TODO check this 0.5\n ref_y, ref_x = torch.meshgrid(\n torch.linspace(0.5, H - 0.5, H, dtype=torch.float32, device=device),\n torch.linspace(0.5, W - 0.5, W, dtype=torch.float32, device=device),\n )\n ref_y = ref_y.reshape(-1)[None] / (valid_ratios[:, None, lvl, 1] * H)\n ref_x = ref_x.reshape(-1)[None] / (valid_ratios[:, None, lvl, 0] * W)\n ref = torch.stack((ref_x, ref_y), -1)\n reference_points_list.append(ref)\n reference_points = torch.cat(reference_points_list, 1)\n reference_points = reference_points[:, :, None] * valid_ratios[:, None]\n return reference_points\n\n def get_valid_ratio(self, mask):\n \"\"\"Get the valid radios of feature maps of all level.\"\"\"\n _, H, W = mask.shape\n valid_H = torch.sum(~mask[:, :, 0], 1)\n valid_W = torch.sum(~mask[:, 0, :], 1)\n valid_ratio_h = valid_H.float() / H\n valid_ratio_w = valid_W.float() / W\n valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)\n return valid_ratio\n\n def get_proposal_pos_embed(self, proposals, num_pos_feats=128, temperature=10000):\n \"\"\"Get the position embedding of proposal.\"\"\"\n scale = 2 * math.pi\n dim_t = torch.arange(num_pos_feats, dtype=torch.float32, device=proposals.device)\n dim_t = temperature ** (2 * torch.div(dim_t, 2, rounding_mode=\"floor\") / num_pos_feats)\n # N, L, 4\n proposals = proposals.sigmoid() * scale\n # N, L, 4, 128\n pos = proposals[:, :, :, None] / dim_t\n # N, L, 4, 64, 2\n pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()), dim=4).flatten(2)\n return pos\n\n def forward(\n self,\n multi_level_feats,\n multi_level_masks,\n multi_level_pos_embeds,\n query_embed,\n self_attn_mask,\n **kwargs,\n ):\n assert self.as_two_stage or query_embed is not None\n\n feat_flatten = []\n mask_flatten = []\n lvl_pos_embed_flatten = []\n spatial_shapes = []\n for lvl, (feat, mask, pos_embed) in enumerate(\n zip(multi_level_feats, multi_level_masks, multi_level_pos_embeds)\n ):\n bs, c, h, w = feat.shape\n spatial_shape = (h, w)\n spatial_shapes.append(spatial_shape)\n\n feat = feat.flatten(2).transpose(1, 2) # bs, hw, c\n mask = mask.flatten(1)\n pos_embed = pos_embed.flatten(2).transpose(1, 2) # bs, hw, c\n lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)\n lvl_pos_embed_flatten.append(lvl_pos_embed)\n feat_flatten.append(feat)\n mask_flatten.append(mask)\n feat_flatten = torch.cat(feat_flatten, 1)\n mask_flatten = torch.cat(mask_flatten, 1)\n lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)\n spatial_shapes = torch.as_tensor(\n spatial_shapes, dtype=torch.long, device=feat_flatten.device\n )\n level_start_index = torch.cat(\n (spatial_shapes.new_zeros((1,)), spatial_shapes.prod(1).cumsum(0)[:-1])\n )\n valid_ratios = torch.stack([self.get_valid_ratio(m) for m in multi_level_masks], 1)\n\n reference_points = self.get_reference_points(\n spatial_shapes, valid_ratios, device=feat.device\n )\n\n memory = self.encoder(\n query=feat_flatten,\n key=None,\n value=None,\n query_pos=lvl_pos_embed_flatten,\n query_key_padding_mask=mask_flatten,\n spatial_shapes=spatial_shapes,\n reference_points=reference_points,\n level_start_index=level_start_index,\n valid_ratios=valid_ratios,\n **kwargs,\n )\n\n bs, _, c = memory.shape\n if self.as_two_stage:\n output_memory, 
output_proposals = self.gen_encoder_output_proposals(\n memory, mask_flatten, spatial_shapes\n )\n\n enc_outputs_class = self.decoder.class_embed[self.decoder.num_layers](output_memory)\n enc_outputs_coord_unact = (\n self.decoder.bbox_embed[self.decoder.num_layers](output_memory) + output_proposals\n )\n\n topk = self.two_stage_num_proposals\n topk_proposals = torch.topk(enc_outputs_class[..., 0], topk, dim=1)[1]\n topk_coords_unact = torch.gather(\n enc_outputs_coord_unact, 1, topk_proposals.unsqueeze(-1).repeat(1, 1, 4)\n )\n topk_coords_unact = topk_coords_unact.detach()\n reference_points = topk_coords_unact.sigmoid()\n init_reference_out = reference_points\n pos_trans_out = self.pos_trans_norm(\n self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact))\n )\n if not self.mixed_selection:\n query_pos, query = torch.split(pos_trans_out, c, dim=2)\n else:\n # query_pos here is the content embed for deformable DETR\n query = query_embed.unsqueeze(0).expand(bs, -1, -1)\n query_pos, _ = torch.split(pos_trans_out, c, dim=2)\n else:\n query_pos, query = torch.split(query_embed, c, dim=1)\n query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)\n query = query.unsqueeze(0).expand(bs, -1, -1)\n reference_points = self.reference_points(query_pos).sigmoid()\n init_reference_out = reference_points\n\n # decoder\n inter_states, inter_references = self.decoder(\n query=query, # bs, num_queries, embed_dims\n key=None, # bs, num_tokens, embed_dims\n value=memory, # bs, num_tokens, embed_dims\n query_pos=query_pos,\n key_padding_mask=mask_flatten, # bs, num_tokens\n reference_points=reference_points, # num_queries, 4\n spatial_shapes=spatial_shapes, # nlvl, 2\n level_start_index=level_start_index, # nlvl\n valid_ratios=valid_ratios, # bs, nlvl, 2\n attn_masks=[self_attn_mask, None],\n **kwargs,\n )\n\n inter_references_out = inter_references\n if self.as_two_stage:\n return (\n inter_states,\n init_reference_out,\n inter_references_out,\n enc_outputs_class,\n enc_outputs_coord_unact,\n )\n return inter_states, init_reference_out, inter_references_out, None, None" }, { "identifier": "RankDETR", "path": "projects/rank_detr/modeling/rank_detr.py", "snippet": "class RankDETR(nn.Module):\n \"\"\"Implements the Deformable DETR model.\n\n Code is modified from the `official github repo\n <https://github.com/fundamentalvision/Deformable-DETR>`_.\n\n More details can be found in the `paper\n <https://arxiv.org/abs/2010.04159>`_ .\n\n Args:\n backbone (nn.Module): the backbone module.\n position_embedding (nn.Module): the position embedding module.\n neck (nn.Module): the neck module.\n transformer (nn.Module): the transformer module.\n embed_dim (int): the dimension of the embedding.\n num_classes (int): Number of total categories.\n num_queries (int): Number of proposal dynamic anchor boxes in Transformer\n criterion (nn.Module): Criterion for calculating the total losses.\n pixel_mean (List[float]): Pixel mean value for image normalization.\n Default: [123.675, 116.280, 103.530].\n pixel_std (List[float]): Pixel std value for image normalization.\n Default: [58.395, 57.120, 57.375].\n aux_loss (bool): whether to use auxiliary loss. Default: True.\n with_box_refine (bool): whether to use box refinement. Default: False.\n as_two_stage (bool): whether to use two-stage. Default: False.\n select_box_nums_for_evaluation (int): the number of topk candidates\n slected at postprocess for evaluation. 
Default: 100.\n\n \"\"\"\n\n def __init__(\n self,\n backbone,\n position_embedding,\n neck,\n transformer,\n embed_dim,\n num_classes,\n num_queries_one2one,\n num_queries_one2many,\n criterion,\n pixel_mean,\n pixel_std,\n aux_loss=True,\n with_box_refine=False,\n as_two_stage=False,\n select_box_nums_for_evaluation=100,\n device=\"cuda\",\n mixed_selection=True,\n k_one2many=6,\n lambda_one2many=1.0,\n rank_adaptive_classhead=True,\n ):\n super().__init__()\n num_queries = num_queries_one2one + num_queries_one2many\n # define backbone and position embedding module\n self.backbone = backbone\n self.position_embedding = position_embedding\n\n # define neck module\n self.neck = neck\n\n # define learnable query embedding\n self.num_queries = num_queries\n if not as_two_stage:\n self.query_embedding = nn.Embedding(num_queries, embed_dim * 2)\n elif mixed_selection:\n self.query_embedding = nn.Embedding(num_queries, embed_dim)\n\n # define transformer module\n self.transformer = transformer\n\n # define classification head and box head\n self.num_classes = num_classes\n self.class_embed = nn.Linear(embed_dim, num_classes)\n self.bbox_embed = MLP(embed_dim, embed_dim, 4, 3)\n\n # where to calculate auxiliary loss in criterion\n self.aux_loss = aux_loss\n self.criterion = criterion\n\n # define contoller for box refinement and two-stage variants\n self.with_box_refine = with_box_refine\n self.as_two_stage = as_two_stage\n\n # init parameters for heads\n prior_prob = 0.01\n bias_value = -math.log((1 - prior_prob) / prior_prob)\n self.class_embed.bias.data = torch.ones(num_classes) * bias_value\n nn.init.constant_(self.bbox_embed.layers[-1].weight.data, 0)\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data, 0)\n for _, neck_layer in self.neck.named_modules():\n if isinstance(neck_layer, nn.Conv2d):\n nn.init.xavier_uniform_(neck_layer.weight, gain=1)\n nn.init.constant_(neck_layer.bias, 0)\n\n # If two-stage, the last class_embed and bbox_embed is for region proposal generation\n # Decoder layers share the same heads without box refinement, while use the different\n # heads when box refinement is used.\n num_pred = (\n (transformer.decoder.num_layers + 1) if as_two_stage else transformer.decoder.num_layers\n )\n if with_box_refine:\n self.class_embed = nn.ModuleList(\n [copy.deepcopy(self.class_embed) for i in range(num_pred)]\n )\n self.bbox_embed = nn.ModuleList(\n [copy.deepcopy(self.bbox_embed) for i in range(num_pred)]\n )\n nn.init.constant_(self.bbox_embed[0].layers[-1].bias.data[2:], -2.0)\n self.transformer.decoder.bbox_embed = self.bbox_embed\n else:\n nn.init.constant_(self.bbox_embed.layers[-1].bias.data[2:], -2.0)\n self.class_embed = nn.ModuleList([self.class_embed for _ in range(num_pred)])\n self.bbox_embed = nn.ModuleList([self.bbox_embed for _ in range(num_pred)])\n self.transformer.decoder.bbox_embed = None\n\n # hack implementation for two-stage. 
The last class_embed and bbox_embed is for region proposal generation\n if as_two_stage:\n self.transformer.decoder.class_embed = self.class_embed\n for box_embed in self.bbox_embed:\n nn.init.constant_(box_embed.layers[-1].bias.data[2:], 0.0)\n\n # set topk boxes selected for inference\n self.select_box_nums_for_evaluation = select_box_nums_for_evaluation\n\n # normalizer for input raw images\n self.device = device\n pixel_mean = torch.Tensor(pixel_mean).to(self.device).view(3, 1, 1)\n pixel_std = torch.Tensor(pixel_std).to(self.device).view(3, 1, 1)\n self.normalizer = lambda x: (x - pixel_mean) / pixel_std\n self.num_queries_one2one = num_queries_one2one\n self.mixed_selection = mixed_selection\n self.k_one2many = k_one2many\n self.lambda_one2many = lambda_one2many\n\n # Rank-adaptive Classification Head\n self.rank_adaptive_classhead = rank_adaptive_classhead\n if self.rank_adaptive_classhead:\n self.rank_adaptive_classhead_emb = nn.ModuleList([\n copy.deepcopy(nn.Embedding(self.num_queries, num_classes))\n for _ in range(transformer.decoder.num_layers)\n ])\n for m in self.rank_adaptive_classhead_emb.parameters():\n nn.init.zeros_(m)\n\n def forward(self, batched_inputs):\n images = self.preprocess_image(batched_inputs)\n\n if self.training:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_ones(batch_size, H, W)\n for img_id in range(batch_size):\n # mask padding regions in batched images\n img_h, img_w = batched_inputs[img_id][\"instances\"].image_size\n img_masks[img_id, :img_h, :img_w] = 0\n else:\n batch_size, _, H, W = images.tensor.shape\n img_masks = images.tensor.new_zeros(batch_size, H, W)\n # disable the one-to-many branch queries\n # save them frist\n save_num_queries = self.num_queries\n save_two_stage_num_proposals = self.transformer.two_stage_num_proposals\n self.num_queries = self.num_queries_one2one\n self.transformer.two_stage_num_proposals = self.num_queries\n\n # original features\n features = self.backbone(images.tensor) # output feature dict\n\n # project backbone features to the reuired dimension of transformer\n # we use multi-scale features in deformable DETR\n multi_level_feats = self.neck(features)\n multi_level_masks = []\n multi_level_position_embeddings = []\n for feat in multi_level_feats:\n multi_level_masks.append(\n F.interpolate(img_masks[None], size=feat.shape[-2:]).to(torch.bool).squeeze(0)\n )\n multi_level_position_embeddings.append(self.position_embedding(multi_level_masks[-1]))\n\n # initialize object query embeddings\n query_embeds = None\n if not self.as_two_stage or self.mixed_selection:\n query_embeds = self.query_embedding.weight[0 : self.num_queries, :]\n\n # make attn mask\n \"\"\" attention mask to prevent information leakage\n \"\"\"\n self_attn_mask = (\n torch.zeros(\n [\n self.num_queries,\n self.num_queries,\n ]\n )\n .bool()\n .to(feat.device)\n )\n self_attn_mask[\n self.num_queries_one2one :,\n 0 : self.num_queries_one2one,\n ] = True\n self_attn_mask[\n 0 : self.num_queries_one2one,\n self.num_queries_one2one :,\n ] = True\n\n (\n inter_states,\n init_reference,\n inter_references,\n enc_outputs_class,\n enc_outputs_coord_unact,\n ) = self.transformer(\n multi_level_feats,\n multi_level_masks,\n multi_level_position_embeddings,\n query_embeds,\n self_attn_mask,\n )\n\n # Calculate output coordinates and classes.\n outputs_classes_one2one = []\n outputs_coords_one2one = []\n outputs_classes_one2many = []\n outputs_coords_one2many = []\n for lvl in range(inter_states.shape[0]):\n if lvl == 0:\n reference 
= init_reference\n else:\n reference = inter_references[lvl - 1]\n reference = inverse_sigmoid(reference)\n outputs_class = self.class_embed[lvl](inter_states[lvl])\n\n # Rank-adaptive Classification Head\n if self.rank_adaptive_classhead:\n bs, n_query = inter_states[lvl].shape[0], inter_states[lvl].shape[1]\n # bs = batch_size in one gpu; n_query = num_queries_one2one+num_queries_one2many for training, num_queries_one2one for testing\n rank_adaptive_classhead_emb_lvl = self.rank_adaptive_classhead_emb[lvl].weight[:n_query, :].unsqueeze(0).repeat(bs, 1, 1)\n # tensor shape: [bs, n_query, num_classes]\n outputs_class = outputs_class + rank_adaptive_classhead_emb_lvl\n # tensor shape: [bs, n_query, num_classes]\n\n tmp = self.bbox_embed[lvl](inter_states[lvl])\n if reference.shape[-1] == 4:\n tmp += reference\n else:\n assert reference.shape[-1] == 2\n tmp[..., :2] += reference\n outputs_coord = tmp.sigmoid()\n outputs_classes_one2one.append(outputs_class[:, 0 : self.num_queries_one2one])\n outputs_classes_one2many.append(outputs_class[:, self.num_queries_one2one :])\n outputs_coords_one2one.append(outputs_coord[:, 0 : self.num_queries_one2one])\n outputs_coords_one2many.append(outputs_coord[:, self.num_queries_one2one :])\n outputs_classes_one2one = torch.stack(outputs_classes_one2one)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2one, num_classes]\n outputs_coords_one2one = torch.stack(outputs_coords_one2one)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2one, 4]\n outputs_classes_one2many = torch.stack(outputs_classes_one2many)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2many, num_classes]\n outputs_coords_one2many = torch.stack(outputs_coords_one2many)\n # tensor shape: [num_decoder_layers, bs, num_queries_one2many, 4]\n\n # prepare for loss computation\n output = {\n \"pred_logits\": outputs_classes_one2one[-1],\n \"pred_boxes\": outputs_coords_one2one[-1],\n \"pred_logits_one2many\": outputs_classes_one2many[-1],\n \"pred_boxes_one2many\": outputs_coords_one2many[-1],\n }\n if self.aux_loss:\n output[\"aux_outputs\"] = self._set_aux_loss(\n outputs_classes_one2one, outputs_coords_one2one\n )\n output[\"aux_outputs_one2many\"] = self._set_aux_loss(\n outputs_classes_one2many, outputs_coords_one2many\n )\n\n if self.as_two_stage:\n enc_outputs_coord = enc_outputs_coord_unact.sigmoid()\n output[\"enc_outputs\"] = {\n \"pred_logits\": enc_outputs_class,\n \"pred_boxes\": enc_outputs_coord,\n }\n\n if self.training:\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\n targets = self.prepare_targets(gt_instances)\n if self.k_one2many > 0:\n loss_dict = self.train_hybrid(\n output,\n targets,\n self.k_one2many,\n self.criterion,\n self.lambda_one2many,\n )\n else:\n loss_dict = self.criterion(output, targets)\n weight_dict = self.criterion.weight_dict\n new_dict = dict()\n for key, value in weight_dict.items():\n new_dict[key] = value\n new_dict[key + \"_one2many\"] = value\n weight_dict = new_dict\n for k in loss_dict.keys():\n if k in weight_dict:\n loss_dict[k] *= weight_dict[k]\n return loss_dict\n else:\n box_cls = output[\"pred_logits\"]\n box_pred = output[\"pred_boxes\"]\n results = self.inference(box_cls, box_pred, images.image_sizes)\n processed_results = []\n for results_per_image, input_per_image, image_size in zip(\n results, batched_inputs, images.image_sizes\n ):\n height = input_per_image.get(\"height\", image_size[0])\n width = input_per_image.get(\"width\", image_size[1])\n r = 
detector_postprocess(results_per_image, height, width)\n processed_results.append({\"instances\": r})\n # recover the model parameters for next training epoch\n self.num_queries = save_num_queries\n self.transformer.two_stage_num_proposals = save_two_stage_num_proposals\n return processed_results\n\n def train_hybrid(self, outputs, targets, k_one2many, criterion, lambda_one2many):\n # one-to-one-loss\n loss_dict = criterion(outputs, targets)\n multi_targets = copy.deepcopy(targets)\n # repeat the targets\n for target in multi_targets:\n target[\"boxes\"] = target[\"boxes\"].repeat(k_one2many, 1)\n target[\"labels\"] = target[\"labels\"].repeat(k_one2many)\n\n outputs_one2many = dict()\n outputs_one2many[\"pred_logits\"] = outputs[\"pred_logits_one2many\"]\n outputs_one2many[\"pred_boxes\"] = outputs[\"pred_boxes_one2many\"]\n outputs_one2many[\"aux_outputs\"] = outputs[\"aux_outputs_one2many\"]\n\n # one-to-many loss\n loss_dict_one2many = criterion(outputs_one2many, multi_targets)\n for key, value in loss_dict_one2many.items():\n if key + \"_one2many\" in loss_dict.keys():\n loss_dict[key + \"_one2many\"] += value * lambda_one2many\n else:\n loss_dict[key + \"_one2many\"] = value * lambda_one2many\n return loss_dict\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_class, outputs_coord):\n # this is a workaround to make torchscript happy, as torchscript\n # doesn't support dictionary with non-homogeneous values, such\n # as a dict having both a Tensor and a list.\n return [\n {\"pred_logits\": a, \"pred_boxes\": b}\n for a, b in zip(outputs_class[:-1], outputs_coord[:-1])\n ]\n\n def inference(self, box_cls, box_pred, image_sizes):\n \"\"\"\n Arguments:\n box_cls (Tensor): tensor of shape (batch_size, num_queries, K).\n The tensor predicts the classification probability for each query.\n box_pred (Tensor): tensors of shape (batch_size, num_queries, 4).\n The tensor predicts 4-vector (x,y,w,h) box\n regression values for every queryx\n image_sizes (List[torch.Size]): the input image sizes\n\n Returns:\n results (List[Instances]): a list of #images elements.\n \"\"\"\n assert len(box_cls) == len(image_sizes)\n results = []\n\n # Select top-k confidence boxes for inference\n prob = box_cls.sigmoid()\n topk_values, topk_indexes = torch.topk(\n prob.view(box_cls.shape[0], -1), self.select_box_nums_for_evaluation, dim=1\n )\n scores = topk_values\n topk_boxes = torch.div(topk_indexes, box_cls.shape[2], rounding_mode=\"floor\")\n labels = topk_indexes % box_cls.shape[2]\n\n boxes = torch.gather(box_pred, 1, topk_boxes.unsqueeze(-1).repeat(1, 1, 4))\n\n for (\n i,\n (scores_per_image, labels_per_image, box_pred_per_image, image_size),\n ) in enumerate(zip(scores, labels, boxes, image_sizes)):\n result = Instances(image_size)\n result.pred_boxes = Boxes(box_cxcywh_to_xyxy(box_pred_per_image))\n result.pred_boxes.scale(scale_x=image_size[1], scale_y=image_size[0])\n result.scores = scores_per_image\n result.pred_classes = labels_per_image\n results.append(result)\n return results\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n h, w = targets_per_image.image_size\n image_size_xyxy = torch.as_tensor([w, h, w, h], dtype=torch.float, device=self.device)\n gt_classes = targets_per_image.gt_classes\n gt_boxes = targets_per_image.gt_boxes.tensor / image_size_xyxy\n gt_boxes = box_xyxy_to_cxcywh(gt_boxes)\n new_targets.append({\"labels\": gt_classes, \"boxes\": gt_boxes})\n return new_targets\n\n def preprocess_image(self, batched_inputs):\n images = 
[self.normalizer(x[\"image\"].to(self.device)) for x in batched_inputs]\n images = ImageList.from_tensors(images)\n return images" }, { "identifier": "RankDetrCriterion", "path": "projects/rank_detr/modeling/rankdetr_criterion.py", "snippet": "class RankDetrCriterion(SetCriterion):\n \"\"\"This class computes the loss for Deformable-DETR\n and two-stage Deformable-DETR\n \"\"\"\n\n def __init__(\n self,\n num_classes,\n matcher,\n weight_dict,\n losses: List[str] = [\"class\", \"boxes\"],\n eos_coef: float = 0.1,\n loss_class_type: str = \"focal_loss\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n GIoU_aware_class_loss: bool = True,\n ):\n super(RankDetrCriterion, self).__init__(\n num_classes=num_classes,\n matcher=matcher,\n weight_dict=weight_dict,\n losses=losses,\n eos_coef=eos_coef,\n loss_class_type=loss_class_type,\n alpha=alpha,\n gamma=gamma,\n )\n self.GIoU_aware_class_loss = GIoU_aware_class_loss\n\n def loss_labels(self, outputs, targets, indices, num_boxes, GIoU_aware_class_loss):\n \"\"\"Classification loss (Binary focal loss)\n targets dicts must contain the key \"labels\" containing a tensor of dim [nb_target_boxes]\n \"\"\"\n assert \"pred_logits\" in outputs\n src_logits = outputs[\"pred_logits\"]\n\n idx = self._get_src_permutation_idx(indices)\n target_classes_o = torch.cat([t[\"labels\"][J] for t, (_, J) in zip(targets, indices)])\n target_classes = torch.full(\n src_logits.shape[:2],\n self.num_classes,\n dtype=torch.int64,\n device=src_logits.device,\n )\n target_classes[idx] = target_classes_o\n\n # Computation classification loss\n if self.loss_class_type == \"ce_loss\":\n loss_class = F.cross_entropy(\n src_logits.transpose(1, 2), target_classes, self.empty_weight\n )\n elif self.loss_class_type == \"focal_loss\":\n # src_logits: (b, num_queries, num_classes) = (2, 300, 80)\n # target_classes_one_hot = (2, 300, 80)\n target_classes_onehot = torch.zeros(\n [src_logits.shape[0], src_logits.shape[1], src_logits.shape[2] + 1],\n dtype=src_logits.dtype,\n layout=src_logits.layout,\n device=src_logits.device,\n )\n target_classes_onehot.scatter_(2, target_classes.unsqueeze(-1), 1)\n target_classes_onehot = target_classes_onehot[:, :, :-1]\n\n if GIoU_aware_class_loss:\n # get GIoU-aware classification target: t = (GIoU + 1) / 2\n\n # # get normed GIoU\n bs, n_query = outputs[\"pred_boxes\"].shape[:2]\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # tensor shape: [bs * n_query, 4]\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets]) # tensor shape: [gt number within a batch, 4]\n bbox_giou = generalized_box_iou(\n box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)\n ) # tensor shape: [bs * n_query, gt number within a batch]\n bbox_giou_normed = (bbox_giou + 1) / 2.0\n bbox_giou_normed = bbox_giou_normed.reshape(bs, n_query, -1) # tensor shape: [bs, n_query, gt number within a batch]\n\n # # get matched gt indices: gt_indices\n for indices_idx, element in enumerate(indices):\n if indices_idx == 0:\n gt_indices = element[1]\n else:\n curr_length = gt_indices.shape[0]\n gt_indices = torch.cat((gt_indices, element[1] + curr_length), dim=0)\n\n # # get the supervision with a shape of [bs, n_query, num_classes]\n class_supervision = torch.zeros(\n [src_logits.shape[0], src_logits.shape[1]],\n dtype=src_logits.dtype,\n layout=src_logits.layout,\n device=src_logits.device,\n )\n class_supervision[idx] = bbox_giou_normed[(idx[0], idx[1], gt_indices)] # idx[0]: batch idx; idx[1]: query idx; gt_indices: matched gt idx\n class_supervision = 
class_supervision.detach()\n target_classes_onehot_GIoU_aware = target_classes_onehot * class_supervision.unsqueeze(-1)\n\n # sigmoid_focal_loss supervised by target_classes_onehot_GIoU_aware\n src_prob = src_logits.sigmoid()\n\n # # positive samples\n bce_loss_pos = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot_GIoU_aware, reduction=\"none\") * target_classes_onehot\n p_t_pos = torch.abs(target_classes_onehot_GIoU_aware - src_prob * target_classes_onehot) ** self.gamma\n\n # # negative samples\n bce_loss_neg = F.binary_cross_entropy_with_logits(src_logits, target_classes_onehot, reduction=\"none\") * (1 - target_classes_onehot)\n p_t_neg = torch.abs(src_prob * (1 - target_classes_onehot)) ** self.gamma\n\n # # total loss\n loss = p_t_pos * bce_loss_pos + p_t_neg * bce_loss_neg\n\n if self.alpha >= 0:\n alpha_t = self.alpha * target_classes_onehot + (1 - self.alpha) * (1 - target_classes_onehot)\n loss = alpha_t * loss\n\n loss_class = loss.mean(1).sum() / num_boxes\n loss_class = loss_class * src_logits.shape[1]\n else:\n loss_class = (\n sigmoid_focal_loss(\n src_logits,\n target_classes_onehot,\n num_boxes=num_boxes,\n alpha=self.alpha,\n gamma=self.gamma,\n )\n * src_logits.shape[1]\n )\n losses = {\"loss_class\": loss_class}\n\n return losses\n\n def forward(self, outputs, targets):\n outputs_without_aux = {\n k: v for k, v in outputs.items() if k != \"aux_outputs\" and k != \"enc_outputs\"\n }\n\n # Retrieve the matching between the outputs of the last layer and the targets\n indices = self.matcher(outputs_without_aux, targets)\n\n # Compute the average number of target boxes accross all nodes, for normalization purposes\n num_boxes = sum(len(t[\"labels\"]) for t in targets)\n num_boxes = torch.as_tensor(\n [num_boxes], dtype=torch.float, device=next(iter(outputs.values())).device\n )\n if is_dist_avail_and_initialized():\n torch.distributed.all_reduce(num_boxes)\n num_boxes = torch.clamp(num_boxes / get_world_size(), min=1).item()\n\n # Compute all the requested losses\n losses = {}\n for loss in self.losses:\n kwargs = {}\n if loss == \"class\":\n kwargs[\"GIoU_aware_class_loss\"] = True if (self.training and self.GIoU_aware_class_loss) else False\n losses.update(self.get_loss(loss, outputs, targets, indices, num_boxes, **kwargs))\n\n # In case of auxiliary losses, we repeat this process with the output of each intermediate layer.\n if \"aux_outputs\" in outputs:\n for i, aux_outputs in enumerate(outputs[\"aux_outputs\"]):\n indices = self.matcher(aux_outputs, targets)\n for loss in self.losses:\n kwargs = {}\n if loss == \"class\":\n kwargs[\"GIoU_aware_class_loss\"] = True if (self.training and self.GIoU_aware_class_loss) else False\n l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_boxes, **kwargs)\n l_dict = {k + f\"_{i}\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n # Compute losses for two-stage deformable-detr\n if \"enc_outputs\" in outputs:\n enc_outputs = outputs[\"enc_outputs\"]\n bin_targets = copy.deepcopy(targets)\n for bt in bin_targets:\n bt[\"labels\"] = torch.zeros_like(bt[\"labels\"])\n indices = self.matcher(enc_outputs, bin_targets)\n for loss in self.losses:\n kwargs = {}\n if loss == \"class\":\n kwargs[\"GIoU_aware_class_loss\"] = False\n l_dict = self.get_loss(loss, enc_outputs, bin_targets, indices, num_boxes, **kwargs)\n l_dict = {k + \"_enc\": v for k, v in l_dict.items()}\n losses.update(l_dict)\n\n return losses" }, { "identifier": "HighOrderMatcher", "path": 
"projects/rank_detr/modeling/high_order_matcher.py", "snippet": "class HighOrderMatcher(nn.Module):\n \"\"\"HighOrderMatcher which computes an assignment between targets and predictions.\n\n For efficiency reasons, the targets don't include the no_object. Because of this, in general,\n there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,\n while the others are un-matched (and thus treated as non-objects).\n\n Args:\n cost_class (float): The relative weight of the classification error\n in the matching cost. Default: 1.\n cost_bbox (float): The relative weight of the L1 error of the bounding box\n coordinates in the matching cost. Default: 1.\n cost_giou (float): This is the relative weight of the giou loss of\n the bounding box in the matching cost. Default: 1.\n cost_class_type (str): How the classification error is calculated.\n Choose from ``[\"ce_cost\", \"focal_loss_cost\"]``. Default: \"focal_loss_cost\".\n alpha (float): Weighting factor in range (0, 1) to balance positive vs\n negative examples in focal loss. Default: 0.25.\n gamma (float): Exponent of modulating factor (1 - p_t) to balance easy vs\n hard examples in focal loss. Default: 2.\n \"\"\"\n\n def __init__(\n self,\n cost_class: float = 1,\n cost_bbox: float = 1,\n cost_giou: float = 1,\n cost_class_type: str = \"focal_loss_cost\",\n alpha: float = 0.25,\n gamma: float = 2.0,\n iou_order_alpha: float = 4.0,\n matcher_change_iter: int = 67500,\n ):\n super().__init__()\n self.cost_class = cost_class\n self.cost_bbox = cost_bbox\n self.cost_giou = cost_giou\n self.cost_class_type = cost_class_type\n self.alpha = alpha\n self.gamma = gamma\n assert cost_class != 0 or cost_bbox != 0 or cost_giou != 0, \"all costs cant be 0\"\n assert cost_class_type in {\n \"ce_cost\",\n \"focal_loss_cost\",\n }, \"only support ce loss or focal loss for computing class cost\"\n self.iou_order_alpha = iou_order_alpha\n self.iter = -1\n self.matcher_change_iter = matcher_change_iter\n\n @torch.no_grad()\n def forward(self, outputs, targets):\n \"\"\"Forward function for `HungarianMatcher` which performs the matching.\n\n Args:\n outputs (Dict[str, torch.Tensor]): This is a dict that contains at least these entries:\n\n - ``\"pred_logits\"``: Tensor of shape (bs, num_queries, num_classes) with the classification logits.\n - ``\"pred_boxes\"``: Tensor of shape (bs, num_queries, 4) with the predicted box coordinates.\n\n targets (List[Dict[str, torch.Tensor]]): This is a list of targets (len(targets) = batch_size),\n where each target is a dict containing:\n\n - ``\"labels\"``: Tensor of shape (num_target_boxes, ) (where num_target_boxes is the number of ground-truth objects in the target) containing the class labels. 
# noqa\n - ``\"boxes\"``: Tensor of shape (num_target_boxes, 4) containing the target box coordinates.\n\n Returns:\n list[torch.Tensor]: A list of size batch_size, containing tuples of `(index_i, index_j)` where:\n\n - ``index_i`` is the indices of the selected predictions (in order)\n - ``index_j`` is the indices of the corresponding selected targets (in order)\n\n For each batch element, it holds: `len(index_i) = len(index_j) = min(num_queries, num_target_boxes)`\n \"\"\"\n bs, num_queries = outputs[\"pred_logits\"].shape[:2]\n\n # We flatten to compute the cost matrices in a batch\n if self.cost_class_type == \"ce_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).softmax(-1)\n ) # [batch_size * num_queries, num_classes]\n elif self.cost_class_type == \"focal_loss_cost\":\n out_prob = (\n outputs[\"pred_logits\"].flatten(0, 1).sigmoid()\n ) # [batch_size * num_queries, num_classes]\n\n out_bbox = outputs[\"pred_boxes\"].flatten(0, 1) # [batch_size * num_queries, 4]\n\n # Also concat the target labels and boxes\n tgt_ids = torch.cat([v[\"labels\"] for v in targets])\n tgt_bbox = torch.cat([v[\"boxes\"] for v in targets])\n\n if self.iter < self.matcher_change_iter:\n # Compute the classification cost.\n if self.cost_class_type == \"ce_cost\":\n # Compute the classification cost. Contrary to the loss, we don't use the NLL,\n # but approximate it in 1 - proba[target class].\n # The 1 is a constant that doesn't change the matching, it can be ommitted.\n cost_class = -out_prob[:, tgt_ids]\n elif self.cost_class_type == \"focal_loss_cost\":\n alpha = self.alpha\n gamma = self.gamma\n neg_cost_class = (1 - alpha) * (out_prob**gamma) * (-(1 - out_prob + 1e-8).log())\n pos_cost_class = alpha * ((1 - out_prob) ** gamma) * (-(out_prob + 1e-8).log())\n cost_class = pos_cost_class[:, tgt_ids] - neg_cost_class[:, tgt_ids]\n\n # Compute the L1 cost between boxes\n cost_bbox = torch.cdist(out_bbox, tgt_bbox, p=1)\n\n # Compute the giou cost betwen boxes\n cost_giou = -generalized_box_iou(box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox))\n\n # Final cost matrix\n C = self.cost_bbox * cost_bbox + self.cost_class * cost_class + self.cost_giou * cost_giou\n else:\n # high-order matching cost\n # # Compute the class_score\n class_score = out_prob[:, tgt_ids] # shape = [batch_size * num_queries, gt num within a batch]\n\n # # Compute iou\n bbox_iou, _ = box_iou(\n box_cxcywh_to_xyxy(out_bbox), box_cxcywh_to_xyxy(tgt_bbox)\n ) # shape = [batch_size * num_queries, gt num within a batch]\n\n # Final cost matrix\n C = (-1) * (\n class_score * torch.pow(bbox_iou, self.iou_order_alpha)\n )\n\n C = C.view(bs, num_queries, -1).cpu()\n\n sizes = [len(v[\"boxes\"]) for v in targets]\n indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]\n return [\n (torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64))\n for i, j in indices\n ]\n\n def __repr__(self, _repr_indent=4):\n head = \"Matcher \" + self.__class__.__name__\n body = [\n \"cost_class: {}\".format(self.cost_class),\n \"cost_bbox: {}\".format(self.cost_bbox),\n \"cost_giou: {}\".format(self.cost_giou),\n \"cost_class_type: {}\".format(self.cost_class_type),\n \"focal cost alpha: {}\".format(self.alpha),\n \"focal cost gamma: {}\".format(self.gamma),\n ]\n lines = [head] + [\" \" * _repr_indent + line for line in body]\n return \"\\n\".join(lines)" } ]
import torch.nn as nn from detectron2.modeling.backbone import ResNet, BasicStem from detectron2.layers import ShapeSpec from detectron2.config import LazyCall as L from detrex.modeling.matcher import HungarianMatcher from detrex.modeling.neck import ChannelMapper from detrex.layers import PositionEmbeddingSine from projects.rank_detr.modeling import ( RankDETR, RankDetrTransformerEncoder, RankDetrTransformerDecoder, RankDetrTransformer, RankDetrCriterion, HighOrderMatcher, )
18,287
model = L(RankDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(RankDetrTransformer)(
model = L(RankDETR)( backbone=L(ResNet)( stem=L(BasicStem)(in_channels=3, out_channels=64, norm="FrozenBN"), stages=L(ResNet.make_default_stages)( depth=50, stride_in_1x1=False, norm="FrozenBN", ), out_features=["res3", "res4", "res5"], freeze_at=1, ), position_embedding=L(PositionEmbeddingSine)( num_pos_feats=128, temperature=10000, normalize=True, offset=-0.5, ), neck=L(ChannelMapper)( input_shapes={ "res3": ShapeSpec(channels=512), "res4": ShapeSpec(channels=1024), "res5": ShapeSpec(channels=2048), }, in_features=["res3", "res4", "res5"], out_channels=256, num_outs=4, kernel_size=1, norm_layer=L(nn.GroupNorm)(num_groups=32, num_channels=256), ), transformer=L(RankDetrTransformer)(
encoder=L(RankDetrTransformerEncoder)(
3
2023-10-12 03:02:25+00:00
24k
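Given the remaining fields of this row (import_statement, cropped_code, next_line, gold_snippet_index), one plausible way to consume it is exact-match next-line completion: prompt a model with the imports plus the truncated code and compare its first generated line against next_line. The sketch below assumes a parsed record dict with those field names and a hypothetical generate_fn standing in for the model under evaluation; gold_snippet_index appears to point at the relevant context entry and is left for retrieval analysis rather than used in the match itself.

from typing import Callable

def score_next_line(record: dict, generate_fn: Callable[[str], str]) -> bool:
    # The model sees the import block followed by the truncated file body.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    completion = generate_fn(prompt)
    lines = completion.splitlines()
    predicted = lines[0].strip() if lines else ""
    # Exact match against the stored ground-truth continuation.
    return predicted == record["next_line"].strip()

Looser scoring, for example normalizing whitespace or comparing token sequences, may be preferable in practice, since the formatting of a single line can vary without changing its meaning.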
sakemin/cog-musicgen-remixer
predict.py
[ { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.\n \"\"\"\n def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:\n self.DPs = DPs\n self.codec_model = codec_model\n self.device = next(self.codec_model.parameters()).device\n\n @property\n def sample_rate(self) -> int:\n return self.codec_model.sample_rate\n\n @staticmethod\n def get_mbd_musicgen(device=None):\n \"\"\"Load our diffusion models trained for MusicGen.\"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n path = 'facebook/multiband-diffusion'\n filename = 'mbd_musicgen_32khz.th'\n name = 'facebook/musicgen-small'\n codec_model = load_compression_model(name, device=device)\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n @staticmethod\n def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,\n device: tp.Optional[tp.Union[torch.device, str]] = None,\n n_q: tp.Optional[int] = None):\n \"\"\"Get the pretrained Models for MultibandDiffusion.\n\n Args:\n bw (float): Bandwidth of the compression model.\n pretrained (bool): Whether to use / download if necessary the models.\n device (torch.device or str, optional): Device on which the models are loaded.\n n_q (int, optional): Number of quantizers to use within the compression model.\n \"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n assert bw in [1.5, 3.0, 6.0], f\"bandwidth {bw} not available\"\n if n_q is not None:\n assert n_q in [2, 4, 8]\n assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \\\n f\"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}\"\n n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]\n codec_model = CompressionSolver.model_from_checkpoint(\n '//pretrained/facebook/encodec_24khz', device=device)\n codec_model.set_num_codebooks(n_q)\n codec_model = codec_model.to(device)\n path = 'facebook/multiband-diffusion'\n filename = f'mbd_comp_{n_q}.pt'\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n return MultiBandDiffusion(DPs, codec_model)\n\n @torch.no_grad()\n def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:\n \"\"\"Get the conditioning (i.e. 
latent reprentatios of the compression model) from a waveform.\n Args:\n wav (torch.Tensor): The audio that we want to extract the conditioning from\n sample_rate (int): sample rate of the audio\"\"\"\n if sample_rate != self.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.sample_rate)\n codes, scale = self.codec_model.encode(wav)\n assert scale is None, \"Scaled compression models not supported.\"\n emb = self.get_emb(codes)\n return emb\n\n @torch.no_grad()\n def get_emb(self, codes: torch.Tensor):\n \"\"\"Get latent representation from the discrete codes\n Argrs:\n codes (torch.Tensor): discrete tokens\"\"\"\n emb = self.codec_model.decode_latent(codes)\n return emb\n\n def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,\n step_list: tp.Optional[tp.List[int]] = None):\n \"\"\"Generate Wavform audio from the latent embeddings of the compression model\n Args:\n emb (torch.Tensor): Conditioning embeddinds\n size (none torch.Size): size of the output\n if None this is computed from the typical upsampling of the model\n step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.\n \"\"\"\n if size is None:\n upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)\n size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])\n assert size[0] == emb.size(0)\n out = torch.zeros(size).to(self.device)\n for DP in self.DPs:\n out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))\n return out\n\n def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):\n \"\"\"match the eq to the encodec output by matching the standard deviation of some frequency bands\n Args:\n wav (torch.Tensor): audio to equalize\n ref (torch.Tensor):refenrence audio from which we match the spectrogram.\n n_bands (int): number of bands of the eq\n strictness (float): how strict the the matching. 
0 is no matching, 1 is exact matching.\n \"\"\"\n split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)\n bands = split(wav)\n bands_ref = split(ref)\n out = torch.zeros_like(ref)\n for i in range(n_bands):\n out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness\n return out\n\n def regenerate(self, wav: torch.Tensor, sample_rate: int):\n \"\"\"Regenerate a wavform through compression and diffusion regeneration.\n Args:\n wav (torch.Tensor): Original 'ground truth' audio\n sample_rate (int): sample rate of the input (and output) wav\n \"\"\"\n if sample_rate != self.codec_model.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)\n emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)\n size = wav.size()\n out = self.generate(emb, size=size)\n if sample_rate != self.codec_model.sample_rate:\n out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)\n return out\n\n def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):\n \"\"\"Generate Waveform audio with diffusion from the discrete codes.\n Args:\n tokens (torch.Tensor): discrete codes\n n_bands (int): bands for the eq matching.\n \"\"\"\n wav_encodec = self.codec_model.decode(tokens)\n condition = self.get_emb(tokens)\n wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())\n return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)" }, { "identifier": "MusicGen", "path": "audiocraft/models/musicgen.py", "snippet": "class MusicGen:\n \"\"\"MusicGen main model with convenient generation API.\n\n Args:\n name (str): name of the model.\n compression_model (CompressionModel): Compression model\n used to map audio to invertible discrete representations.\n lm (LMModel): Language model over discrete representations.\n max_duration (float, optional): maximum duration the model can produce,\n otherwise, inferred from the training params.\n \"\"\"\n def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,\n max_duration: tp.Optional[float] = None):\n self.name = name\n self.compression_model = compression_model\n self.lm = lm\n self.cfg: tp.Optional[omegaconf.DictConfig] = None\n # Just to be safe, let's put everything in eval mode.\n self.compression_model.eval()\n self.lm.eval()\n\n if hasattr(lm, 'cfg'):\n cfg = lm.cfg\n assert isinstance(cfg, omegaconf.DictConfig)\n self.cfg = cfg\n\n if self.cfg is not None:\n self.compression_model = get_wrapped_compression_model(self.compression_model, self.cfg)\n\n if max_duration is None:\n if self.cfg is not None:\n max_duration = lm.cfg.dataset.segment_duration # type: ignore\n else:\n raise ValueError(\"You must provide max_duration when building directly MusicGen\")\n assert max_duration is not None\n self.max_duration: float = max_duration\n self.device = next(iter(lm.parameters())).device\n\n self.generation_params: dict = {}\n self.set_generation_params(duration=15) # 15 seconds by default\n self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None\n if self.device.type == 'cpu':\n self.autocast = TorchAutocast(enabled=False)\n else:\n self.autocast = TorchAutocast(\n enabled=True, device_type=self.device.type, dtype=torch.float16)\n\n @property\n def frame_rate(self) -> float:\n \"\"\"Roughly the number of AR steps per seconds.\"\"\"\n return self.compression_model.frame_rate\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Sample rate of the generated audio.\"\"\"\n return 
self.compression_model.sample_rate\n\n @property\n def audio_channels(self) -> int:\n \"\"\"Audio channels of the generated audio.\"\"\"\n return self.compression_model.channels\n\n @staticmethod\n def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):\n \"\"\"Return pretrained model, we provide four models:\n - facebook/musicgen-small (300M), text to music,\n # see: https://huggingface.co/facebook/musicgen-small\n - facebook/musicgen-medium (1.5B), text to music,\n # see: https://huggingface.co/facebook/musicgen-medium\n - facebook/musicgen-melody (1.5B) text to music and text+melody to music,\n # see: https://huggingface.co/facebook/musicgen-melody\n - facebook/musicgen-large (3.3B), text to music,\n # see: https://huggingface.co/facebook/musicgen-large\n \"\"\"\n if device is None:\n if torch.cuda.device_count():\n device = 'cuda'\n else:\n device = 'cpu'\n\n if name == 'debug':\n # used only for unit tests\n compression_model = get_debug_compression_model(device)\n lm = get_debug_lm_model(device)\n return MusicGen(name, compression_model, lm, max_duration=30)\n\n if name in _HF_MODEL_CHECKPOINTS_MAP:\n warnings.warn(\n \"MusicGen pretrained model relying on deprecated checkpoint mapping. \" +\n f\"Please use full pre-trained id instead: facebook/musicgen-{name}\")\n name = _HF_MODEL_CHECKPOINTS_MAP[name]\n\n lm = load_lm_model(name, device=device)\n compression_model = load_compression_model(name, device=device)\n if 'self_wav' in lm.condition_provider.conditioners:\n lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True\n lm.condition_provider.conditioners['self_wav']._use_masking = False\n\n return MusicGen(name, compression_model, lm)\n\n def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,\n top_p: float = 0.0, temperature: float = 1.0,\n duration: float = 30.0, cfg_coef: float = 3.0,\n two_step_cfg: bool = False, extend_stride: float = 18):\n \"\"\"Set the generation parameters for MusicGen.\n\n Args:\n use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int, optional): top_k used for sampling. Defaults to 250.\n top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.\n temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.\n duration (float, optional): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.\n two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,\n instead of batching together the two. This has some impact on how things\n are padded but seems to have little impact in practice.\n extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much\n should we extend the audio each time. 
Larger values will mean less context is\n preserved, and shorter value will require extra computations.\n \"\"\"\n assert extend_stride < self.max_duration, \"Cannot stride by more than max generation duration.\"\n self.extend_stride = extend_stride\n self.duration = duration\n self.generation_params = {\n 'use_sampling': use_sampling,\n 'temp': temperature,\n 'top_k': top_k,\n 'top_p': top_p,\n 'cfg_coef': cfg_coef,\n 'two_step_cfg': two_step_cfg,\n }\n\n def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):\n \"\"\"Override the default progress callback.\"\"\"\n self._progress_callback = progress_callback\n\n def generate_unconditional(self, num_samples: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples in an unconditional manner.\n\n Args:\n num_samples (int): Number of samples to be generated.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n descriptions: tp.List[tp.Optional[str]] = [None] * num_samples\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,\n melody_sample_rate: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=melody_wavs)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,\n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_continuation_with_audio_token(self, prompt, \n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, melody_wavs=melody_wavs)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_audio_chroma(self, prompt, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, melody_wavs=melody_wavs)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_text_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_text_chroma(self, prompt, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. 
It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n \n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_with_text_chroma(self, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n @torch.no_grad()\n def _prepare_tokens_and_attributes(\n self,\n descriptions: tp.Sequence[tp.Optional[str]],\n prompt: tp.Optional[torch.Tensor],\n melody_wavs: tp.Optional[tp.Union[MelodyList,tp.List[str]]] = None, bpm: tp.Optional[tp.Union[float,int,tp.List[float],tp.List[int]]] = None, meter:tp.Optional[tp.Union[int,tp.List[int]]] = None\n ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:\n \"\"\"Prepare model inputs.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n melody_wavs (torch.Tensor, optional): A batch of waveforms\n used as melody conditioning. Defaults to None.\n \"\"\"\n attributes = [\n ConditioningAttributes(text={'description': description})\n for description in descriptions]\n\n if melody_wavs is None:\n for attr in attributes:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n else:\n if 'self_wav' not in self.lm.condition_provider.conditioners:\n raise RuntimeError(\"This model doesn't support melody conditioning. \"\n \"Use the `melody` model.\")\n assert len(melody_wavs) == len(descriptions), \\\n f\"number of melody wavs must match number of descriptions! 
\" \\\n f\"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}\"\n\n if bpm is not None and (isinstance(bpm, int) or isinstance(bpm, float)):\n bpm = [bpm for i in range(len(melody_wavs))]\n elif bpm is not None and isinstance(bpm, tp.List):\n assert len(melody_wavs) == len(bpm)\n\n if meter is not None and (isinstance(meter, int) or isinstance(meter, float)):\n meter = [meter for i in range(len(melody_wavs))]\n elif meter is not None and isinstance(meter, tp.List):\n assert len(melody_wavs) == len(meter)\n\n for attr, melody, i in zip(attributes, melody_wavs, range(len(melody_wavs))):\n if melody is None:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n elif isinstance(melody, torch.Tensor):\n attr.wav['self_wav'] = WavCondition(\n melody[None].to(device=self.device),\n torch.tensor([melody.shape[-1]], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n )\n else :\n attr.wav['self_wav'] = WavChordTextCondition(\n [melody],\n torch.tensor([self.duration*self.sample_rate], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n bpm = [bpm[i]],\n meter = [meter[i]]\n )\n\n if prompt is not None:\n if descriptions is not None:\n assert len(descriptions) == len(prompt), \"Prompt and nb. descriptions doesn't match\"\n prompt = prompt.to(self.device)\n prompt_tokens, scale = self.compression_model.encode(prompt)\n assert scale is None\n else:\n prompt_tokens = None\n return attributes, prompt_tokens\n\n def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],\n prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:\n \"\"\"Generate discrete audio tokens given audio prompt and/or conditions.\n\n Args:\n attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).\n prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n Returns:\n torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.\n \"\"\"\n total_gen_len = int(self.duration * self.frame_rate)\n max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)\n current_gen_offset: int = 0\n\n def _progress_callback(generated_tokens: int, tokens_to_generate: int):\n generated_tokens += current_gen_offset\n if self._progress_callback is not None:\n # Note that total_gen_len might be quite wrong depending on the\n # codebook pattern used, but with delay it is almost accurate.\n self._progress_callback(generated_tokens, total_gen_len)\n else:\n print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\\r')\n\n if prompt_tokens is not None:\n assert max_prompt_len >= prompt_tokens.shape[-1], \\\n \"Prompt is longer than audio to generate\"\n\n callback = None\n if progress:\n callback = _progress_callback\n\n if self.duration <= self.max_duration:\n # generate by sampling from LM, simple case.\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=total_gen_len, **self.generation_params)\n\n else:\n # now this gets a bit messier, we need to handle prompts,\n # melody conditioning etc.\n ref_wavs = [attr.wav['self_wav'] for attr in attributes]\n all_tokens = []\n if prompt_tokens is None:\n prompt_length = 0\n else:\n all_tokens.append(prompt_tokens)\n prompt_length = prompt_tokens.shape[-1]\n\n stride_tokens = int(self.frame_rate * self.extend_stride)\n step = 0\n\n while current_gen_offset + prompt_length < total_gen_len:\n self.lm.condition_provider.conditioners['self_wav'].set_continuation_count(self.extend_stride/self.max_duration, step) #For text based chord conditioning\n time_offset = current_gen_offset / self.frame_rate\n chunk_duration = min(self.duration - time_offset, self.max_duration)\n max_gen_len = int(chunk_duration * self.frame_rate)\n for attr, ref_wav in zip(attributes, ref_wavs):\n if isinstance(ref_wav, WavCondition):\n wav_length = ref_wav.length.item()\n if wav_length == 0:\n continue\n # We will extend the wav periodically if it not long enough.\n # we have to do it here rather than in conditioners.py as otherwise\n # we wouldn't have the full wav.\n initial_position = int(time_offset * self.sample_rate)\n wav_target_length = int(self.max_duration * self.sample_rate)\n positions = torch.arange(initial_position,\n initial_position + wav_target_length, device=self.device)\n attr.wav['self_wav'] = WavCondition(\n ref_wav[0][..., positions % wav_length],\n torch.full_like(ref_wav[1], wav_target_length),\n [self.sample_rate] * ref_wav[0].size(0),\n [None], [0.])\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=max_gen_len, **self.generation_params)\n if prompt_tokens is None:\n all_tokens.append(gen_tokens)\n else:\n all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])\n prompt_tokens = gen_tokens[:, :, stride_tokens:]\n prompt_length = prompt_tokens.shape[-1]\n current_gen_offset += stride_tokens\n step = step + 1\n\n gen_tokens = torch.cat(all_tokens, dim=-1)\n return gen_tokens\n\n def generate_audio(self, gen_tokens: torch.Tensor):\n \"\"\"Generate Audio from tokens\"\"\"\n assert gen_tokens.dim() == 3\n with torch.no_grad():\n gen_audio = self.compression_model.decode(gen_tokens, None)\n return gen_audio" }, { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class 
CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n # auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, 
y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "load_compression_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n if 'pretrained' in pkg:\n return CompressionModel.get_pretrained(pkg['pretrained'], device=device)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n model = builders.get_compression_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n return model" }, { "identifier": "load_lm_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n if cfg.device == 'cpu':\n cfg.dtype = 'float32'\n else:\n cfg.dtype = 'float16'\n _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')\n _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')\n _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')\n _delete_param(cfg, 'conditioners.args.drop_desc_p')\n model = builders.get_lm_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n model.cfg = cfg\n return model" }, { "identifier": "audio_write", "path": "audiocraft/data/audio.py", "snippet": "def audio_write(stem_name: tp.Union[str, Path],\n wav: torch.Tensor, sample_rate: int,\n format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,\n normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,\n rms_headroom_db: float = 18, loudness_headroom_db: float = 14,\n loudness_compressor: bool = False,\n log_clipping: bool = True, make_parent_dir: bool = True,\n add_suffix: bool = True) -> Path:\n \"\"\"Convenience function for saving audio to disk. Returns the filename the audio was written to.\n\n Args:\n stem_name (str or Path): Filename without extension which will be added automatically.\n wav (torch.Tensor): Audio data to save.\n sample_rate (int): Sample rate of audio data.\n format (str): Either \"wav\", \"mp3\", \"ogg\", or \"flac\".\n mp3_rate (int): kbps when using mp3s.\n ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.\n normalize (bool): if `True` (default), normalizes according to the prescribed\n strategy (see after). If `False`, the strategy is only used in case clipping\n would happen.\n strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',\n i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square\n with extra headroom to avoid clipping. 'clip' just clips.\n peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.\n rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. 
This must be much larger\n than the `peak_clip` one to avoid further clipping.\n loudness_headroom_db (float): Target loudness for loudness normalization.\n loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.\n when strategy is 'loudness' log_clipping (bool): If True, basic logging on stderr when clipping still\n occurs despite strategy (only for 'rms').\n make_parent_dir (bool): Make parent directory if it doesn't exist.\n Returns:\n Path: Path of the saved audio.\n \"\"\"\n assert wav.dtype.is_floating_point, \"wav is not floating point\"\n if wav.dim() == 1:\n wav = wav[None]\n elif wav.dim() > 2:\n raise ValueError(\"Input wav should be at most 2 dimension.\")\n assert wav.isfinite().all()\n wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,\n rms_headroom_db, loudness_headroom_db, loudness_compressor,\n log_clipping=log_clipping, sample_rate=sample_rate,\n stem_name=str(stem_name))\n if format == 'mp3':\n suffix = '.mp3'\n flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']\n elif format == 'wav':\n suffix = '.wav'\n flags = ['-f', 'wav', '-c:a', 'pcm_s16le']\n elif format == 'ogg':\n suffix = '.ogg'\n flags = ['-f', 'ogg', '-c:a', 'libvorbis']\n if ogg_rate is not None:\n flags += ['-b:a', f'{ogg_rate}k']\n elif format == 'flac':\n suffix = '.flac'\n flags = ['-f', 'flac']\n else:\n raise RuntimeError(f\"Invalid format {format}. Only wav or mp3 are supported.\")\n if not add_suffix:\n suffix = ''\n path = Path(str(stem_name) + suffix)\n if make_parent_dir:\n path.parent.mkdir(exist_ok=True, parents=True)\n try:\n _piping_to_ffmpeg(path, wav, sample_rate, flags)\n except Exception:\n if path.exists():\n # we do not want to leave half written files around.\n path.unlink()\n raise\n return path" }, { "identifier": "get_lm_model", "path": "audiocraft/models/builders.py", "snippet": "def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:\n \"\"\"Instantiate a transformer LM.\"\"\"\n if cfg.lm_model == 'transformer_lm':\n kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))\n n_q = kwargs['n_q']\n q_modeling = kwargs.pop('q_modeling', None)\n codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')\n attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))\n cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))\n cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']\n fuser = get_condition_fuser(cfg)\n condition_provider = get_conditioner_provider(kwargs[\"dim\"], cfg).to(cfg.device)\n if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically\n kwargs['cross_attention'] = True\n if codebooks_pattern_cfg.modeling is None:\n assert q_modeling is not None, \\\n \"LM model should either have a codebook pattern defined or transformer_lm.q_modeling\"\n codebooks_pattern_cfg = omegaconf.OmegaConf.create(\n {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}\n )\n pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)\n return LMModel(\n pattern_provider=pattern_provider,\n condition_provider=condition_provider,\n fuser=fuser,\n cfg_dropout=cfg_prob,\n cfg_coef=cfg_coef,\n attribute_dropout=attribute_dropout,\n dtype=getattr(torch, cfg.dtype),\n device=cfg.device,\n **kwargs\n ).to(cfg.device)\n else:\n raise KeyError(f\"Unexpected LM model {cfg.lm_model}\")" } ]
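A minimal usage sketch of the MusicGen generation API documented in the context snippets above (set_generation_params, generate, audio_write). The model id is one of those listed in the get_pretrained docstring; the description text and output stem name are placeholders.

from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

# Load a pretrained text-to-music model (ids listed in MusicGen.get_pretrained).
model = MusicGen.get_pretrained('facebook/musicgen-small')
# Sampling parameters; duration is the length of the generated audio in seconds.
model.set_generation_params(duration=8, use_sampling=True, top_k=250, cfg_coef=3.0)
# Text-conditioned generation returns a [B, C, T] waveform tensor.
wavs = model.generate(['placeholder description: mellow lo-fi beat'])
for i, one_wav in enumerate(wavs):
    # audio_write appends the file suffix and normalizes with the 'peak' strategy by default.
    audio_write(f'sample_{i}', one_wav.cpu(), model.sample_rate)

generate returns audio at model.sample_rate, which audio_write then normalizes and writes to disk.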
import os
import random
import torchaudio
import typing as tp
import numpy as np
import torch
import librosa
import subprocess
import math
import allin1
import pytsmod as tsm
import shutil
from typing import Optional
from cog import BasePredictor, Input, Path
from audiocraft.models import MusicGen, MultiBandDiffusion
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models.loaders import (
    load_compression_model,
    load_lm_model,
)
from audiocraft.data.audio import audio_write
from audiocraft.models.builders import get_lm_model
from omegaconf import OmegaConf
from audiocraft.modules.btc.btc_model import BTC_model
from audiocraft.modules.btc.utils.mir_eval_modules import idx2chord
from demucs.audio import convert_audio
from demucs.apply import apply_model
14,708
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu"
# Prediction interface for Cog ⚙️ # https://github.com/replicate/cog/blob/main/docs/python.md # We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here. MODEL_PATH = "/src/models/" os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH os.environ["TORCH_HOME"] = MODEL_PATH # Model specific imports def _delete_param(cfg, full_name: str): parts = full_name.split('.') for part in parts[:-1]: if part in cfg: cfg = cfg[part] else: return OmegaConf.set_struct(cfg, False) if parts[-1] in cfg: del cfg[parts[-1]] OmegaConf.set_struct(cfg, True) def load_ckpt(path, device, url=False): if url: loaded = torch.hub.load_state_dict_from_url(str(path)) else: loaded = torch.load(str(path)) cfg = OmegaConf.create(loaded['xp.cfg']) cfg.device = str(device) if cfg.device == 'cpu': cfg.dtype = 'float32' else: cfg.dtype = 'float16' _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path') _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path') _delete_param(cfg, 'conditioners.args.merge_text_conditions_p') _delete_param(cfg, 'conditioners.args.drop_desc_p') lm = get_lm_model(loaded['xp.cfg']) lm.load_state_dict(loaded['model']) lm.eval() lm.cfg = cfg compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device) return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm) class Predictor(BasePredictor): def setup(self, weights: Optional[Path] = None): """Load the model into memory to make running multiple predictions efficient""" self.device = "cuda" if torch.cuda.is_available() else "cpu"
self.mbd = MultiBandDiffusion.get_mbd_musicgen()
0
2023-10-09 09:55:24+00:00
24k
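The record's next_line attaches a MultiBandDiffusion decoder to the predictor. A minimal sketch, assuming default device placement, of pairing that decoder with MusicGen tokens via the tokens_to_wav path shown in the first record's context (the description string is a placeholder):

from audiocraft.models import MusicGen, MultiBandDiffusion

model = MusicGen.get_pretrained('facebook/musicgen-small')
mbd = MultiBandDiffusion.get_mbd_musicgen()  # same call as the record's next_line
model.set_generation_params(duration=4)
# return_tokens=True yields both the EnCodec-decoded audio and the discrete codes.
wav_encodec, tokens = model.generate(['placeholder: ambient pad'], return_tokens=True)
# tokens_to_wav decodes the codes with diffusion, then EQ-matches against the EnCodec output.
wav_diffusion = mbd.tokens_to_wav(tokens)

The EQ matching rescales each of the 32 frequency bands by the ratio of standard deviations, as implemented in re_eq in the context snippet.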
oracle/guardian-ai
tests/unitary/test_fairness_bias_mitigation.py
[ { "identifier": "ModelBiasMitigator", "path": "guardian_ai/fairness/bias_mitigation/sklearn.py", "snippet": "class ModelBiasMitigator:\n r\"\"\"\n Class to mitigate the bias of an already fitted machine learning model.\n\n The mitigation procedure works by multiplying the majority class label\n by a different scalar for every population subgroup and then rescaling\n the prediction probabilities, producing tweaked label probabilities.\n\n The different multiplying scalars are searched in order to find the\n best possible trade-offs between any fairness and accuracy metrics\n passed as input.\n\n This object produces a set of optimal fairness-accuracy trade-offs,\n which can be visualized using the `show_tradeoff` method.\n\n A default best multiplier is selected according to parametrizable input\n constraints. It is possible to select any other multiplier on the trade-off\n using the ``select_model`` method and inputting the index of the\n preferred multiplier, as shown when hovering over multipliers in\n ``show_tradeoff``.\n\n Parameters\n ----------\n base_estimator : model object\n The base estimator on which we want to mitigate bias.\n protected_attribute_names: str, List[str]\n The protected attribute names to use to compute fairness metrics.\n These should always be a part of any input dataset passed.\n fairness_metric : str, callable\n The fairness metric to mitigate bias for.\n\n - If str, it is the name of the scoring metric. Available metrics are:\n [%s]\n - If callable, it has to have the\n ``fairness_metric(y_true, y_pred, subgroups)`` signature.\n accuracy_metric : str, callable\n The accuracy metric to optimize for while mitigating bias.\n\n - If str, it is the name of the scoring metric. Available metrics are:\n [%s]\n - If callable, it has to have the\n ``accuracy_metric(y_true, y_pred)`` signature.\n higher_fairness_is_better : bool, 'auto', default='auto'\n Whether a higher fairness score with respect to `fairness_metric`\n is better. Needs to be set to \"auto\" if `fairness_metric` is a str,\n in which case it is set automatically.\n higher_accuracy_is_better : bool, 'auto', default='auto'\n Whether a higher accuracy score with respect to `accuracy_metric`\n is better. Needs to be set to \"auto\" if `accuracy_metric` is a str,\n in which case it is set automatically.\n fairness_metric_uses_probas: bool, 'auto', default='auto'\n Whether or not the fairness metric should be given label probabilities\n or actual labels as input. Needs to be set to \"auto\" if\n `fairness_metric` is a str, in which case it is set automatically.\n accuracy_metric_uses_probas: bool, 'auto', default='auto'\n Whether or not the accuracy metric should be given label probabilities\n or actual labels as input. Needs to be set to \"auto\" if\n `accuracy_metric` is a str, in which case it is set automatically.\n constraint_target: str, default='accuracy'\n On which metric should the constraint be applied for default\n model selection.\n Possible values are ``'fairness'`` and ``'accuracy'``.\n constraint_type: str, default='relative'\n Which type of constraint should be used to select the default\n model.\n Possible values are:\n\n - ``'relative'``: Apply a constraint relative to the best found\n models. A relative constraint on accuracy with F1 metric would\n look at the best F1 model found and tolerate a ``constraint_value``\n relative deviation to it at max, returning the model with\n the best fairness within that constraint.\n - ``'absolute'``: Apply an absolute constraint to best found\n models. 
An absolute constraint on fairness with Equalized Odds\n metric would only consider models with Equalized Odds below\n ``constraint_value``, returning the model with\n the best accuracy within that constraint.\n\n constraint_value: float, default=0.05\n What value to apply the constraint with when selecting the default\n model. Look at ``constraint_type``'s documentation for more\n details.\n base_estimator_uses_protected_attributes: bool, default=True\n Whether or not ``base_estimator`` uses the protected attributes for\n inference. If set to ``False``, protected attributes will be removed\n from any input dataset before being collecting predictions from ``base_estimator``.\n n_trials_per_group: int, default=100\n Number of different multiplying scalars to consider. Scales\n linearly with the number of groups in the data, i.e.\n ``n_trials = n_trials_per_group * n_groups``.\n When both ``n_trials_per_group`` and ``time_limit`` are specified,\n the first occurrence will stop the search procedure.\n time_limit: float or None, default=None\n Number of seconds to spend in search at maximum. ``None`` value\n means no time limit is set.\n When both ``n_trials_per_group`` and ``time_limit`` are specified,\n the first occurrence will stop the search procedure.\n subsampling: int, default=50000\n The number of rows to subsample the dataset to when tuning.\n This parameter drastically improves running time on large datasets\n with little decrease in overall performance. Can be deactivated\n by passing ``numpy.inf``.\n regularization_factor: float, default=0.001\n The amount of regularization to be applied when selecting multipliers.\n favorable_label_idx: int, default=1\n Index of the favorable label to use when computing metrics.\n random_seed: int, default=0\n Random seed to ensure reproducible outcome.\n\n Attributes\n ----------\n tradeoff_summary_: pd.DataFrame\n DataFrame containing the optimal fairness-accuracy trade-off\n models with only the most relevant information.\n selected_multipliers_idx_: int\n Index of the currently selected model for ``self._best_trials_detailed``.\n selected_multipliers_: pd.DataFrame\n DataFrame containing the multipliers for each sensitive group\n that are currently used for inference.\n constrained_metric_: str\n Name of the metric on which the constraint is applied.\n unconstrained_metric_: str\n Name of the metric on which no constraint is applied.\n constraint_criterion_value_: float\n Value of the constraint being currently applied.\n\n Raises\n ------\n GuardianAITypeError, GuardianAIValueError\n Will be raised when one input argument is invalid\n\n\n Examples\n --------\n\n .. 
code-block:: python\n\n from guardian_ai.fairness.bias_mitigation import ModelBiasMitigator\n\n bias_mitigated_model = ModelBiasMitigator(model,\n protected_attribute_names='sex',\n fairness_metric='equalized_odds',\n accuracy_metric='balanced_accuracy')\n\n # Scikit-learn like API supported\n bias_mitigated_model.fit(X_val, y_val)\n y_pred_proba = bias_mitigated_model.predict_proba(X_test)\n y_pred_labels = bias_mitigated_model.predict(X_test)\n\n # Use show_tradeoff() to display all available models\n bias_mitigated_model.show_tradeoff()\n\n # Can select a specific model manually\n bias_mitigated_model.select_model(1)\n\n # Predictions are now made with new model\n y_pred_proba = bias_mitigated_model.predict_proba(X_test)\n y_pred_labels = bias_mitigated_model.predict(X_test)\n \"\"\" # noqa D412\n\n def __init__(\n self,\n base_estimator: BaseEstimator,\n protected_attribute_names: Union[str, List[str]],\n fairness_metric: Union[str, Callable],\n accuracy_metric: Union[str, Callable],\n higher_fairness_is_better: Union[bool, str] = \"auto\",\n higher_accuracy_is_better: Union[bool, str] = \"auto\",\n fairness_metric_uses_probas: Union[bool, str] = \"auto\",\n accuracy_metric_uses_probas: Union[bool, str] = \"auto\",\n constraint_target: str = \"accuracy\",\n constraint_type: str = \"relative\",\n constraint_value: float = 0.05,\n base_estimator_uses_protected_attributes: bool = True,\n n_trials_per_group: int = 100,\n time_limit: Optional[float] = None,\n subsampling: int = 50000,\n regularization_factor: float = 1e-3,\n favorable_label_idx: int = 1,\n random_seed: int = 0,\n ):\n optuna.logging.set_verbosity(optuna.logging.WARNING)\n self._base_estimator = base_estimator\n self._protected_attribute_names = protected_attribute_names\n self._higher_fairness_is_better = higher_fairness_is_better\n self._higher_accuracy_is_better = higher_accuracy_is_better\n self._fairness_metric_uses_probas = fairness_metric_uses_probas\n self._accuracy_metric_uses_probas = accuracy_metric_uses_probas\n self._constraint_target = constraint_target\n self._constraint_type = constraint_type\n self._constraint_value = constraint_value\n self._base_estimator_uses_protected_attributes = (\n base_estimator_uses_protected_attributes\n )\n self._n_trials_per_group = n_trials_per_group\n self._time_limit = time_limit\n self._subsampling = subsampling\n self._regularization_factor = regularization_factor\n self._favorable_label_idx = favorable_label_idx\n self._random_seed = random_seed\n\n self._set_metric_names_and_callables(fairness_metric, accuracy_metric)\n\n # Public attributes to be set by `fit`\n self.tradeoff_summary_: Optional[pd.DataFrame] = None\n self.selected_multipliers_idx_: Optional[int] = None\n self.constrained_metric_: Optional[str] = None\n self.unconstrained_metric_: Optional[str] = None\n self.constraint_criterion_value_: Optional[float] = None\n\n # Private attributes to be set by `fit`\n self._best_trials_detailed: Optional[pd.DataFrame] = None\n self._accuracy_base_: Optional[float] = None\n self._fairness_base_: Optional[float] = None\n self._unique_groups_: Optional[np.ndarray] = None\n self._unique_group_names_: Optional[list] = None\n self._multiplier_names_: Optional[List[str]] = None\n self._admissible_trials_mask_: Optional[pd.DataFrame] = None\n\n self._validate_current_state()\n\n def _validate_current_state(self):\n \"\"\"\n Validate current attributes have valid values.\n\n Raises\n ------\n GuardianAITypeError\n Will be raised when one input argument has invalid type\n 
GuardianAIValueError\n Will be raised when one input argument has invalid value\n \"\"\"\n if isinstance(self._protected_attribute_names, str):\n self._protected_attribute_names = [self._protected_attribute_names]\n\n if self._higher_accuracy_is_better != \"auto\":\n if not isinstance(self._higher_accuracy_is_better, bool):\n raise GuardianAIValueError(\n \"`higher_accuracy_is_better` should be a bool or 'auto'\"\n f\", received {self._higher_accuracy_is_better} instead.\"\n )\n\n if self._higher_fairness_is_better != \"auto\":\n if not isinstance(self._higher_fairness_is_better, bool):\n raise GuardianAIValueError(\n \"`higher_fairness_is_better` should be a bool or 'auto'\"\n f\", received {self._higher_fairness_is_better} instead.\"\n )\n\n if self._fairness_metric_uses_probas != \"auto\":\n if not isinstance(self._fairness_metric_uses_probas, bool):\n raise GuardianAIValueError(\n \"`_fairness_metric_uses_probas` should be a bool or 'auto'\"\n f\", received {self._fairness_metric_uses_probas} instead.\"\n )\n\n if self._accuracy_metric_uses_probas != \"auto\":\n if not isinstance(self._accuracy_metric_uses_probas, bool):\n raise GuardianAIValueError(\n \"`_accuracy_metric_uses_probas` should be a bool or 'auto'\"\n f\", received {self._accuracy_metric_uses_probas} instead.\"\n )\n\n supported_constraint_targets = [\"accuracy\", \"fairness\"]\n if self._constraint_target not in supported_constraint_targets:\n raise GuardianAIValueError(\n f\"Received `{self._constraint_target}` for `constraint_target`. \"\n f\"Supported values are {supported_constraint_targets}\"\n )\n\n supported_constraint_types = [\"absolute\", \"relative\"]\n if self._constraint_type not in supported_constraint_types:\n raise GuardianAIValueError(\n f\"Received `{self._constraint_type}` for `constraint_type`. 
\"\n f\"Supported values are {supported_constraint_types}\"\n )\n\n if not isinstance(self._constraint_value, (float, int)):\n raise GuardianAITypeError(\n \"`constraint_value` should be a float, received \"\n f\"{self._constraint_type} instead.\"\n )\n\n if not isinstance(self._base_estimator_uses_protected_attributes, bool):\n raise GuardianAITypeError(\n \"`base_estimator_uses_protected_attributes` should be a bool\"\n f\", received {self._base_estimator_uses_protected_attributes} instead.\"\n )\n\n if self._n_trials_per_group is not None:\n if (\n not isinstance(self._n_trials_per_group, int)\n or self._n_trials_per_group <= 0\n ):\n raise GuardianAIValueError(\n \"`n_trials_per_group` should be a positive integer or None, received \"\n f\"{self._n_trials_per_group} instead.\"\n )\n\n if self._time_limit is not None:\n if (\n not isinstance(self._time_limit, (float, int))\n or self._time_limit <= 0.0\n ):\n raise GuardianAIValueError(\n \"`time_limit` should be a positive float or None, received \"\n f\"{self._time_limit} instead.\"\n )\n\n if self._n_trials_per_group is None and self._time_limit is None:\n raise GuardianAIValueError(\n \"`n_trials_per_group` and `time_limit` cannot both be None.\"\n )\n\n if not isinstance(self._subsampling, int) or self._subsampling <= 0:\n if not np.isinf(self._subsampling):\n raise GuardianAIValueError(\n \"`subsampling` should be a positive integer or `np.inf`, received \"\n f\"{self._subsampling} instead.\"\n )\n\n if (\n not isinstance(self._regularization_factor, (float, int))\n or self._regularization_factor < 0\n ):\n raise GuardianAIValueError(\n \"`regularization_factor` should be a non-negative float, received \"\n f\"{self._regularization_factor} instead.\"\n )\n\n if (\n not isinstance(self._favorable_label_idx, int)\n or self._favorable_label_idx < 0\n ):\n raise GuardianAIValueError(\n \"`favorable_label_idx` should be a non-negative integer, received \"\n f\"{self._favorable_label_idx} instead.\"\n )\n\n if self._random_seed is not None:\n if not isinstance(self._random_seed, int) or self._random_seed < 0:\n raise GuardianAIValueError(\n \"`random_seed` should be a non-negative integer or None, received \"\n f\"{self._random_seed} instead.\"\n )\n\n def _set_metric_names_and_callables(\n self,\n fairness_metric: Union[str, Callable],\n accuracy_metric: Union[str, Callable],\n ):\n \"\"\"\n Grab fairness and accuracy metrics from input arguments.\n\n Set values to _callable and _name attributes for fairness and\n accuracy metrics.\n\n Arguments\n ---------\n fairness_metric: str, Callable\n The fairness metric to use.\n accuracy_metric: str, Callable\n The accuracy metric to use.\n\n Raises\n ------\n GuardianAITypeError\n Will be raised if one the metrics is not a str or callable\n GuardianAIValueError\n Will be raised if there is an invalid combination of a metric and\n its `higher_is_better` and `uses_probas` attributes.\n \"\"\"\n self.accuracy_metric_callable: Callable\n self.accuracy_metric_name: str\n\n self.fairness_metric_callable: Callable\n self.fairness_metric_name: str\n\n if isinstance(accuracy_metric, str):\n if self._higher_accuracy_is_better != \"auto\":\n raise GuardianAIValueError(\n '`higher_accuracy_is_better` should be set to \"auto\" when'\n \"`accuracy_metric` is a str.\"\n )\n\n if self._accuracy_metric_uses_probas != \"auto\":\n raise GuardianAIValueError(\n '`accuracy_metric_uses_probas` should be set to \"auto\" when'\n \"`accuracy_metric` is a str.\"\n )\n\n metric_object = 
skl_metrics.get_scorer(accuracy_metric)\n\n self.accuracy_metric_callable = _PredictionScorer(metric_object)\n self.accuracy_metric_name = accuracy_metric\n # Always true because scores are inverted by sklearn when needed\n self._higher_accuracy_is_better = True\n self._accuracy_metric_uses_probas = isinstance(\n metric_object,\n (\n skl_metrics._scorer._ProbaScorer,\n skl_metrics._scorer._ThresholdScorer,\n ),\n )\n elif callable(accuracy_metric):\n if self._higher_accuracy_is_better == \"auto\":\n raise GuardianAIValueError(\n \"`higher_accuracy_is_better` should be manually set when\"\n \"`accuracy_metric` is a callable.\"\n )\n\n if self._accuracy_metric_uses_probas == \"auto\":\n raise GuardianAIValueError(\n \"`accuracy_metric_uses_probas` should be manually set when\"\n \"`accuracy_metric` is a callable.\"\n )\n\n self.accuracy_metric_callable = accuracy_metric\n self.accuracy_metric_name = accuracy_metric.__name__\n else:\n raise GuardianAITypeError(\n \"`accuracy_metric` should be a `str` or callable. Received \"\n f\"{accuracy_metric} instead.\"\n )\n\n if isinstance(fairness_metric, str):\n if self._higher_fairness_is_better != \"auto\":\n raise GuardianAIValueError(\n '`higher_fairness_is_better` should be set to \"auto\" when'\n \"`fairness_metric` is a str.\"\n )\n\n if self._fairness_metric_uses_probas != \"auto\":\n raise GuardianAIValueError(\n '`fairness_metric_uses_probas` should be set to \"auto\" when'\n \"`fairness_metric` is a str.\"\n )\n\n self.fairness_metric_callable = _get_fairness_metric(fairness_metric)\n self.fairness_metric_name = fairness_metric\n self._higher_fairness_is_better = False\n self._fairness_metric_uses_probas = False\n elif callable(fairness_metric):\n if self._higher_fairness_is_better == \"auto\":\n raise GuardianAIValueError(\n \"`higher_fairness_is_better` should be manually set when\"\n \"`fairness_metric` is a callable.\"\n )\n\n if self._fairness_metric_uses_probas == \"auto\":\n raise GuardianAIValueError(\n \"`fairness_metric_uses_probas` should be manually set when\"\n \"`fairness_metric` is a callable.\"\n )\n\n self.fairness_metric_callable = fairness_metric\n self.fairness_metric_name = fairness_metric.__name__\n else:\n raise GuardianAITypeError(\n \"`fairness_metric` should be a `str` or callable. 
Received \"\n f\"{fairness_metric} instead.\"\n )\n\n def _get_fairness_score(\n self, y_true: np.ndarray, y_probas: np.ndarray, groups: pd.DataFrame\n ) -> float:\n \"\"\"\n Get fairness score.\n\n Arguments\n ---------\n y_true: np.ndarray\n True labels\n y_probas: np.ndarray\n Label probabilities\n groups: pd.DataFrame\n Protected attribute(s) value(s) for every sample.\n\n Returns\n -------\n float: score\n The fairness score\n \"\"\"\n if self._fairness_metric_uses_probas:\n y_pred = y_probas[:, self._favorable_label_idx]\n else:\n y_pred = y_probas.argmax(-1)\n\n return self.fairness_metric_callable(y_true, y_pred, groups)\n\n def _get_accuracy_score(self, y_true: np.ndarray, y_probas: np.ndarray) -> float:\n \"\"\"\n Get accuracy score.\n\n Arguments\n ---------\n y_true: np.ndarray\n True labels\n y_probas: np.ndarray\n Label probabilities\n\n Returns\n -------\n float: score\n The accuracy score\n \"\"\"\n if self._accuracy_metric_uses_probas:\n y_pred = y_probas\n else:\n y_pred = y_probas.argmax(-1)\n\n return self.accuracy_metric_callable(y_true, y_pred)\n\n def fit(self, X: pd.DataFrame, y: Union[pd.DataFrame, pd.Series, np.ndarray]):\n \"\"\"\n Apply bias mitigation to the base estimator given a dataset and labels.\n\n Note that it is highly recommended you use a validation set for this\n method, so as to have a more representative range of probabilities\n for the model instead of the potentially skewed probabilities on\n training samples.\n\n Parameters\n ----------\n X: pd.DataFrame\n The dataset on which to mitigate the estimator's bias.\n y: pd.DataFrame, pd.Series, np.ndarray\n The labels for which to mitigate the estimator's bias.\n\n Returns\n -------\n self: ModelBiasMitigator\n The fitted ModelBiasMitigator object.\n\n Raises\n ------\n GuardianAIValueError\n Raised when an invalid value is encountered.\n \"\"\"\n groups, group_names = self._prepare_subgroups(X)\n\n X, y, group_names, groups = self._apply_subsampling(X, y, group_names, groups)\n\n probas_predicted = self._get_base_probas(X)\n\n group_ranges = self._get_group_ranges(\n probas_predicted, groups, self._unique_groups_, self._unique_group_names_\n )\n\n self._accuracy_base_ = self._get_accuracy_score(y, probas_predicted)\n self._fairness_base_ = self._get_fairness_score(y, probas_predicted, groups)\n\n def objective_fn(trial):\n probas = probas_predicted.copy()\n\n multipliers = {}\n for group_name, multiplier_name in zip(\n self._unique_group_names_, self._multiplier_names_\n ):\n min_val, max_val = group_ranges[group_name]\n multipliers[multiplier_name] = trial.suggest_float(\n multiplier_name, min_val, max_val, log=True\n )\n\n penalty_acc, penalty_fairness = self._get_multiplier_penalty(\n multipliers, group_ranges, self._unique_group_names_\n )\n\n probas = _apply_multiplier(\n probas,\n groups,\n self._unique_groups_,\n self._unique_group_names_,\n multipliers,\n self._favorable_label_idx,\n )\n\n perf = self._get_accuracy_score(y, probas)\n fairness = self._get_fairness_score(y, probas, groups)\n\n return perf + penalty_acc, fairness + penalty_fairness\n\n sampler = optuna.samplers.NSGAIISampler(seed=self._random_seed)\n study = optuna.create_study(\n directions=self._get_optimization_directions(), sampler=sampler\n )\n\n if self._unique_group_names_ is None:\n raise GuardianAIValueError(\"_unique_group_names cannot be None!\")\n\n study.enqueue_trial(\n {\n f\"multiplier_{group_name}\": 1.0\n for group_name in self._unique_group_names_\n }\n )\n\n study.optimize(\n objective_fn,\n 
n_trials=self._n_trials_per_group * len(self._unique_group_names_),\n timeout=self._time_limit,\n show_progress_bar=True,\n )\n\n self._produce_best_trials_frame(study, group_ranges)\n\n self._select_best_model_from_constraints()\n\n return self\n\n def _prepare_subgroups(self, X: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Series]:\n \"\"\"\n Handle protected subgroups logic.\n\n Sets the `_unique_groups`, `_unique_group_names` and `_multiplier_names`\n attributes.\n\n Arguments\n ---------\n X: pd.DataFrame\n Dataset to prepare subgroups for.\n\n Returns\n -------\n (pd.DataFrame, pd.Series)\n Tuple containing\n groups: pd.DataFrame\n DataFrame mapping every sample to its protected attribute(s) ]\n value(s).\n group_names: pd.Series\n Series mapping every sample to its unique group name.\n\n Raises\n ------\n GuardianAIValueError\n Raised when an invalid value is encountered.\n \"\"\"\n groups = X[self._protected_attribute_names].astype(\"category\")\n\n unique_groups_and_counts = groups.value_counts().reset_index(name=\"count\")\n\n unique_groups = unique_groups_and_counts[self._protected_attribute_names]\n self._unique_groups_ = unique_groups.values\n\n unique_groups[\"name\"] = unique_groups.apply(\n lambda x: \"--\".join(\n [f\"{attr}={x[attr]}\" for attr in self._protected_attribute_names]\n ),\n axis=1,\n )\n self._unique_group_names_ = unique_groups[\"name\"].tolist()\n self._multiplier_names_ = [\n f\"multiplier_{group_name}\" for group_name in self._unique_group_names_ # type: ignore\n ]\n\n if self._unique_group_names_ is None:\n raise GuardianAIValueError(\"_unique_groupe_names cannot be None!\")\n if self._multiplier_names_ is None:\n raise GuardianAIValueError(\"_multiplier_names cannot be None!\")\n\n groups[\"name\"] = \"\"\n for group, group_name in zip(\n self._unique_groups_, self._unique_group_names_ # type: ignore\n ): # type: ignore\n mask = (groups.drop(columns=\"name\") == group).all(1).to_numpy().squeeze()\n\n groups[\"name\"][mask] = group_name\n\n return groups.drop(columns=\"name\"), groups[\"name\"]\n\n def _apply_subsampling(\n self,\n X: pd.DataFrame,\n y: pd.Series,\n group_names: pd.Series,\n groups: pd.DataFrame,\n ) -> Tuple[pd.DataFrame, pd.Series, pd.Series, pd.DataFrame]:\n \"\"\"\n Apply subsampling on the input dataset.\n\n The subsampling applied is stratified sampling from the groundtruth\n labels.\n\n Arguments\n ---------\n X: pd.DataFrame\n The dataset on which to apply subsampling.\n y: pd.Series\n The labels on which to apply subsampling.\n group_names: pd.Series\n Series mapping every sample to its unique group name. Used to\n apply subsampling and also to return subsampled version.\n groups: pd.DataFrame\n DataFrame mapping every sample to its protected attribute(s)\n value(s). 
Subsampling is applied to it.\n\n Returns\n -------\n pd.DataFrame, pd.Series, pd.Series, pd.DataFrame\n Tuple containing\n X_subsampled: pd.DataFrame\n Subsampled version of input X\n y_subsampled: pd.Series\n Subsampled version of input y\n group_names_subsampled: pd.Series\n Subsampled version of input group_names\n groups_subsampled: pd.DataFrame\n Subsampled version of input groups\n \"\"\"\n if len(X) <= self._subsampling:\n return X, y, group_names, groups\n else:\n n_samples = min(len(X), self._subsampling)\n\n sss = StratifiedShuffleSplit(\n n_splits=1, test_size=n_samples / len(X), random_state=self._random_seed\n )\n\n stratas = pd.concat((groups, y), axis=1)\n stratas = OrdinalEncoder().fit_transform(stratas)\n\n _, idxs = next(\n iter(sss.split(np.arange(0, len(X)).reshape(-1, 1), y=stratas))\n )\n\n return X.iloc[idxs], y.iloc[idxs], group_names.iloc[idxs], groups.iloc[idxs]\n\n def _get_group_ranges(\n self,\n probas: np.ndarray,\n groups: pd.DataFrame,\n unique_groups: Optional[np.ndarray],\n unique_group_names: Optional[List],\n ) -> Dict:\n \"\"\"\n Return the range for which to search multipliers for each sensitive\n group.\n\n The logic is that if probabilities are constrained to [0.45, 0.55]\n range, we should be looking at multipliers much closer to 1 than if\n the probabilities are in [0.05, 0.95] range. In the former case, a\n multiplier of 1.25 suffices to flip all predictions while in the latter\n a multiplier of 10 is not enough to flip all predictions.\n\n The returned ranges are set to ensure that total prediction flips are\n possible and that we constrain the search to relevant multipliers (e.g.\n it's pointless to test a value of 1.5 if 1.25 already suffices to flip\n all predictions).\n\n If there is already a large probability coverage (e.g. 
[0.05, 0.95]),\n we constraint the multipliers search range for this group to [0.1, 10]\n as a reasonable enough default.\n\n Arguments\n ---------\n probas: pd.DataFrame\n The probabilities used to collect group-specific probability ranges.\n groups: pd.DataFrame\n The groups used to separate samples.\n unique_groups: np.ndarray or None\n Array of all possible unique groups.\n unique_group_names: List or None\n Array of all unique group names.\n\n Returns\n -------\n Dict: group_ranges\n Dictionary mapping every group name to its (min, max) range\n to consider for multipliers.\n\n Raises\n ------\n GuardianAIValueError\n Raised when an invalid value is encountered.\n \"\"\"\n if unique_groups is None:\n raise GuardianAIValueError(\"unique_groups cannot be None!\")\n if unique_group_names is None:\n raise GuardianAIValueError(\"unique_group_names cannot be None!\")\n\n group_ranges = {}\n\n for group, group_name in zip(unique_groups, unique_group_names):\n mask = (groups == group).all(1).to_numpy().squeeze()\n\n min_proba = probas[mask].min()\n max_proba = probas[mask].max()\n ratio = max_proba / (min_proba + 1e-6)\n\n ratio = min(ratio, 10)\n\n group_ranges[group_name] = (1 / ratio, ratio)\n\n return group_ranges\n\n def _get_multiplier_penalty(\n self, multipliers: Dict, group_ranges: Dict, unique_group_names: Optional[List]\n ) -> Tuple[float, float]:\n \"\"\"\n Get the multiplier penalty for both the fairness and accuracy metrics.\n\n Returned values are already adjusted to be either negative or positive\n penalties so they can be added directly to the scores.\n\n Arguments\n ---------\n multipliers: Dict\n Mapping from multiplier name to multiplier value.\n group_ranges: Dict\n Mapping from group name to group range (min_val, max_val).\n unique_group_names: List or None\n Array of all unique group names.\n\n Returns\n -------\n (float, float)\n Tuple containing\n accuracy_penalty: float\n The penalty to be applied on the accuracy score.\n fairness_penalty: float\n The penalty to be applied on the fairness score.\n\n Raises\n ------\n GuardianAIValueError\n Raised when an invalid value is encountered.\n \"\"\"\n if unique_group_names is None:\n raise GuardianAIValueError(\"unique_group_names cannot be None!\")\n\n multiplier_reg_penalty = []\n for group_name in unique_group_names:\n multiplier = multipliers[f\"multiplier_{group_name}\"]\n _, max_val = group_ranges[group_name]\n\n penalty = np.abs(np.log(multiplier)) / np.log(max_val)\n multiplier_reg_penalty.append(penalty)\n\n penalty = self._regularization_factor * np.mean(multiplier_reg_penalty)\n\n penalty_direction_acc = -1 if self._higher_accuracy_is_better else 1\n penalty_direction_fairness = -1 if self._higher_fairness_is_better else 1\n\n return penalty * penalty_direction_acc, penalty * penalty_direction_fairness\n\n def _produce_best_trials_frame(self, study: optuna.Study, group_ranges: Dict):\n \"\"\"\n Produce _best_trials_detailed dataframe from optuna Study object.\n\n Arguments\n ---------\n study: optuna.Study\n The completed study object.\n group_ranges: Dict\n Mapping from group name to group range (min_val, max_val).\n\n Raises\n ------\n GuardianAIValueError\n Raised when an invalid value is encountered.\n \"\"\"\n regularized_metric_names = [\n f\"{metric}_regularized\"\n for metric in [self.accuracy_metric_name, self.fairness_metric_name]\n ]\n\n if self._multiplier_names_ is None:\n raise GuardianAIValueError(\"_multiplier_names_ cannot be None!\")\n\n df = pd.DataFrame(\n [(*trial.values, 
*trial.params.values()) for trial in study.best_trials],\n columns=regularized_metric_names + self._multiplier_names_,\n )\n\n # Unwrap regularization factors\n regularization_factors = np.array(\n [\n self._get_multiplier_penalty(\n multipliers, group_ranges, self._unique_group_names_\n )\n for _, multipliers in df[self._multiplier_names_].iterrows()\n ],\n dtype=float,\n )\n df[\"regularization_accuracy\"] = regularization_factors[:, 0]\n df[\"regularization_fairness\"] = regularization_factors[:, 1]\n\n df[self.accuracy_metric_name] = (\n df[f\"{self.accuracy_metric_name}_regularized\"]\n - df[\"regularization_accuracy\"]\n )\n df[self.fairness_metric_name] = (\n df[f\"{self.fairness_metric_name}_regularized\"]\n - df[\"regularization_fairness\"]\n )\n\n # Remove possible multipliers duplicates\n df = df.drop_duplicates(self._multiplier_names_)\n\n # Sort best trials by fairness\n df = df.sort_values(by=self.fairness_metric_name)\n\n # Need to reset index so that it's ordered by fairness\n df = df.reset_index(drop=True)\n\n self._best_trials_detailed = df\n self.tradeoff_summary_ = df.drop(\n [col for col in df.columns if \"regulariz\" in col], axis=1\n )\n self.tradeoff_summary_ = self.tradeoff_summary_[\n self.tradeoff_summary_.columns[::-1]\n ]\n\n def _get_optimization_directions(self) -> List[str]:\n \"\"\"\n Return optimization direction list used by Optuna to optimize\n fairness-accuracy trade-off.\n\n Returns\n -------\n optimization_directions: List[str]\n List of str corresponding to optimization directions for the\n two metrics.\n \"\"\"\n\n def _get_optimization_direction(higher_is_better: Union[bool, str]):\n return \"maximize\" if higher_is_better else \"minimize\"\n\n return [\n _get_optimization_direction(self._higher_accuracy_is_better),\n _get_optimization_direction(self._higher_fairness_is_better),\n ]\n\n def _get_base_probas(self, X: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Get the probabilities from the base estimator on a dataset.\n\n Is in charge of removing the protected attributes if needed.\n\n Arguments\n ---------\n X: pd.DataFrame\n The dataset for which to collect label probabilities.\n\n Returns\n -------\n probas: np.ndarray\n Label probabilities for every row in X.\n \"\"\"\n if self._base_estimator_uses_protected_attributes:\n return self._base_estimator.predict_proba(X) # type: ignore\n else:\n return self._base_estimator.predict_proba( # type: ignore\n X.drop(columns=self._protected_attribute_names) # type: ignore\n )\n\n def _select_best_model_from_constraints(self):\n \"\"\"\n Select best model from the available trade-offs according to\n constraint.\n\n Calls `select_best_model` with best found model.\n\n Raises\n ------\n GuardianAIValueError\n If ``constraint_target`` or ``constraint_type`` have invalid\n values.\n \"\"\"\n if self._constraint_target == \"fairness\":\n self.constrained_metric_ = self.fairness_metric_name\n self.unconstrained_metric_ = self.accuracy_metric_name\n constrained_higher_is_better = self._higher_fairness_is_better\n unconstrained_higher_is_better = self._higher_accuracy_is_better\n elif self._constraint_target == \"accuracy\":\n self.constrained_metric_ = self.accuracy_metric_name\n self.unconstrained_metric_ = self.fairness_metric_name\n constrained_higher_is_better = self._higher_accuracy_is_better\n unconstrained_higher_is_better = self._higher_fairness_is_better\n else:\n raise GuardianAIValueError(\n \"Only `accuracy` and `fairness` are supported for \"\n f\"`constraint_target`. 
Received {self._constraint_target}\"\n )\n\n if self._constraint_type == \"relative\":\n if constrained_higher_is_better:\n ref_val = self._best_trials_detailed[self.constrained_metric_].max()\n relative_ratio = (\n (1 - self._constraint_value)\n if ref_val > 0\n else (1 + self._constraint_value)\n )\n self.constraint_criterion_value_ = relative_ratio * ref_val\n self._admissible_trials_mask_ = (\n self._best_trials_detailed[self.constrained_metric_]\n >= self.constraint_criterion_value_\n )\n else:\n ref_val = self._best_trials_detailed[self.constrained_metric_].min()\n relative_ratio = (\n (1 + self._constraint_value)\n if ref_val > 0\n else (1 - self._constraint_value)\n )\n self.constraint_criterion_value_ = relative_ratio * ref_val\n self._admissible_trials_mask_ = (\n self._best_trials_detailed[self.constrained_metric_]\n <= self.constraint_criterion_value_\n )\n elif self._constraint_type == \"absolute\":\n self.constraint_criterion_value_ = self._constraint_value\n if constrained_higher_is_better:\n self._admissible_trials_mask_ = (\n self._best_trials_detailed[self.constrained_metric_]\n >= self.constraint_criterion_value_\n )\n else:\n self._admissible_trials_mask_ = (\n self._best_trials_detailed[self.constrained_metric_]\n <= self.constraint_criterion_value_\n )\n else:\n raise GuardianAIValueError(\n \"Only `relative` and `absolute` are supported for \"\n f\"`constraint_type`. Received {self._constraint_type}\"\n )\n\n admissible = self._best_trials_detailed[self._admissible_trials_mask_]\n\n if unconstrained_higher_is_better:\n best_model = admissible[self.unconstrained_metric_].idxmax()\n else:\n best_model = admissible[self.unconstrained_metric_].idxmin()\n\n self.select_model(best_model)\n\n def predict(self, X: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Predict class for input dataset X.\n\n Parameters\n ----------\n X: pd.DataFrame\n The dataset for which to collect labels.\n\n Returns\n -------\n labels: np.ndarray\n The labels for every sample.\n \"\"\"\n return self.predict_proba(X).argmax(-1)\n\n def predict_proba(self, X: pd.DataFrame) -> np.ndarray:\n \"\"\"\n Predict class probabilities for input dataset X.\n\n Parameters\n ----------\n X: pd.DataFrame\n The dataset for which to collect label probabilities.\n\n Returns\n -------\n probabilities: np.ndarray\n The label probabilities for every sample.\n \"\"\"\n probas = self._get_base_probas(X)\n\n groups = X[self._protected_attribute_names].astype(\"category\")\n\n self._unique_groups_ = cast(np.ndarray, self._unique_groups_)\n return _apply_multiplier(\n probas,\n groups,\n self._unique_groups_,\n self._unique_group_names_,\n self.selected_multipliers_,\n self._favorable_label_idx,\n )\n\n def show_tradeoff(self, hide_inadmissible: bool = False):\n \"\"\"\n Show the models representing the best fairness-accuracy trade-off\n found.\n\n Arguments\n ---------\n hide_inadmissible: bool, default=False\n Whether or not to hide the models that don't satisfy the\n constraint.\n \"\"\"\n df = self._best_trials_detailed\n\n if hide_inadmissible:\n df = df[self._admissible_trials_mask_] # type: ignore\n\n df = df.reset_index() # type: ignore\n\n fig = go.Figure()\n\n fig.add_trace(\n go.Scatter(\n x=df[self.fairness_metric_name],\n y=df[self.accuracy_metric_name],\n text=df[\"index\"],\n line_shape=\"vh\" if self._higher_fairness_is_better else \"hv\",\n mode=\"markers+lines\",\n hovertemplate=f\"{self.fairness_metric_name}\"\n + \": %{x:.4f}\"\n + f\"<br>{self.accuracy_metric_name}\"\n + \": %{y:.4f}</br>\"\n + \"Index: 
%{text}\",\n name=\"Multiplier Tuning (Best Models)\",\n )\n )\n\n fig.add_trace(\n go.Scatter(\n x=[self._fairness_base_],\n y=[self._accuracy_base_],\n mode=\"markers\",\n marker_symbol=\"cross\",\n marker_color=\"green\",\n marker_size=10,\n hovertemplate=f\"{self.fairness_metric_name}\"\n + \": %{x:.4f}\"\n + f\"<br>{self.accuracy_metric_name}\"\n + \": %{y:.4f}</br>\",\n name=\"Base Estimator\",\n )\n )\n\n fig.add_trace(\n go.Scatter(\n x=[\n self._best_trials_detailed[self.fairness_metric_name].iloc[ # type: ignore\n self.selected_multipliers_idx_\n ]\n ],\n y=[\n self._best_trials_detailed[self.accuracy_metric_name].iloc[ # type: ignore\n self.selected_multipliers_idx_\n ]\n ],\n mode=\"markers\",\n marker_symbol=\"circle-open\",\n marker_color=\"red\",\n marker_line_width=3,\n marker_size=15,\n hoverinfo=\"skip\",\n name=\"Currently Selected Model\",\n )\n )\n\n # Constraint\n fig.add_trace(self._get_constraint_line(df))\n\n fig.update_xaxes(gridwidth=1, gridcolor=\"LightGrey\", zerolinecolor=\"LightGrey\")\n fig.update_yaxes(gridwidth=1, gridcolor=\"LightGrey\", zerolinecolor=\"LightGrey\")\n\n fig.update_layout(\n title=\"Bias Mitigation Best Models Found\",\n xaxis_title=self.fairness_metric_name,\n yaxis_title=self.accuracy_metric_name,\n legend_title=\"Models\",\n plot_bgcolor=\"rgba(0,0,0,0)\",\n width=None,\n height=600,\n margin={\"t\": 50, \"b\": 50},\n )\n\n fig.show()\n\n def _get_constraint_line(self, df: pd.DataFrame) -> Any:\n \"\"\"\n Return the Plotly Line object that represents the constraint on\n the figure.\n\n Arguments\n ---------\n df: pd.DataFrame\n DataFrame of trials that will be plotted. Used to determine the\n range the constraint line has to cover.\n\n Returns\n -------\n Any: line\n The plotly line representing the constraint.\n \"\"\"\n x_min = min(df[self.fairness_metric_name].min(), self._fairness_base_)\n x_max = max(df[self.fairness_metric_name].max(), self._fairness_base_)\n\n y_min = min(df[self.accuracy_metric_name].min(), self._accuracy_base_)\n y_max = max(df[self.accuracy_metric_name].max(), self._accuracy_base_)\n\n if self._constraint_target == \"fairness\":\n x = [self.constraint_criterion_value_] * 2\n y = [y_min, y_max]\n elif self._constraint_target == \"accuracy\":\n x = [x_min, x_max]\n y = [self.constraint_criterion_value_] * 2\n\n if self._constraint_type == \"relative\":\n name = f\"{self._constraint_value * 100:.1f}% relative {self.constrained_metric_} drop\"\n elif self._constraint_type == \"absolute\":\n name = f\"{self.constrained_metric_} value of {self.constraint_criterion_value_:.2f}\"\n\n return go.Scatter(\n x=x,\n y=y,\n mode=\"lines\",\n line=dict(color=\"black\", width=4, dash=\"dash\"),\n name=name,\n )\n\n def select_model(self, model_idx: int):\n \"\"\"\n Select the multipliers to use for inference.\n\n Arguments\n ---------\n model_idx: int\n The index of the multipliers in `self.best_trials_` to use for\n inference, as displayed by `show_tradeoff`.\n\n Raises\n ------\n GuardianAIValueError\n Raised when the passed model_idx is invalid.\n \"\"\"\n if model_idx < 0 or model_idx >= len(self._best_trials_detailed): # type: ignore\n raise GuardianAIValueError(f\"Invalid `model_idx` received: {model_idx}\")\n\n self.selected_multipliers_idx_ = model_idx\n\n @property\n def selected_multipliers_(self): # noqa D102\n if self._best_trials_detailed is None or self.selected_multipliers_idx_ is None:\n return None\n else:\n return self._best_trials_detailed[self._multiplier_names_].iloc[\n self.selected_multipliers_idx_\n ]" 
}, { "identifier": "model_statistical_parity", "path": "guardian_ai/fairness/metrics/model.py", "snippet": "def model_statistical_parity(\n y_true: Optional[Union[pd.Series, np.ndarray, List]] = None,\n y_pred: Optional[Union[pd.Series, np.ndarray, List]] = None,\n subgroups: Optional[pd.DataFrame] = None,\n distance_measure: str = DEFAULT_DISTANCE,\n reduction: Optional[str] = DEFAULT_REDUCTION,\n):\n \"\"\"\n Measure the statistical parity of a model's output between all subgroup pairs.\n\n For more details, refer to :class:`.ModelStatisticalParityScorer`.\n\n Parameters\n ----------\n y_true : pandas.Series, numpy.ndarray, list or None, default=None\n Array of groundtruth labels.\n y_pred : pandas.Series, numpy.ndarray, list or None, default=None\n Array of model predictions.\n subgroups : pandas.DataFrame or None, default=None\n Dataframe containing protected attributes for each instance.\n distance_measure : str, default='diff'\n Determines the distance used to compare a subgroup's metric against\n the rest of the subgroups. Possible values are:\n\n * ``'ratio'``: Uses ``(subgroup1_val / subgroup2_val)``. Inverted to always be >= 1 if needed.\n * ``'diff'``: Uses ``| subgroup1_val - subgroup2_val |``.\n\n reduction : str or None, default='mean'\n Determines how to reduce scores on all subgroups to a single output.\n Possible values are:\n\n * ``'max'``: Returns the maximal value among all subgroup metrics.\n * ``'mean'``: Returns the mean over all subgroup metrics.\n * ``None``: Returns a ``{subgroup_pair: subgroup_pair_metric, ...}`` dict.\n\n Returns\n -------\n float, dict\n The computed metric value, with format according to `reduction`.\n\n Raises\n ------\n GuardianAIValueError\n If Value of None is received for either `y_pred` or `subgroups`.\n\n Examples\n --------\n\n .. code-block:: python\n\n from guardian_ai.fairness.metrics import model_statistical_parity\n subgroups = X[['race', 'sex']]\n model_statistical_parity(y_true, y_pred, subgroups)\n\n This metric does not require `y_true`. It can also be called using\n\n .. code-block:: python\n\n model_statistical_parity(None, y_pred, subgroups)\n model_statistical_parity(y_pred=y_pred, subgroups=subgroups)\n \"\"\" # noqa: D412\n\n if y_pred is None or subgroups is None:\n raise GuardianAIValueError(\n \"Value of None was received for either `y_pred` or `subgroups`. \"\n \"This may be due to calling the metric using only 2 positional \"\n \"arguments. 
If this is the case, either call the function by \"\n \"passing ``None`` as the first argument or use named arguments for \"\n \"`y_pred` and `subgroups`.\"\n )\n\n return _model_metric(\n None,\n y_pred,\n subgroups,\n metric=\"selection_rate\",\n distance_measure=distance_measure,\n reduction=reduction,\n allow_y_true_none=True,\n allow_distance_measure_none=False,\n )" }, { "identifier": "GuardianAITypeError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAITypeError(TypeError, GuardianAIError):\n \"\"\"Exception raised for generic type issues.\"\"\"\n\n pass" }, { "identifier": "GuardianAIValueError", "path": "guardian_ai/utils/exception.py", "snippet": "class GuardianAIValueError(ValueError, GuardianAIError):\n \"\"\"Exception raised for unexpected values.\"\"\"\n\n pass" }, { "identifier": "get_dummy_dataset", "path": "tests/utils.py", "snippet": "def get_dummy_dataset(\n n_samples=5000,\n n_features=10,\n n_classes=2,\n types=[str, float, bool, int],\n content=[],\n contain_null=False,\n null_ratio=0.3,\n dtime_types=[],\n tz_aware=False,\n reg_range=10.0,\n cat_range=30,\n random_seed=9999,\n imb_factor=1.0,\n task=\"classification\",\n **kwargs,\n):\n \"\"\"\n Generates a dummy dataset and returns its corresponding ope/oml\n dataframe:\n dataset shape n_samples x n_features.\n\n types: column types you wish to generate (random number of columns=\n n_features types are generated, with at least one of each type).\n\n content: list of tuples (dtype, feature) specifying bad column\n features. Features can be 'const' - to make all values in column\n constant, or value between 0 and 1 which indicates percentage of\n missing values in a column\n\n dtime_types: datetime column types to generate. Acceptable types\n are: ['datetime', 'date', 'time', 'timedelta', 'datetimetz']\n\n n_classes: number of target classes (only used for classification)\n\n reg_range: range of target for regression datasets, not used for\n classification\n\n cat_range: maximum number of unique values for the categorical\n features\n\n imb_factor: ~ class_ratio = minority_class_size/majority_class_size\n approximately controls dataset target imbalance\n (only used for classification).\n\n \"\"\"\n np.random.seed(random_seed)\n allowed_dtime_types = [\n \"datetime\",\n \"date\",\n \"time\",\n \"timedelta\",\n \"datetimez\",\n \"Timestamp\",\n ]\n\n # sanity checks\n assert (\n n_samples >= n_classes\n ), \"Number of samples has to be greater than num of classes\"\n assert (imb_factor > 0) and (\n imb_factor <= 1.0\n ), \"imb_factor has to be in range of (0, 1.0]\"\n assert len(types) == len(set(types)), \"types inside the list must be unique\"\n assert len(dtime_types) == len(\n set(dtime_types)\n ), \"dtime_types inside the list must be unique\"\n assert (\n len(dtime_types) + len(types) <= n_features\n ), \"provided number of feature types is more than n_features\"\n assert task in [\n \"classification\",\n \"regression\",\n \"anomaly_detection\",\n ], \"Task must be one of classification or regression\"\n assert all(\n x for x in dtime_types if x in allowed_dtime_types\n ), \"dtime_types: {} outside of allowed: {}\".format(dtime_types, allowed_dtime_types)\n\n extra_types, extra_feats, extra_cols = [], [], 0\n if content != []:\n extra_cols = len(content)\n extra_types = [x for x, _ in content]\n extra_feats = [x for _, x in content]\n\n # target labels for the dataset\n if task == \"classification\" or task == \"anomaly_detection\":\n # assign class counts based on geometric distribution of 
classes based on imb_factor\n class_weights = np.geomspace(imb_factor, 1.0, num=n_classes)\n class_counts = [\n max(1, int(n_samples * x / np.sum(class_weights))) for x in class_weights\n ]\n class_excess = np.sum(class_counts) - n_samples\n class_counts[-1] -= class_excess\n\n # create labels based on class counts and shuffle them\n y = np.hstack(\n [np.full((1, count), cl) for cl, count in enumerate(class_counts)]\n ).ravel()\n np.random.shuffle(y.astype(int))\n y = y.tolist()\n elif task == \"regression\":\n # noise between (-reg_range/2, reg_range/2) for regression\n y = reg_range * np.random.random(size=(1, n_samples, 1)) + reg_range / 2.0\n y = y.reshape(1, n_samples).ravel().tolist()\n\n # tally total number of features\n all_feat_types = types + dtime_types + extra_types\n total_feat_types = len(types) + len(dtime_types)\n if total_feat_types > 0:\n feat_col_types = np.random.choice(\n range(0, total_feat_types), size=n_features - total_feat_types\n ).tolist()\n feat_col_types += list(\n range(0, total_feat_types)\n ) # to ensure at least one of each type\n\n else:\n feat_col_types = []\n feat_col_types += list(range(total_feat_types, total_feat_types + len(extra_types)))\n features = []\n col_types = []\n tz = {}\n # extra_features provided in content, and certain datetime columns are handled differently\n # they get added as pandas Series or DataFrames to rest of features in the end\n special_cols_num, special_pd_df = [], []\n extra_features = pd.DataFrame()\n for i, t in enumerate(feat_col_types):\n assert t < total_feat_types + len(extra_types)\n typ = all_feat_types[t]\n if typ is str:\n high_val = np.random.randint(3, cat_range)\n feat = np.random.randint(0, high_val, size=n_samples).tolist()\n feat = [\"STR{}\".format(val) for val in feat]\n elif typ is int:\n low_val = np.random.randint(-50000, -10)\n high_val = np.random.randint(10, 50000)\n feat = np.random.randint(low_val, high_val, size=n_samples).tolist()\n elif typ is float:\n feat = np.random.rand(n_samples).tolist()\n elif typ is bool:\n feat = np.random.randint(0, 2, size=n_samples).tolist()\n feat = [bool(val) for val in feat]\n elif typ in allowed_dtime_types:\n if typ == \"datetime\":\n # generating random datetime\n deltas = random.sample(range(1, 172800000), n_samples)\n d1 = datetime.datetime.now() - datetime.timedelta(days=2000)\n d2 = datetime.datetime.now()\n generated_datetime = []\n for d in deltas:\n generated_datetime.append(d1 + datetime.timedelta(seconds=d))\n feat = generated_datetime\n elif typ == \"timedelta\":\n feat = n_samples * [datetime.timedelta()]\n elif typ == \"time\":\n feat = n_samples * [datetime.time()]\n elif typ == \"date\":\n feat = n_samples * [datetime.date(2019, 9, 11)]\n elif typ == \"datetimez\":\n special_cols_num.append(i)\n special_pd_df.append(\n pd.date_range(start=0, periods=n_samples, tz=\"UTC\")\n )\n feat = n_samples * [\n datetime.date(2019, 9, 11)\n ] # needs to be handled in special way b/c it's already pandas obj\n else:\n raise Exception(\"Unrecognized datetime type of column\")\n else:\n raise Exception(\"Unrecognized type of column\")\n\n # If index reached the last extra_col number of feature types, start modifying features\n # and adding them to extra_features DataFrame instead of list of features\n if extra_cols > 0 and i >= (len(feat_col_types) - extra_cols):\n feat_idx = i - (len(feat_col_types) - extra_cols)\n if isinstance(extra_feats[feat_idx], numbers.Number):\n # missing values given by extra_feats[feat_idx] percentage of instances\n assert (\n 
extra_feats[feat_idx] <= 1.0 and extra_feats[feat_idx] >= 0\n ), \"feature in content has to be ratio between 0 and 1\"\n ids = np.random.choice(\n range(0, n_samples), size=int(extra_feats[feat_idx] * n_samples)\n ).astype(int)\n dtype = map_col_types([extra_types[feat_idx].__name__])[0]\n feat = pd.Series(data=np.array(feat), dtype=dtype)\n feat[ids] = np.nan\n elif extra_feats[feat_idx] == \"const\":\n # constant column, set all rows to be same as the first instance\n dtype = map_col_types([extra_types[feat_idx].__name__])[0]\n feat = pd.Series(data=np.array(feat), dtype=dtype)\n feat = feat[0]\n extra_features[i] = feat\n else: # add features to the list\n features.append(feat)\n col_types.append(type(feat[0]).__name__)\n\n # if task == 'regression':\n # # Add scaled target column for regression so that score is positive\n # features.append([-0.5*x for x in y])\n # col_types.append('float') # target column type is int\n\n # Add target column and convert all types to pandas dtypes\n features.append(y)\n col_types.append(\n \"int\" if task == \"classification\" else \"float\"\n ) # target column type is int\n pd_col_types = map_col_types(col_types)\n pd_df = pd.DataFrame(features).T # transpose to get samples x features\n num_feats = len(features) - 1\n columns = list(range(0, num_feats)) if num_feats > 0 else []\n columns = columns + [\"target\"]\n pd_df.columns = columns # rename columns\n\n # handle special column from datettime: replace placeholder with pandas.date_range columns\n for i, col in enumerate(special_cols_num):\n pd_df[col] = special_pd_df[i]\n pd_col_types[col] = pd_df.dtypes[col]\n\n # assign datatypes to pd dataframe for non-datetime types\n columns_types_all = list(zip(columns, pd_col_types))\n columns_types_nodtime = [\n (name, typ)\n for (name, typ) in columns_types_all\n if typ not in allowed_dtime_types\n ]\n columns_types_dtime = [\n (name, typ) for (name, typ) in columns_types_all if typ in allowed_dtime_types\n ]\n pd_df = pd_df.astype(dict(columns_types_nodtime)) # cast types on non-dtime columns\n\n # assign datatypes to pd dataframe only for datetime types\n for col, col_type in columns_types_dtime:\n if col_type == \"timedelta\":\n pd_df[col] = pd.to_timedelta(pd_df[col], errors=\"coerce\")\n elif col_type == \"datetimez\":\n pd_df[col] = pd_df[col]\n elif col_type == \"datetime\":\n pd_df[col] = pd.to_datetime(pd_df[col], errors=\"coerce\")\n if contain_null:\n pd_df[col] = generate_null(pd_df[col], null_ratio)\n if tz_aware:\n tz[str(col)] = pytz.all_timezones[\n np.random.randint(len(pytz.all_timezones))\n ]\n else:\n pd_df[col] = pd.to_timedelta(pd_df[col], errors=\"coerce\")\n\n # add extra features columns that were provided by content\n pd_df[pd_df.shape[1] + extra_features.columns] = extra_features\n\n # Convert all the column names to string type (mainly for FS min_features [] tests)\n pd_df.columns = [str(col) for col in pd_df.columns]\n\n if tz_aware:\n return pd_df.drop([\"target\"], axis=1), pd_df[\"target\"], tz\n else:\n return pd_df.drop([\"target\"], axis=1), pd_df[\"target\"]" } ]
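The `model_statistical_parity` snippet above documents a metric that needs only `y_pred` and `subgroups`; `y_true` is optional, and `'diff'` / `'mean'` are the documented defaults for `distance_measure` and `reduction`. A minimal sketch of calling it as documented (this assumes the guardian-ai package plus numpy/pandas are installed; the toy predictions and subgroup column below are invented purely for illustration):

.. code-block:: python

    # Hypothetical toy data; only y_pred and subgroups are required by the metric.
    import numpy as np
    import pandas as pd
    from guardian_ai.fairness.metrics import model_statistical_parity

    rng = np.random.default_rng(0)
    y_pred = rng.integers(0, 2, size=200)                            # binary model predictions
    subgroups = pd.DataFrame({"sex": rng.choice(["F", "M"], 200)})   # protected attribute(s)

    # 'diff' distance and 'mean' reduction are the documented defaults.
    score = model_statistical_parity(y_pred=y_pred, subgroups=subgroups,
                                     distance_measure="diff", reduction="mean")
    print(score)

Passing the arguments by name sidesteps the two-positional-argument pitfall that the metric's own error message warns about.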
import math
import os
import pickle
import tempfile
import numpy as np
import pandas as pd
import pytest
from sklearn.metrics import balanced_accuracy_score, log_loss, roc_auc_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model import LogisticRegression
from guardian_ai.fairness.bias_mitigation import ModelBiasMitigator
from guardian_ai.fairness.metrics import model_statistical_parity
from guardian_ai.utils.exception import GuardianAITypeError, GuardianAIValueError
from tests.utils import get_dummy_dataset
16715
@pytest.fixture(scope="module", params=METRIC_COMBOS.values(), ids=METRIC_COMBOS.keys()) def responsible_model_and_metrics(sensitive_dataset_and_model, request): X, y, model, sensitive_attr_names = sensitive_dataset_and_model fairness_metric, accuracy_metric = request.param ( fairness_name, fairness_callable, fairness_hib, fairness_uses_probas, ) = fairness_metric ( accuracy_name, accuracy_callable, accuracy_hib, accuracy_uses_probas, ) = accuracy_metric resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=fairness_name, accuracy_metric=accuracy_name, n_trials_per_group=5, random_seed=RANDOM_SEED, ) # limit number of trials for faster tests resp_model.fit(X, y) return X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric def test_sanity_checks(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics assert len(resp_model.predict(X)) == len(X) assert len(resp_model.predict_proba(X)) == len(X) assert resp_model._best_trials_detailed is not None def test_display(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics resp_model.show_tradeoff() # Assert that displays worked correctly (best we can do automatically currently) assert True def test_group_ranges(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model group_small_range = np.array([[0.4, 0.6], [0.6, 0.4]]) group_big_range = np.array([[0.05, 0.95], [0.95, 0.05]]) probas = np.vstack((group_small_range, group_big_range)) groups = ["small"] * len(group_small_range) + ["big"] * len(group_big_range) groups = pd.DataFrame(groups, columns=["group_val"]) unique_groups = groups["group_val"].unique() unique_group_names = groups["group_val"].unique().tolist() resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, random_seed=RANDOM_SEED, ) group_ranges = resp_model._get_group_ranges( probas, groups, unique_groups, unique_group_names ) small_ratio = 0.6 / (0.4 + 1e-6) expected_small = (1 / small_ratio, small_ratio) expected_big = (0.1, 10.0) for received, expected in zip(group_ranges["small"], expected_small): assert is_close(received, expected) for received, expected in zip(group_ranges["big"], expected_big): assert is_close(received, expected) def test_accepted_inputs(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model ### Bool or 'auto' attributes # Sanity checks ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, higher_accuracy_is_better="auto", higher_fairness_is_better="auto", fairness_metric_uses_probas="auto", accuracy_metric_uses_probas="auto", ) def test_bool_auto_attr(attr_name): # Only 'auto' supported str
#!/usr/bin/env python # -*- coding: utf-8 -*-- # Copyright (c) 2023 Oracle and/or its affiliates. # Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl/ # Constants used when any metric is needed A_FAIRNESS_METRIC = "equalized_odds" AN_ACCURACY_METRIC = "accuracy" RANDOM_SEED = 12345 @pytest.fixture(scope="module", autouse=True) def init(): np.random.seed(RANDOM_SEED) def is_close(a, b): return math.isclose(a, b, rel_tol=1e-5) def approx_dict(d): return pytest.approx(d, rel=1e-5) class DummyBinaryStochasticModel: def predict(self, X): return np.random.randint(0, 2, size=X.shape[0]) def create_concat_sensitive_attrs(dataset, n_classes): if not isinstance(n_classes, list): n_classes = list(n_classes) sensitive_dataset = dataset.copy() sensitive_attrs_names = [] for i, n_classes_i in enumerate(n_classes): sensitive_vals = np.array( [f"sensitive_val_{idx}" for idx in range(n_classes_i)] ) attr_name = f"sensitive_attr_{i}" sensitive_dataset = concat_sensitive_attr_column( sensitive_vals, sensitive_dataset, attr_name ) sensitive_attrs_names.append(attr_name) return sensitive_dataset, sensitive_attrs_names def concat_sensitive_attr_column(vals, dataset, attr_name): sensitive_vals = np.random.choice(vals, size=len(dataset)) sensitive_feats = pd.DataFrame(np.transpose(sensitive_vals), columns=[attr_name]) return pd.concat([dataset, sensitive_feats], axis=1) @pytest.fixture(scope="module") def model_type(): return "LogisticRegression" @pytest.fixture(scope="module") def base_dataset(): return get_dummy_dataset(n_samples=500, n_features=5, n_classes=2) # By default, all tests are ran with (1 protected attr with 2 groups), # (1 protected attr with more than 2 groups), and (more than 2 protected attr) SENSITIVE_FEATURES_VARIATIONS = { "one_attr_two_classes": {"n_classes": (2,)}, "one_attr_n_classes": {"n_classes": (4,)}, "n_attrs": {"n_classes": (3, 4)}, } @pytest.fixture( scope="module", params=SENSITIVE_FEATURES_VARIATIONS.values(), ids=SENSITIVE_FEATURES_VARIATIONS.keys(), ) def sensitive_dataset_and_model(model_type, base_dataset, request): dataset, target = base_dataset dataset, sensitive_attr_names = create_concat_sensitive_attrs( dataset, **request.param ) model = Pipeline( steps=[ ("preprocessor", OneHotEncoder(handle_unknown="ignore")), ("classifier", LogisticRegression(random_state=0)), ] ) model.fit(dataset, target) return dataset, target, model, sensitive_attr_names # (metric_name, callable, higher_is_better, requires_proba) dict FAIRNESS_METRICS = { "statistical_parity": ( "statistical_parity", model_statistical_parity, False, False, ), } def neg_log_loss_score(y_true, y_pred, **kwargs): return -log_loss(y_true, y_pred, **kwargs) # (metric_name, callable, higher_is_better, requires_proba) dict ACCURACY_METRICS = { "roc_auc": ("roc_auc", roc_auc_score, True, True), "balanced_accuracy": ("balanced_accuracy", balanced_accuracy_score, True, False), "neg_log_loss": ("neg_log_loss", neg_log_loss_score, False, True), } METRIC_COMBOS = { f"{fair_name}--{acc_name}": (fair_metric, acc_metric) for fair_name, fair_metric in FAIRNESS_METRICS.items() for acc_name, acc_metric in ACCURACY_METRICS.items() } @pytest.fixture(scope="module", params=METRIC_COMBOS.values(), ids=METRIC_COMBOS.keys()) def responsible_model_and_metrics(sensitive_dataset_and_model, request): X, y, model, sensitive_attr_names = sensitive_dataset_and_model fairness_metric, accuracy_metric = request.param ( fairness_name, fairness_callable, fairness_hib, fairness_uses_probas, ) = 
fairness_metric ( accuracy_name, accuracy_callable, accuracy_hib, accuracy_uses_probas, ) = accuracy_metric resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=fairness_name, accuracy_metric=accuracy_name, n_trials_per_group=5, random_seed=RANDOM_SEED, ) # limit number of trials for faster tests resp_model.fit(X, y) return X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric def test_sanity_checks(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics assert len(resp_model.predict(X)) == len(X) assert len(resp_model.predict_proba(X)) == len(X) assert resp_model._best_trials_detailed is not None def test_display(responsible_model_and_metrics): ( X, y, sensitive_attr_names, resp_model, fairness_metric, accuracy_metric, ) = responsible_model_and_metrics resp_model.show_tradeoff() # Assert that displays worked correctly (best we can do automatically currently) assert True def test_group_ranges(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model group_small_range = np.array([[0.4, 0.6], [0.6, 0.4]]) group_big_range = np.array([[0.05, 0.95], [0.95, 0.05]]) probas = np.vstack((group_small_range, group_big_range)) groups = ["small"] * len(group_small_range) + ["big"] * len(group_big_range) groups = pd.DataFrame(groups, columns=["group_val"]) unique_groups = groups["group_val"].unique() unique_group_names = groups["group_val"].unique().tolist() resp_model = ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, random_seed=RANDOM_SEED, ) group_ranges = resp_model._get_group_ranges( probas, groups, unique_groups, unique_group_names ) small_ratio = 0.6 / (0.4 + 1e-6) expected_small = (1 / small_ratio, small_ratio) expected_big = (0.1, 10.0) for received, expected in zip(group_ranges["small"], expected_small): assert is_close(received, expected) for received, expected in zip(group_ranges["big"], expected_big): assert is_close(received, expected) def test_accepted_inputs(sensitive_dataset_and_model): X, y, model, sensitive_attr_names = sensitive_dataset_and_model ### Bool or 'auto' attributes # Sanity checks ModelBiasMitigator( model, sensitive_attr_names, fairness_metric=A_FAIRNESS_METRIC, accuracy_metric=AN_ACCURACY_METRIC, higher_accuracy_is_better="auto", higher_fairness_is_better="auto", fairness_metric_uses_probas="auto", accuracy_metric_uses_probas="auto", ) def test_bool_auto_attr(attr_name): # Only 'auto' supported str
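The `test_group_ranges` function in the code above pins down the behaviour documented in `_get_group_ranges`: the multiplier search range for a group is `(1/ratio, ratio)`, where `ratio = min(max_proba / (min_proba + 1e-6), 10)`. A standalone sketch of that arithmetic (a small re-implementation for illustration only, not the library function itself):

.. code-block:: python

    import numpy as np

    def group_range(probas_for_group: np.ndarray) -> tuple:
        # Mirrors the documented logic: tightly clustered probabilities get
        # multipliers close to 1, wide ranges are capped at (0.1, 10).
        lo, hi = probas_for_group.min(), probas_for_group.max()
        ratio = min(hi / (lo + 1e-6), 10)
        return 1 / ratio, ratio

    small = np.array([[0.4, 0.6], [0.6, 0.4]])    # probabilities confined to [0.4, 0.6]
    big = np.array([[0.05, 0.95], [0.95, 0.05]])  # probabilities spanning [0.05, 0.95]

    print(group_range(small))  # ~ (0.667, 1.5), matching test_group_ranges
    print(group_range(big))    # (0.1, 10.0) once the ratio is capped at 10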
with pytest.raises(GuardianAIValueError):
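The completion target above, `with pytest.raises(GuardianAIValueError):`, belongs to a test of the "only 'auto' supported" rule from `_set_metric_names_and_callables`: when a metric is given by name, its `higher_*_is_better` / `*_uses_probas` flags must stay `"auto"`. A hedged sketch of that behaviour (assumes guardian-ai, scikit-learn and pytest are installed; the metric names are taken from the class docstring, and the unfitted estimator is only a stand-in since construction alone triggers the check):

.. code-block:: python

    import pytest
    from sklearn.linear_model import LogisticRegression
    from guardian_ai.fairness.bias_mitigation import ModelBiasMitigator
    from guardian_ai.utils.exception import GuardianAIValueError

    # With string metrics, the *_is_better / *_uses_probas flags must stay "auto";
    # setting one explicitly is rejected, which is what the test above asserts.
    with pytest.raises(GuardianAIValueError):
        ModelBiasMitigator(
            LogisticRegression(),
            protected_attribute_names="sex",
            fairness_metric="equalized_odds",
            accuracy_metric="balanced_accuracy",
            higher_accuracy_is_better=True,  # not "auto" -> GuardianAIValueError
        )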
3
2023-10-09 09:48:50+00:00
24k
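Conversely, when a metric is passed as a callable, those flags must be set explicitly. The sketch below mirrors the `balanced_accuracy` entry of `ACCURACY_METRICS` from the test file above (assumes guardian-ai and scikit-learn are installed; the protected attribute name is illustrative, and the unfitted estimator is a placeholder for a model trained beforehand):

.. code-block:: python

    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import balanced_accuracy_score
    from guardian_ai.fairness.bias_mitigation import ModelBiasMitigator

    # The estimator is a stand-in; in practice it should already be fitted on
    # training data before being handed to the mitigator.
    mitigator = ModelBiasMitigator(
        LogisticRegression(),
        protected_attribute_names="sex",           # hypothetical column name
        fairness_metric="statistical_parity",      # string -> flags stay "auto"
        accuracy_metric=balanced_accuracy_score,   # callable -> flags set manually
        higher_accuracy_is_better=True,
        accuracy_metric_uses_probas=False,         # this metric consumes labels
        n_trials_per_group=5,                      # kept small, as in the tests
    )
    # mitigator.fit(X_val, y_val) and mitigator.predict(X_test) then follow the
    # scikit-learn-like API shown in the ModelBiasMitigator docstring above.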
jiangjiechen/auction-arena
app.py
[ { "identifier": "create_items", "path": "src/item_base.py", "snippet": "def create_items(item_info_jsl):\n '''\n item_info: a list of dict (name, price, desc, id)\n '''\n item_info_jsl = LoadJsonL(item_info_jsl)\n item_list = []\n for info in item_info_jsl:\n item_list.append(Item(**info))\n return item_list" }, { "identifier": "Bidder", "path": "src/bidder_base.py", "snippet": "class Bidder(BaseModel):\n name: str\n model_name: str \n budget: int \n desire: str\n plan_strategy: str\n temperature: float = 0.7\n overestimate_percent: int = 10\n correct_belief: bool\n enable_learning: bool = False\n \n llm: BaseLanguageModel = None\n openai_cost = 0\n llm_token_count = 0\n \n verbose: bool = False\n auction_hash: str = ''\n\n system_message: str = ''\n original_budget: int = 0\n\n # working memory\n profit: int = 0\n cur_item_id = 0\n items: list = []\n dialogue_history: list = [] # for gradio UI display\n llm_prompt_history: list = [] # for tracking llm calling\n items_won = []\n bid_history: list = [] # history of the bidding of a single item\n plan_instruct: str = '' # instruction for planning\n cur_plan: str = '' # current plan\n status_quo: dict = {} # belief of budget and profit, self and others\n withdraw: bool = False # state of withdraw\n learnings: str = '' # learnings from previous biddings. If given, then use it to guide the rest of the auction.\n max_bid_cnt: int = 4 # Rule Bidder: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n rule_bid_cnt: int = 0 # Rule Bidder: count of bids on one item\n\n # belief tracking\n failed_bid_cnt: int = 0 # count of failed bids (overspending)\n total_bid_cnt: int = 0 # count of total bids\n self_belief_error_cnt: int = 0\n total_self_belief_cnt: int = 0\n other_belief_error_cnt: int = 0\n total_other_belief_cnt: int = 0\n \n engagement_count: int = 0\n budget_history = []\n profit_history = []\n budget_error_history = []\n profit_error_history = []\n win_bid_error_history = []\n engagement_history = defaultdict(int)\n all_bidders_status = {} # track others' profit\n changes_of_plan = []\n \n # not used\n input_box: str = None\n need_input = False\n semaphore = 0\n\n class Config:\n arbitrary_types_allowed = True\n\n def __repr__(self):\n return self.name\n\n def __str__(self):\n return self.name\n \n @classmethod\n def create(cls, **data):\n instance = cls(**data)\n instance._post_init()\n return instance\n\n def _post_init(self):\n self.original_budget = self.budget\n self.system_message = SYSTEM_MESSAGE.format(\n name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n )\n self._parse_llm()\n self.dialogue_history += [\n SystemMessage(content=self.system_message), \n AIMessage(content='')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n\n def _parse_llm(self):\n if 'gpt-' in self.model_name:\n self.llm = ChatOpenAI(model=self.model_name, temperature=self.temperature, max_retries=30, request_timeout=1200)\n elif 'claude' in self.model_name:\n self.llm = ChatAnthropic(model=self.model_name, temperature=self.temperature, default_request_timeout=1200)\n elif 'bison' in self.model_name:\n self.llm = ChatGooglePalm(model_name=f'models/{self.model_name}', temperature=self.temperature)\n elif 'rule' in self.model_name or 'human' in self.model_name:\n self.llm = None\n else:\n raise NotImplementedError(self.model_name)\n \n # def _rotate_openai_org(self):\n # # use two organizations to avoid rate limit\n # if os.environ.get('OPENAI_ORGANIZATION_1') and 
os.environ.get('OPENAI_ORGANIZATION_2'):\n # return random.choice([os.environ.get('OPENAI_ORGANIZATION_1'), os.environ.get('OPENAI_ORGANIZATION_2')])\n # else:\n # return None\n \n def _run_llm_standalone(self, messages: list):\n \n with get_openai_callback() as cb:\n for i in range(6):\n try:\n input_token_num = self.llm.get_num_tokens_from_messages(messages)\n if 'claude' in self.model_name: # anthropic's claude\n result = self.llm(messages, max_tokens_to_sample=2048)\n elif 'bison' in self.model_name: # google's palm-2\n max_tokens = min(max(3900 - input_token_num, 192), 2048)\n if isinstance(self.llm, ChatVertexAI):\n result = self.llm(messages, max_output_tokens=max_tokens)\n else:\n result = self.llm(messages)\n elif 'gpt' in self.model_name: # openai\n if 'gpt-3.5-turbo' in self.model_name and '16k' not in self.model_name:\n max_tokens = max(3900 - input_token_num, 192)\n else:\n # gpt-4\n # self.llm.openai_organization = self._rotate_openai_org()\n max_tokens = max(8000 - input_token_num, 192)\n result = self.llm(messages, max_tokens=max_tokens)\n elif 'llama' in self.model_name.lower():\n raise NotImplementedError\n else:\n raise NotImplementedError\n break\n except:\n print(f'Retrying for {self.model_name} ({i+1}/6), wait for {2**(i+1)} sec...')\n time.sleep(2**(i+1))\n self.openai_cost += cb.total_cost\n self.llm_token_count = self.llm.get_num_tokens_from_messages(messages)\n return result.content\n\n def _get_estimated_value(self, item):\n value = item.true_value * (1 + self.overestimate_percent / 100)\n return int(value)\n \n def _get_cur_item(self, key=None):\n if self.cur_item_id < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id].__dict__[key]\n else:\n return self.items[self.cur_item_id]\n else:\n return 'no item left'\n \n def _get_next_item(self, key=None):\n if self.cur_item_id + 1 < len(self.items):\n if key is not None:\n return self.items[self.cur_item_id + 1].__dict__[key]\n else:\n return self.items[self.cur_item_id + 1]\n else:\n return 'no item left'\n \n def _get_remaining_items(self, as_str=False):\n remain_items = self.items[self.cur_item_id + 1:]\n if as_str:\n return ', '.join([item.name for item in remain_items])\n else:\n return remain_items\n \n def _get_items_value_str(self, items: List[Item]):\n if not isinstance(items, list):\n items = [items]\n items_info = ''\n for i, item in enumerate(items):\n estimated_value = self._get_estimated_value(item)\n _info = f\"{i+1}. {item}, starting price is ${item.price}. Your estimated value for this item is ${estimated_value}.\\n\"\n items_info += _info\n return items_info.strip()\n \n # ********** Main Instructions and Functions ********** #\n \n def learn_from_prev_auction(self, past_learnings, past_auction_log):\n if not self.enable_learning or 'rule' in self.model_name or 'human' in self.model_name:\n return ''\n \n instruct_learn = INSTRUCT_LEARNING_TEMPLATE.format(\n past_auction_log=past_auction_log,\n past_learnings=past_learnings)\n\n result = self._run_llm_standalone([HumanMessage(content=instruct_learn)])\n self.dialogue_history += [\n HumanMessage(content=instruct_learn),\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in [HumanMessage(content=instruct_learn)]],\n 'result': result,\n 'tag': 'learn_0'\n })\n \n self.learnings = '\\n'.join(extract_numbered_list(result))\n if self.learnings != '':\n self.system_message += f\"\\n\\nHere are your key learning points and practical tips from a previous auction. 
You can use them to guide this auction:\\n```\\n{self.learnings}\\n```\"\n \n if self.verbose:\n print(f\"Learn from previous auction: {self.name} ({self.model_name}).\")\n return result\n\n def _choose_items(self, budget, items: List[Item]):\n '''\n Choose items within budget for rule bidders.\n Cheap ones first if maximize_items, expensive ones first if maximize_profit.\n '''\n sorted_items = sorted(items, key=lambda x: self._get_estimated_value(x), \n reverse=self.desire == 'maximize_profit')\n \n chosen_items = []\n i = 0\n while budget >= 0 and i < len(sorted_items):\n item = sorted_items[i]\n if item.price <= budget:\n chosen_items.append(item)\n budget -= item.price\n i += 1\n \n return chosen_items\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = INSTRUCT_PLAN_TEMPLATE.format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items), \n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n '''\n Plan for bidding with auctioneer's instruction and items information for customize estimated value.\n plan = plan(system_message, instruct_plan)\n '''\n if 'rule' in self.model_name: \n # self.cur_plan = ', '.join([x.name for x in self._choose_items(self.budget, self.items)])\n # self.dialogue_history += [\n # HumanMessage(content=plan_instruct),\n # AIMessage(content=self.cur_plan),\n # ]\n # return self.cur_plan\n return ''\n\n self.status_quo = {\n 'remaining_budget': self.budget,\n 'total_profits': {bidder: 0 for bidder in self.all_bidders_status.keys()},\n 'winning_bids': {bidder: {} for bidder in self.all_bidders_status.keys()},\n }\n\n if self.plan_strategy == 'none':\n self.plan_instruct = ''\n self.cur_plan = ''\n return None\n\n system_msg = SystemMessage(content=self.system_message)\n plan_msg = HumanMessage(content=plan_instruct)\n messages = [system_msg, plan_msg]\n result = self._run_llm_standalone(messages)\n \n if self.verbose:\n print(get_colored_text(plan_msg.content, 'red'))\n print(get_colored_text(result, 'green'))\n \n self.dialogue_history += [\n plan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': 'plan_0'\n })\n self.cur_plan = result\n self.plan_instruct = plan_instruct\n \n self.changes_of_plan.append([\n f\"{self.cur_item_id} (Initial)\", \n False, \n json.dumps(extract_jsons_from_text(result)[-1]),\n ])\n \n if self.verbose:\n print(f\"Plan: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n return result\n \n def get_rebid_instruct(self, auctioneer_msg: str):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg),\n AIMessage(content='')\n ]\n return auctioneer_msg\n\n def get_bid_instruct(self, auctioneer_msg: str, bid_round: int):\n auctioneer_msg = auctioneer_msg.replace(self.name, f'You ({self.name})')\n \n bid_instruct = INSTRUCT_BID_TEMPLATE.format(\n auctioneer_msg=auctioneer_msg, \n bidder_name=self.name,\n cur_item=self._get_cur_item(),\n estimated_value=self._get_estimated_value(self._get_cur_item()),\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n if bid_round == 0:\n if self.plan_strategy in ['static', 'none']:\n # if static planner, then no replanning is needed. status quo is updated in replanning. 
thus need to add status quo in bid instruct.\n bid_instruct = f\"\"\"The status quo of this auction so far is:\\n\"{json.dumps(self.status_quo, indent=4)}\"\\n\\n{bid_instruct}\\n---\\n\"\"\"\n else:\n bid_instruct = f'Now, the auctioneer says: \"{auctioneer_msg}\"'\n \n self.dialogue_history += [\n HumanMessage(content=bid_instruct),\n AIMessage(content='')\n ]\n return bid_instruct\n \n def bid_rule(self, cur_bid: int, min_markup_pct: float = 0.1):\n '''\n :param cur_bid: current highest bid\n :param min_markup_pct: minimum percentage for bid increase\n :param max_bid_cnt: maximum number of bids on one item (K = 1 starting bid + K-1 increase bid)\n '''\n # dialogue history already got bid_instruction.\n cur_item = self._get_cur_item()\n \n if cur_bid <= 0:\n next_bid = cur_item.price\n else:\n next_bid = cur_bid + min_markup_pct * cur_item.price\n \n if self.budget - next_bid >= 0 and self.rule_bid_cnt < self.max_bid_cnt:\n msg = int(next_bid)\n self.rule_bid_cnt += 1\n else:\n msg = -1\n \n content = f'The current highest bid for {cur_item.name} is ${cur_bid}. '\n content += \"I'm out!\" if msg < 0 else f\"I bid ${msg}! (Rule generated)\"\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=content)\n ]\n \n return msg\n \n def bid(self, bid_instruct):\n '''\n Bid for an item with auctioneer's instruction and bidding history.\n bid_history = bid(system_message, instruct_plan, plan, bid_history)\n '''\n if self.model_name == 'rule':\n return ''\n \n bid_msg = HumanMessage(content=bid_instruct)\n \n if self.plan_strategy == 'none':\n messages = [SystemMessage(content=self.system_message)]\n else:\n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n \n self.bid_history += [bid_msg]\n messages += self.bid_history\n \n result = self._run_llm_standalone(messages)\n \n self.bid_history += [AIMessage(content=result)]\n\n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=result)\n ]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'bid_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(bid_instruct, 'yellow'))\n print(get_colored_text(result, 'green'))\n \n print(f\"Bid: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n self.total_bid_cnt += 1\n \n return result\n\n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct = INSTRUCT_SUMMARIZE_TEMPLATE.format(\n cur_item=self._get_cur_item(), \n bidding_history=bidding_history, \n hammer_msg=hammer_msg.strip(), \n win_lose_msg=win_lose_msg.strip(), \n bidder_name=self.name,\n prev_status=self._status_json_to_text(self.status_quo),\n )\n return instruct\n\n def summarize(self, instruct_summarize: str):\n '''\n Update belief/status quo\n status_quo = summarize(system_message, bid_history, prev_status + instruct_summarize)\n '''\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n \n if self.model_name == 'rule': \n self.rule_bid_cnt = 0 # reset bid count for rule bidder\n return ''\n \n messages = [SystemMessage(content=self.system_message)]\n # messages += self.bid_history\n summ_msg = HumanMessage(content=instruct_summarize)\n messages.append(summ_msg)\n\n status_quo_text = self._run_llm_standalone(messages)\n \n self.dialogue_history += [summ_msg, AIMessage(content=status_quo_text)]\n self.bid_history += [summ_msg, 
AIMessage(content=status_quo_text)]\n \n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': status_quo_text,\n 'tag': f'summarize_{self.cur_item_id}'\n })\n\n cnt = 0\n while cnt <= 3:\n sanity_msg = self._sanity_check_status_json(extract_jsons_from_text(status_quo_text)[-1])\n if sanity_msg == '':\n # pass sanity check then track beliefs\n consistency_msg = self._belief_tracking(status_quo_text)\n else:\n sanity_msg = f'- {sanity_msg}'\n consistency_msg = ''\n \n if sanity_msg != '' or (consistency_msg != '' and self.correct_belief):\n err_msg = f\"As {self.name}, here are some error(s) of your summary of the status JSON:\\n{sanity_msg.strip()}\\n{consistency_msg.strip()}\\n\\nPlease revise the status JSON based on the errors. Don't apologize. Just give me the revised status JSON.\".strip()\n \n # print(f\"{self.name}: revising status quo for the {cnt} time:\")\n # print(get_colored_text(err_msg, 'green'))\n # print(get_colored_text(status_quo_text, 'red'))\n \n messages += [AIMessage(content=status_quo_text), \n HumanMessage(content=err_msg)]\n status_quo_text = self._run_llm_standalone(messages)\n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=status_quo_text),\n ]\n cnt += 1\n else:\n break\n \n self.status_quo = extract_jsons_from_text(status_quo_text)[-1]\n\n if self.verbose:\n print(get_colored_text(instruct_summarize, 'blue'))\n print(get_colored_text(status_quo_text, 'green'))\n \n print(f\"Summarize: {self.name} ({self.model_name}) for {self._get_cur_item()}.\")\n \n return status_quo_text\n \n def get_replan_instruct(self):\n instruct = INSTRUCT_REPLAN_TEMPLATE.format(\n status_quo=self._status_json_to_text(self.status_quo),\n remaining_items_info=self._get_items_value_str(self._get_remaining_items()),\n bidder_name=self.name,\n desire_desc=DESIRE_DESC[self.desire],\n learning_statement='' if not self.enable_learning else _LEARNING_STATEMENT\n )\n return instruct\n\n def replan(self, instruct_replan: str):\n '''\n plan = replan(system_message, instruct_plan, prev_plan, status_quo + (learning) + instruct_replan)\n '''\n if self.model_name == 'rule': \n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n if self.plan_strategy in ['none', 'static']:\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n self.withdraw = False\n return 'Skip replanning for bidders with static or no plan.'\n \n replan_msg = HumanMessage(content=instruct_replan)\n \n messages = [SystemMessage(content=self.system_message),\n HumanMessage(content=self.plan_instruct),\n AIMessage(content=self.cur_plan)]\n messages.append(replan_msg)\n\n result = self._run_llm_standalone(messages)\n \n new_plan_dict = extract_jsons_from_text(result)[-1]\n cnt = 0\n while len(new_plan_dict) == 0 and cnt < 2:\n err_msg = 'Your response does not contain a JSON-format priority list for items. 
Please revise your plan.'\n messages += [\n AIMessage(content=result),\n HumanMessage(content=err_msg),\n ]\n result = self._run_llm_standalone(messages)\n new_plan_dict = extract_jsons_from_text(result)[-1]\n \n self.dialogue_history += [\n HumanMessage(content=err_msg),\n AIMessage(content=result),\n ]\n cnt += 1\n \n old_plan_dict = extract_jsons_from_text(self.cur_plan)[-1]\n self.changes_of_plan.append([\n f\"{self.cur_item_id + 1} ({self._get_cur_item('name')})\", \n self._change_of_plan(old_plan_dict, new_plan_dict),\n json.dumps(new_plan_dict)\n ])\n \n self.plan_instruct = instruct_replan\n self.cur_plan = result\n self.withdraw = False\n self.bid_history = [] # clear bid history\n self.cur_item_id += 1\n\n self.dialogue_history += [\n replan_msg,\n AIMessage(content=result),\n ]\n self.llm_prompt_history.append({\n 'messages': [{x.type: x.content} for x in messages],\n 'result': result,\n 'tag': f'plan_{self.cur_item_id}'\n })\n \n if self.verbose:\n print(get_colored_text(instruct_replan, 'blue'))\n print(get_colored_text(result, 'green'))\n\n print(f\"Replan: {self.name} ({self.model_name}).\")\n return result\n \n def _change_of_plan(self, old_plan: dict, new_plan: dict):\n for k in new_plan:\n if new_plan[k] != old_plan.get(k, None):\n return True\n return False\n \n # *********** Belief Tracking and Sanity Check *********** #\n \n def bid_sanity_check(self, bid_price, prev_round_max_bid, min_markup_pct):\n # can't bid more than budget or less than previous highest bid\n if bid_price < 0:\n msg = None\n else:\n min_bid_increase = int(min_markup_pct * self._get_cur_item('price'))\n if bid_price > self.budget:\n msg = f\"you don't have insufficient budget (${self.budget} left)\"\n elif bid_price < self._get_cur_item('price'):\n msg = f\"your bid is lower than the starting bid (${self._get_cur_item('price')})\"\n elif bid_price < prev_round_max_bid + min_bid_increase:\n msg = f\"you must advance previous highest bid (${prev_round_max_bid}) by at least ${min_bid_increase} ({int(100 * min_markup_pct)}%).\"\n else:\n msg = None\n return msg\n\n def rebid_for_failure(self, fail_instruct: str):\n result = self.bid(fail_instruct)\n self.failed_bid_cnt += 1\n return result\n \n def _sanity_check_status_json(self, data: dict):\n if data == {}:\n return \"Error: No parsible JSON in your response. 
Possibly due to missing a closing curly bracket '}', or unpasible values (e.g., 'profit': 1000 + 400, instead of 'profit': 1400).\"\n\n # Check if all expected top-level keys are present\n expected_keys = [\"remaining_budget\", \"total_profits\", \"winning_bids\"]\n for key in expected_keys:\n if key not in data:\n return f\"Error: Missing '{key}' field in the status JSON.\"\n\n # Check if \"remaining_budget\" is a number\n if not isinstance(data[\"remaining_budget\"], (int, float)):\n return \"Error: 'remaining_budget' should be a number, and only about your remaining budget.\"\n\n # Check if \"total_profits\" is a dictionary with numbers as values\n if not isinstance(data[\"total_profits\"], dict):\n return \"Error: 'total_profits' should be a dictionary of every bidder.\"\n for bidder, profit in data[\"total_profits\"].items():\n if not isinstance(profit, (int, float)):\n return f\"Error: Profit for {bidder} should be a number.\"\n\n # Check if \"winning_bids\" is a dictionary and that each bidder's entry is a dictionary with numbers\n if not isinstance(data[\"winning_bids\"], dict):\n return \"Error: 'winning_bids' should be a dictionary.\"\n for bidder, bids in data[\"winning_bids\"].items():\n if not isinstance(bids, dict):\n return f\"Error: Bids for {bidder} should be a dictionary.\"\n for item, amount in bids.items():\n if not isinstance(amount, (int, float)):\n return f\"Error: Amount for {item} under {bidder} should be a number.\"\n\n # If everything is fine\n return \"\"\n \n def _status_json_to_text(self, data: dict):\n if 'rule' in self.model_name: return ''\n \n # Extract and format remaining budget\n structured_text = f\"* Remaining Budget: ${data.get('remaining_budget', 'unknown')}\\n\\n\"\n \n # Extract and format total profits for each bidder\n structured_text += \"* Total Profits:\\n\"\n if data.get('total_profits'):\n for bidder, profit in data['total_profits'].items():\n structured_text += f\" * {bidder}: ${profit}\\n\"\n \n # Extract and list the winning bids for each item by each bidder\n structured_text += \"\\n* Winning Bids:\\n\"\n if data.get('winning_bids'):\n for bidder, bids in data['winning_bids'].items():\n structured_text += f\" * {bidder}:\\n\"\n if bids:\n for item, amount in bids.items():\n structured_text += f\" * {item}: ${amount}\\n\"\n else:\n structured_text += f\" * No winning bids\\n\"\n \n return structured_text.strip()\n\n def _belief_tracking(self, status_text: str):\n '''\n Parse status quo and check if the belief is correct.\n '''\n belief_json = extract_jsons_from_text(status_text)[-1]\n # {\"remaining_budget\": 8000, \"total_profits\": {\"Bidder 1\": 1300, \"Bidder 2\": 1800, \"Bidder 3\": 0}, \"winning_bids\": {\"Bidder 1\": {\"Item 2\": 1200, \"Item 3\": 1000}, \"Bidder 2\": {\"Item 1\": 2000}, \"Bidder 3\": {}}}\n budget_belief = belief_json['remaining_budget']\n profits_belief = belief_json['total_profits']\n winning_bids = belief_json['winning_bids']\n\n msg = ''\n # track belief of budget\n self.total_self_belief_cnt += 1\n if budget_belief != self.budget:\n msg += f'- Your belief of budget is wrong: you have ${self.budget} left, but you think you have ${budget_belief} left.\\n'\n self.self_belief_error_cnt += 1\n self.budget_error_history.append([\n self._get_cur_item('name'),\n budget_belief,\n self.budget,\n ])\n \n # track belief of profits\n for bidder_name, profit in profits_belief.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n \n if self.name in 
bidder_name: \n bidder_name = self.name\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n real_profit = self.all_bidders_status[bidder_name]['profit']\n \n if profit != real_profit:\n if self.name == bidder_name:\n self.self_belief_error_cnt += 1\n else:\n self.other_belief_error_cnt += 1\n\n msg += f'- Your belief of total profit of {bidder_name} is wrong: {bidder_name} has earned ${real_profit} so far, but you think {bidder_name} has earned ${profit}.\\n'\n\n # add to history\n self.profit_error_history.append([\n f\"{bidder_name} ({self._get_cur_item('name')})\",\n profit,\n real_profit\n ])\n\n # track belief of winning bids\n for bidder_name, items_won_dict in winning_bids.items():\n if self.all_bidders_status.get(bidder_name) is None:\n # due to a potentially unreasonable parsing\n continue\n\n real_items_won = self.all_bidders_status[bidder_name]['items_won']\n # items_won = [(item, bid_price), ...)]\n \n items_won_list = list(items_won_dict.keys())\n real_items_won_list = [str(x) for x, _ in real_items_won]\n \n if self.name in bidder_name:\n self.total_self_belief_cnt += 1\n else:\n self.total_other_belief_cnt += 1\n \n if not item_list_equal(items_won_list, real_items_won_list):\n if bidder_name == self.name:\n self.self_belief_error_cnt += 1\n _bidder_name = f'you'\n else:\n self.other_belief_error_cnt += 1\n _bidder_name = bidder_name\n \n msg += f\"- Your belief of winning items of {bidder_name} is wrong: {bidder_name} won {real_items_won}, but you think {bidder_name} won {items_won_dict}.\\n\"\n\n self.win_bid_error_history.append([\n f\"{_bidder_name} ({self._get_cur_item('name')})\",\n ', '.join(items_won_list),\n ', '.join(real_items_won_list)\n ])\n \n return msg\n \n def win_bid(self, item: Item, bid: int):\n self.budget -= bid\n self.profit += item.true_value - bid\n self.items_won += [[item, bid]]\n msg = f\"Congratuations! You won {item} at ${bid}.\"# Now you have ${self.budget} left. Your total profit so far is ${self.profit}.\"\n return msg\n \n def lose_bid(self, item: Item):\n return f\"You lost {item}.\"# Now, you have ${self.budget} left. 
Your total profit so far is ${self.profit}.\"\n \n # set the profit information of other bidders\n def set_all_bidders_status(self, all_bidders_status: dict):\n self.all_bidders_status = all_bidders_status.copy()\n\n def set_withdraw(self, bid: int):\n if bid < 0: # withdraw\n self.withdraw = True\n elif bid == 0: # enable discount and bid again\n self.withdraw = False\n else: # normal bid\n self.withdraw = False\n self.engagement_count += 1\n self.engagement_history[self._get_cur_item('name')] += 1\n \n # ****************** Logging ****************** #\n \n # def _parse_hedging(self, plan: str): # deprecated\n # prompt = PARSE_HEDGE_INSTRUCTION.format(\n # item_name=self._get_cur_item(), \n # plan=plan)\n \n # with get_openai_callback() as cb:\n # llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n # result = llm([HumanMessage(content=prompt)]).content\n # self.openai_cost += cb.total_cost\n # # parse a number, which could be a digit\n # hedge_percent = re.findall(r'\\d+\\.?\\d*%', result)\n # if len(hedge_percent) > 0:\n # hedge_percent = hedge_percent[0].replace('%', '')\n # else:\n # hedge_percent = 0\n # return float(hedge_percent)\n \n def profit_report(self):\n '''\n Personal profit report at the end of an auction.\n '''\n msg = f\"* {self.name}, starting with ${self.original_budget}, has won {len(self.items_won)} items in this auction, with a total profit of ${self.profit}.:\\n\"\n profit = 0\n for item, bid in self.items_won:\n profit += item.true_value - bid\n msg += f\" * Won {item} at ${bid} over ${item.price}, with a true value of ${item.true_value}.\\n\"\n return msg.strip()\n \n def to_monitors(self, as_json=False):\n # budget, profit, items_won, tokens\n if len(self.items_won) == 0 and not as_json: \n items_won = [['', 0, 0]]\n else:\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n \n profit_error_history = self.profit_error_history if self.profit_error_history != [] or as_json else [['', '', '']]\n win_bid_error_history = self.win_bid_error_history if self.win_bid_error_history != [] or as_json else [['', '', '']]\n budget_error_history = self.budget_error_history if self.budget_error_history != [] or as_json else [['', '']]\n changes_of_plan = self.changes_of_plan if self.changes_of_plan != [] or as_json else [['', '', '']]\n \n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'model_name': self.model_name,\n 'desire': self.desire,\n 'plan_strategy': self.plan_strategy,\n 'overestimate_percent': self.overestimate_percent,\n 'temperature': self.temperature,\n 'correct_belief': self.correct_belief,\n 'enable_learning': self.enable_learning,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'tokens_used': self.llm_token_count,\n 'openai_cost': round(self.openai_cost, 2),\n 'failed_bid_cnt': self.failed_bid_cnt,\n 'self_belief_error_cnt': self.self_belief_error_cnt,\n 'other_belief_error_cnt': self.other_belief_error_cnt,\n 'failed_bid_rate': round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2),\n 'self_error_rate': round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2),\n 'other_error_rate': round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2),\n 'engagement_count': self.engagement_count,\n 'engagement_history': self.engagement_history,\n 'changes_of_plan': changes_of_plan,\n 'budget_error_history': budget_error_history,\n 'profit_error_history': 
profit_error_history,\n 'win_bid_error_history': win_bid_error_history,\n 'history': self.llm_prompt_history\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n self.llm_token_count, \n round(self.openai_cost, 2), \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n round(self.self_belief_error_cnt / (self.total_self_belief_cnt+1e-8), 2), \n round(self.other_belief_error_cnt / (self.total_other_belief_cnt+1e-8), 2), \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n changes_of_plan,\n budget_error_history,\n profit_error_history, \n win_bid_error_history\n ]\n\n def dialogue_to_chatbot(self):\n # chatbot: [[Human, AI], [], ...]\n # only dialogue will be sent to LLMs. chatbot is just for display.\n assert len(self.dialogue_history) % 2 == 0\n chatbot = []\n for i in range(0, len(self.dialogue_history), 2):\n # if exceeds the length of dialogue, append the last message\n human_msg = self.dialogue_history[i].content\n ai_msg = self.dialogue_history[i+1].content\n if ai_msg == '': ai_msg = None\n if human_msg == '': human_msg = None\n chatbot.append([human_msg, ai_msg])\n return chatbot" }, { "identifier": "HumanBidder", "path": "src/human_bidder.py", "snippet": "class HumanBidder(Bidder):\n name: str\n human_name: str = \"Adam\"\n budget: int\n auction_hash: str\n \n cur_item_id = 0\n items: list = []\n withdraw: bool = False\n \n engagement_count: int = 0\n original_budget: int = 0\n profit: int = 0\n items_won = []\n \n all_bidders_status = {} # track others' profit\n \n # essential for demo\n need_input: bool = False\n semaphore: int = 0 # if needs input, then semaphore is set as 1, else waits.\n input_box: str = None # global variable for accepting user input\n \n # not used\n model_name: str = 'human'\n openai_cost = 0\n desire = ''\n plan_strategy = ''\n correct_belief = True\n \n class Config:\n arbitrary_types_allowed = True\n \n def get_plan_instruct(self, items: List[Item]):\n self.items = items\n plan_instruct = \"As {bidder_name}, you have a total budget of ${budget}. 
This auction has a total of {item_num} items to be sequentially presented, they are:\\n{items_info}\".format(\n bidder_name=self.name, \n budget=self.budget, \n item_num=len(items), \n items_info=self._get_items_value_str(items)\n )\n return plan_instruct\n \n def init_plan(self, plan_instruct: str):\n # Human = auctioneer, AI = bidder\n self.dialogue_history += [\n HumanMessage(content=plan_instruct),\n AIMessage(content='(Getting ready...)')\n ]\n return ''\n \n def get_bid_instruct(self, auctioneer_msg, bid_round):\n self.dialogue_history += [\n HumanMessage(content=auctioneer_msg), \n AIMessage(content='')\n ]\n return auctioneer_msg\n \n def bid(self, bid_instruct):\n # wait for the cue to handle user input\n while self.semaphore <= 0:\n time.sleep(1)\n \n self.dialogue_history += [\n HumanMessage(content=''),\n AIMessage(content=self.input_box)\n ]\n self.semaphore -= 1\n self.need_input = False\n return self.input_box\n \n def get_summarize_instruct(self, bidding_history: str, hammer_msg: str, win_lose_msg: str):\n instruct_summarize = f\"{bidding_history}\\n\\n{hammer_msg}\\n{win_lose_msg}\"\n return instruct_summarize\n \n def summarize(self, instruct_summarize: str):\n self.dialogue_history += [\n HumanMessage(content=instruct_summarize),\n AIMessage(content='(Taking notes...)')\n ]\n self.budget_history.append(self.budget)\n self.profit_history.append(self.profit)\n return ''\n \n def get_replan_instruct(self):\n return ''\n\n def replan(self, instruct_replan):\n self.withdraw = False\n self.cur_item_id += 1\n return ''\n \n def to_monitors(self, as_json=False):\n items_won = []\n for item, bid in self.items_won:\n items_won.append([str(item), bid, item.true_value])\n if as_json:\n return {\n 'auction_hash': self.auction_hash,\n 'bidder_name': self.name,\n 'human_name': self.human_name,\n 'model_name': self.model_name,\n 'budget': self.original_budget,\n 'money_left': self.budget,\n 'profit': self.profit,\n 'items_won': items_won,\n 'engagement_count': self.engagement_count,\n }\n else:\n return [\n self.budget, \n self.profit, \n items_won, \n 0, \n 0, \n round(self.failed_bid_cnt / (self.total_bid_cnt+1e-8), 2), \n 0, \n 0, \n self.engagement_count,\n draw_plot(f\"{self.name} ({self.model_name})\", self.budget_history, self.profit_history), \n [],\n [],\n [], \n []\n ]" }, { "identifier": "Auctioneer", "path": "src/auctioneer_base.py", "snippet": "class Auctioneer(BaseModel):\n enable_discount: bool = False\n items: List[Item] = []\n cur_item: Item = None\n highest_bidder: Bidder = None\n highest_bid: int = -1\n bidding_history = defaultdict(list) # history about the bidding war of one item\n items_queue: List[Item] = [] # updates when a item is taken.\n auction_logs = defaultdict(list) # history about the bidding war of all items\n openai_cost = 0\n prev_round_max_bid: int = -1\n min_bid: int = 0\n fail_to_sell = False\n min_markup_pct = 0.1\n\n class Config:\n arbitrary_types_allowed = True\n \n def init_items(self, items: List[Item]):\n for item in items:\n # reset discounted price\n item.reset_price()\n self.items = items\n self.items_queue = items.copy()\n\n def summarize_items_info(self):\n desc = ''\n for item in self.items:\n desc += f\"- {item.get_desc()}\\n\"\n return desc.strip()\n \n def present_item(self):\n cur_item = self.items_queue.pop(0)\n self.cur_item = cur_item\n return cur_item\n \n def shuffle_items(self):\n random.shuffle(self.items)\n self.items_queue = self.items.copy()\n \n def record_bid(self, bid_info: dict, bid_round: int):\n '''\n Save the 
bidding history for each round, log the highest bidder and highest bidding\n '''\n # bid_info: {'bidder': xxx, 'bid': xxx, 'raw_msg': xxx}\n self.bidding_history[bid_round].append(bid_info)\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n if self.highest_bid < hist['bid']:\n self.highest_bid = hist['bid']\n self.highest_bidder = hist['bidder']\n elif self.highest_bid == hist['bid']:\n # random if there's a tie\n self.highest_bidder = random.choice([self.highest_bidder, hist['bidder']])\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append(\n {'bidder': bid_info['bidder'], \n 'bid': bid_info['bid'], \n 'bid_round': bid_round})\n\n def _biddings_to_string(self, bid_round: int):\n '''\n Return a string that summarizes the bidding history in a round\n '''\n # bid_hist_text = '' if bid_round == 0 else f'- {self.highest_bidder}: ${self.highest_bid}\\n'\n bid_hist_text = ''\n for js in self.bidding_history[bid_round]:\n if js['bid'] < 0:\n bid_hist_text += f\"- {js['bidder']} withdrew\\n\"\n else:\n bid_hist_text += f\"- {js['bidder']}: ${js['bid']}\\n\"\n return bid_hist_text.strip()\n \n def all_bidding_history_to_string(self):\n bid_hist_text = ''\n for bid_round in self.bidding_history:\n bid_hist_text += f\"Round {bid_round}:\\n{self._biddings_to_string(bid_round)}\\n\\n\"\n return bid_hist_text.strip()\n\n def ask_for_bid(self, bid_round: int):\n '''\n Ask for bid, return the message to be sent to bidders\n '''\n if self.highest_bidder is None:\n if bid_round > 0:\n msg = f\"Seeing as we've had no takers at the initial price, we're going to lower the starting bid to ${self.cur_item.price} for {self.cur_item.name} to spark some interest! Do I have any takers?\"\n else:\n remaining_items = [self.cur_item.name] + [item.name for item in self.items_queue]\n msg = f\"Attention, bidders! {len(remaining_items)} item(s) left, they are: {', '.join(remaining_items)}.\\n\\nNow, please bid on {self.cur_item}. The starting price for bidding for {self.cur_item} is ${self.cur_item.price}. Anyone interested in this item?\"\n else:\n bidding_history = self._biddings_to_string(bid_round - 1)\n msg = f\"Thank you! This is the {p.ordinal(bid_round)} round of bidding for this item:\\n{bidding_history}\\n\\nNow we have ${self.highest_bid} from {self.highest_bidder.name} for {self.cur_item.name}. The minimum increase over this highest bid is ${int(self.cur_item.price * self.min_markup_pct)}. Do I have any advance on ${self.highest_bid}?\"\n return msg\n \n def ask_for_rebid(self, fail_msg: str, bid_price: int):\n return f\"Your bid of ${bid_price} failed, because {fail_msg}: You must reconsider your bid.\"\n\n def get_hammer_msg(self):\n if self.highest_bidder is None:\n return f\"Since no one bid on {self.cur_item.name}, we'll move on to the next item.\"\n else:\n return f\"Sold! {self.cur_item} to {self.highest_bidder} at ${self.highest_bid}! 
The true value for {self.cur_item} is ${self.cur_item.true_value}.\"# Thus {self.highest_bidder}'s profit by winning this item is ${self.cur_item.true_value - self.highest_bid}.\"\n\n def check_hammer(self, bid_round: int):\n # check if the item is sold\n self.fail_to_sell = False\n num_bid = self._num_bids_in_round(bid_round)\n\n # highest_bidder has already been updated in record_bid().\n # so when num_bid == 0 & highest_bidder is None, it means no one bid on this item\n if self.highest_bidder is None:\n if num_bid == 0:\n # failed to sell, as there is no highest bidder\n self.fail_to_sell = True\n if self.enable_discount and bid_round < 3:\n # lower the starting price by 50%. discoutn only applies to the first 3 rounds\n self.cur_item.lower_price(0.5)\n is_sold = False\n else:\n is_sold = True\n else:\n # won't happen\n raise ValueError(f\"highest_bidder is None but num_bid is {num_bid}\")\n else:\n if self.prev_round_max_bid < 0 and num_bid == 1:\n # only one bidder in the first round \n is_sold = True\n else:\n self.prev_round_max_bid = self.highest_bid\n is_sold = self._num_bids_in_round(bid_round) == 0\n return is_sold\n \n def _num_bids_in_round(self, bid_round: int):\n # check if there is no bid in the current round\n cnt = 0\n for hist in self.bidding_history[bid_round]:\n if hist['bid'] > 0:\n cnt += 1\n return cnt\n\n def hammer_fall(self):\n print(f'* Sold! {self.cur_item} (${self.cur_item.true_value}) goes to {self.highest_bidder} at ${self.highest_bid}.')\n self.auction_logs[f\"{self.cur_item.get_desc()}\"].append({\n 'bidder': self.highest_bidder, \n 'bid': f\"{self.highest_bid} (${self.cur_item.true_value})\", # no need for the first $, as it will be added in the self.log()\n 'bid_round': 'Hammer price (true value)'})\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n\n def end_auction(self):\n return len(self.items_queue) == 0\n \n def gather_all_status(self, bidders: List[Bidder]):\n status = {}\n for bidder in bidders:\n status[bidder.name] = {\n 'profit': bidder.profit, \n 'items_won': bidder.items_won\n }\n return status\n\n def parse_bid(self, text: str):\n prompt = PARSE_BID_INSTRUCTION.format(response=text)\n with get_openai_callback() as cb:\n llm = ChatOpenAI(model='gpt-3.5-turbo-0613', temperature=0)\n result = llm([HumanMessage(content=prompt)]).content\n self.openai_cost += cb.total_cost\n \n bid_number = re.findall(r'\\$?\\d+', result.replace(',', ''))\n # find number in the result\n if '-1' in result:\n return -1\n elif len(bid_number) > 0:\n return int(bid_number[-1].replace('$', ''))\n else:\n print('* Rebid:', text)\n return None\n\n def log(self, bidder_personal_reports: list = [], show_model_name=True):\n ''' example\n Apparatus H, starting at $1000.\n\n 1st bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): $1200\n Bidder 2 (gpt-3.5-turbo-16k-0613): $1100\n Bidder 3 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n \n 2nd bid:\n Bidder 1 (gpt-3.5-turbo-16k-0613): Withdrawn\n Bidder 2 (gpt-3.5-turbo-16k-0613): Withdrawn\n \n Hammer price:\n Bidder 4 (gpt-3.5-turbo-16k-0613): $1200\n '''\n markdown_output = \"## Auction Log\\n\\n\"\n for i, (item, bids) in enumerate(self.auction_logs.items()):\n markdown_output += f\"### {i+1}. 
{item}\\n\\n\"\n cur_bid_round = -1\n for i, bid in enumerate(bids):\n if bid['bid_round'] != cur_bid_round:\n cur_bid_round = bid['bid_round']\n if isinstance(bid['bid_round'], int):\n markdown_output += f\"\\n#### {p.ordinal(bid['bid_round']+1)} bid:\\n\\n\"\n else:\n markdown_output += f\"\\n#### {bid['bid_round']}:\\n\\n\"\n bid_price = f\"${bid['bid']}\" if bid['bid'] != -1 else 'Withdrew'\n if isinstance(bid['bidder'], Bidder) or isinstance(bid['bidder'], HumanBidder):\n if show_model_name:\n markdown_output += f\"* {bid['bidder']} ({bid['bidder'].model_name}): {bid_price}\\n\"\n else:\n markdown_output += f\"* {bid['bidder']}: {bid_price}\\n\"\n else:\n markdown_output += f\"* None bid\\n\"\n markdown_output += \"\\n\"\n \n if len(bidder_personal_reports) != 0:\n markdown_output += f\"\\n## Personal Report\"\n for report in bidder_personal_reports:\n markdown_output += f\"\\n\\n{report}\"\n return markdown_output.strip()\n \n def finish_auction(self):\n self.auction_logs = defaultdict(list)\n self.cur_item = None\n self.highest_bidder = None\n self.highest_bid = -1\n self.bidding_history = defaultdict(list)\n self.items_queue = []\n self.items = []\n self.prev_round_max_bid = -1\n self.fail_to_sell = False\n self.min_bid = 0" }, { "identifier": "run_auction", "path": "auction_workflow.py", "snippet": "def run_auction(\n auction_hash: str, \n auctioneer: Auctioneer, \n bidder_list: List[Bidder], \n thread_num: int, \n yield_for_demo=True,\n log_dir=LOG_DIR,\n repeat_num=0,\n memo_file=None):\n \n # bidder_list[0].verbose=True\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n # ***************** Learn Round ****************\n for bidder in bidder_list:\n if bidder.enable_learning and memo_file:\n # if no prev memo file, then no need to learn.\n if os.path.exists(memo_file):\n with open(memo_file) as f:\n data = json.load(f)\n past_learnings = data['learnings'][bidder.name]\n past_auction_log = data['auction_log']\n bidder.learn_from_prev_auction(past_learnings, past_auction_log)\n \n # ***************** Plan Round *****************\n # init bidder profit\n bidder_profit_info = auctioneer.gather_all_status(bidder_list)\n for bidder in bidder_list:\n bidder.set_all_bidders_status(bidder_profit_info)\n\n plan_instructs = [bidder.get_plan_instruct(auctioneer.items) for bidder in bidder_list]\n\n bidding_multithread(bidder_list, plan_instructs, func_type='plan', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n bar = tqdm(total=len(auctioneer.items_queue), desc='Auction Progress')\n while not auctioneer.end_auction():\n cur_item = auctioneer.present_item()\n \n bid_round = 0\n while True:\n # ***************** Bid Round ***************** \n auctioneer_msg = auctioneer.ask_for_bid(bid_round)\n _bidder_list = []\n _bid_instruct_list = []\n # remove highest bidder and withdrawn bidders\n for bidder in bidder_list:\n if bidder is auctioneer.highest_bidder or bidder.withdraw:\n bidder.need_input = False\n continue\n else:\n bidder.need_input = True # enable input from demo\n instruct = bidder.get_bid_instruct(auctioneer_msg, bid_round)\n _bidder_list.append(bidder)\n _bid_instruct_list.append(instruct)\n \n if yield_for_demo:\n 
chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + enable_human_box(bidder_list)\n \n _msgs = bidding_multithread(_bidder_list, _bid_instruct_list, func_type='bid', thread_num=thread_num)\n\n for i, (msg, bidder) in enumerate(zip(_msgs, _bidder_list)):\n if bidder.model_name == 'rule':\n bid_price = bidder.bid_rule(auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)\n else:\n bid_price = parse_bid_price(auctioneer, bidder, msg)\n\n # can't bid more than budget or less than previous highest bid\n while True:\n fail_msg = bidder.bid_sanity_check(bid_price, auctioneer.prev_round_max_bid, auctioneer.min_markup_pct)\n if fail_msg is None: \n break\n else:\n bidder.need_input = True # enable input from demo\n auctioneer_msg = auctioneer.ask_for_rebid(fail_msg=fail_msg, bid_price=bid_price)\n rebid_instruct = bidder.get_rebid_instruct(auctioneer_msg)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n msg = bidder.rebid_for_failure(rebid_instruct)\n bid_price = parse_bid_price(auctioneer, bidder, msg)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n bidder.set_withdraw(bid_price)\n auctioneer.record_bid({'bidder': bidder, 'bid': bid_price, 'raw_msg': msg}, bid_round)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n \n is_sold = auctioneer.check_hammer(bid_round)\n bid_round += 1\n if is_sold: \n break\n else:\n if auctioneer.fail_to_sell and auctioneer.enable_discount:\n for bidder in bidder_list:\n bidder.set_withdraw(0) # back in the game\n\n # ***************** Summarize ***************** \n summarize_instruct_list = []\n for bidder in bidder_list:\n if bidder is auctioneer.highest_bidder:\n win_lose_msg = bidder.win_bid(cur_item, auctioneer.highest_bid)\n else:\n win_lose_msg = bidder.lose_bid(cur_item)\n msg = bidder.get_summarize_instruct(\n bidding_history=auctioneer.all_bidding_history_to_string(),\n hammer_msg=auctioneer.get_hammer_msg(),\n win_lose_msg=win_lose_msg\n )\n summarize_instruct_list.append(msg)\n\n # record profit information of all bidders for each bidder\n # (not used in the auction, just for belief tracking evaluation)\n bidder_profit_info = auctioneer.gather_all_status(bidder_list)\n for bidder in bidder_list:\n bidder.set_all_bidders_status(bidder_profit_info)\n \n bidding_multithread(bidder_list, summarize_instruct_list, func_type='summarize', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n\n # ***************** Replan *****************\n if len(auctioneer.items_queue) > 0: # no need to replan if all items are sold\n replan_instruct_list = [bidder.get_replan_instruct(\n # bidding_history=auctioneer.all_bidding_history_to_string(), \n # hammer_msg=auctioneer.get_hammer_msg()\n ) for bidder in bidder_list]\n bidding_multithread(bidder_list, replan_instruct_list, 
func_type='replan', thread_num=thread_num)\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log()] + [disable_gr, disable_gr] + disable_all_box(bidder_list)\n\n auctioneer.hammer_fall()\n bar.update(1)\n\n total_cost = sum([b.openai_cost for b in bidder_list]) + auctioneer.openai_cost\n bidder_reports = [bidder.profit_report() for bidder in bidder_list]\n \n if yield_for_demo:\n chatbot_list = bidders_to_chatbots(bidder_list, profit_report=True)\n yield [bidder_list] + chatbot_list + monitor_all(bidder_list) + [auctioneer.log(bidder_reports) + f'\\n## Total Cost: ${total_cost}'] + [disable_gr, enable_gr] + disable_all_box(bidder_list)\n \n memo = {'auction_log': auctioneer.log(show_model_name=False),\n 'memo_text': bidder_reports,\n 'profit': {bidder.name: bidder.profit for bidder in bidder_list},\n 'total_cost': total_cost,\n 'learnings': {bidder.name: bidder.learnings for bidder in bidder_list},\n 'model_info': {bidder.name: bidder.model_name for bidder in bidder_list}}\n log_bidders(log_dir, auction_hash, bidder_list, repeat_num, memo)\n \n auctioneer.finish_auction()\n \n if not yield_for_demo:\n yield total_cost" }, { "identifier": "make_auction_hash", "path": "auction_workflow.py", "snippet": "def make_auction_hash():\n return str(int(time.time()))" }, { "identifier": "chunks", "path": "utils.py", "snippet": "def chunks(lst, n):\n \"\"\"Yield successive n-sized chunks from lst.\"\"\"\n for i in range(0, len(lst), n):\n yield lst[i : i + n]" }, { "identifier": "reset_state_list", "path": "utils.py", "snippet": "def reset_state_list(*states):\n empty = [None for _ in states[1:]]\n return [[]] + empty" } ]
import os
import gradio as gr
from app_modules.presets import *
from app_modules.overwrites import *
from app_modules.utils import *
from src.item_base import create_items
from src.bidder_base import Bidder
from src.human_bidder import HumanBidder
from src.auctioneer_base import Auctioneer
from auction_workflow import run_auction, make_auction_hash
from utils import chunks, reset_state_list
15,646
BIDDER_NUM = 4

items = create_items('data/items_demo.jsonl')


def auction_loop_app(*args):
    global items
    bidder_list = args[0]   # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]

    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]
    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]
    # convert flatten list into a json list
    input_jsl = []
BIDDER_NUM = 4

items = create_items('data/items_demo.jsonl')


def auction_loop_app(*args):
    global items
    bidder_list = args[0]   # gr.State() -> session state
    items_id = args[1]
    os.environ['OPENAI_API_KEY'] = args[2] if args[2] != '' else os.environ.get('OPENAI_API_KEY', '')
    os.environ['ANTHROPIC_API_KEY'] = args[3] if args[3] != '' else os.environ.get('ANTHROPIC_API_KEY', '')
    thread_num = args[4]
    item_shuffle = args[5]
    enable_discount = args[6]
    min_markup_pct = args[7]
    args = args[8:]

    auction_hash = make_auction_hash()

    items_to_bid = [items[i] for i in items_id]
    auctioneer = Auctioneer(enable_discount=enable_discount, min_markup_pct=min_markup_pct)
    auctioneer.init_items(items_to_bid)
    if item_shuffle:
        auctioneer.shuffle_items()

    # must correspond to the order in app's parameters
    input_keys = [
        'chatbot',
        'model_name',
        'desire',
        'plan_strategy',
        'budget',
        'correct_belief',
        'enable_learning',
        'temperature',
        'overestimate_percent',
    ]
    # convert flatten list into a json list
    input_jsl = []
for i, chunk in enumerate(chunks(args, len(input_keys))):
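A minimal, self-contained sketch of how the `chunks` helper quoted in this record's context regroups the flattened per-bidder Gradio inputs that the completion line above iterates over. All concrete values are hypothetical placeholders, and the dict-building loop body is an assumption for illustration, not the repository's actual continuation.

def chunks(lst, n):
    # Yield successive n-sized chunks from lst (as quoted from utils.py in the context above).
    for i in range(0, len(lst), n):
        yield lst[i:i + n]

input_keys = [
    'chatbot', 'model_name', 'desire', 'plan_strategy', 'budget',
    'correct_belief', 'enable_learning', 'temperature', 'overestimate_percent',
]

# Hypothetical flattened inputs for two bidders (9 values per bidder, in input_keys order).
args = [
    None, 'gpt-4', 'maximize_profit', 'adaptive', 2000, True, False, 0.7, 10,
    None, 'claude-2', 'maximize_items', 'static', 2000, True, False, 0.7, 10,
]

input_jsl = []
for i, chunk in enumerate(chunks(args, len(input_keys))):
    # Assumed continuation: zip each 9-element chunk back into a per-bidder config dict.
    input_jsl.append(dict(zip(input_keys, chunk)))

assert input_jsl[1]['model_name'] == 'claude-2'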
6
2023-10-08 09:30:57+00:00
24k
sakemin/cog-musicgen-chord
predict.py
[ { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n 
# auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "MultiBandDiffusion", "path": "audiocraft/models/multibanddiffusion.py", "snippet": "class MultiBandDiffusion:\n \"\"\"Sample from multiple diffusion models.\n\n Args:\n DPs (list of DiffusionProcess): Diffusion processes.\n codec_model (CompressionModel): Underlying compression model used to obtain discrete tokens.\n \"\"\"\n def __init__(self, DPs: tp.List[DiffusionProcess], codec_model: CompressionModel) -> None:\n self.DPs = DPs\n self.codec_model = codec_model\n self.device = next(self.codec_model.parameters()).device\n\n @property\n def sample_rate(self) -> int:\n return self.codec_model.sample_rate\n\n @staticmethod\n def get_mbd_musicgen(device=None):\n \"\"\"Load our diffusion models trained for MusicGen.\"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n path = 'facebook/multiband-diffusion'\n filename = 'mbd_musicgen_32khz.th'\n name = 'facebook/musicgen-small'\n codec_model = load_compression_model(name, device=device)\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n @staticmethod\n def get_mbd_24khz(bw: float = 3.0, pretrained: bool = True,\n device: tp.Optional[tp.Union[torch.device, str]] = None,\n n_q: tp.Optional[int] = None):\n \"\"\"Get the pretrained Models for MultibandDiffusion.\n\n Args:\n bw (float): Bandwidth of the compression model.\n pretrained (bool): Whether to use / download if necessary the models.\n device (torch.device or str, optional): Device on which the models are loaded.\n n_q (int, optional): Number of quantizers to use within the compression model.\n \"\"\"\n if device is None:\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n assert bw in [1.5, 3.0, 6.0], f\"bandwidth {bw} not available\"\n if n_q is not None:\n assert n_q in [2, 4, 8]\n assert {1.5: 2, 3.0: 4, 6.0: 8}[bw] == n_q, \\\n f\"bandwidth and number of codebooks missmatch to use n_q = {n_q} bw should be {n_q * (1.5 / 2)}\"\n n_q = {1.5: 2, 3.0: 4, 6.0: 8}[bw]\n codec_model = CompressionSolver.model_from_checkpoint(\n '//pretrained/facebook/encodec_24khz', device=device)\n codec_model.set_num_codebooks(n_q)\n codec_model = codec_model.to(device)\n path = 'facebook/multiband-diffusion'\n filename = f'mbd_comp_{n_q}.pt'\n models, processors, cfgs = load_diffusion_models(path, filename=filename, device=device)\n DPs = []\n for i in range(len(models)):\n schedule = NoiseSchedule(**cfgs[i].schedule, sample_processor=processors[i], device=device)\n DPs.append(DiffusionProcess(model=models[i], noise_schedule=schedule))\n return MultiBandDiffusion(DPs=DPs, codec_model=codec_model)\n\n return MultiBandDiffusion(DPs, codec_model)\n\n @torch.no_grad()\n def get_condition(self, wav: torch.Tensor, sample_rate: int) -> torch.Tensor:\n \"\"\"Get the conditioning (i.e. 
latent reprentatios of the compression model) from a waveform.\n Args:\n wav (torch.Tensor): The audio that we want to extract the conditioning from\n sample_rate (int): sample rate of the audio\"\"\"\n if sample_rate != self.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.sample_rate)\n codes, scale = self.codec_model.encode(wav)\n assert scale is None, \"Scaled compression models not supported.\"\n emb = self.get_emb(codes)\n return emb\n\n @torch.no_grad()\n def get_emb(self, codes: torch.Tensor):\n \"\"\"Get latent representation from the discrete codes\n Argrs:\n codes (torch.Tensor): discrete tokens\"\"\"\n emb = self.codec_model.decode_latent(codes)\n return emb\n\n def generate(self, emb: torch.Tensor, size: tp.Optional[torch.Size] = None,\n step_list: tp.Optional[tp.List[int]] = None):\n \"\"\"Generate Wavform audio from the latent embeddings of the compression model\n Args:\n emb (torch.Tensor): Conditioning embeddinds\n size (none torch.Size): size of the output\n if None this is computed from the typical upsampling of the model\n step_list (optional list[int]): list of Markov chain steps, defaults to 50 linearly spaced step.\n \"\"\"\n if size is None:\n upsampling = int(self.codec_model.sample_rate / self.codec_model.frame_rate)\n size = torch.Size([emb.size(0), self.codec_model.channels, emb.size(-1) * upsampling])\n assert size[0] == emb.size(0)\n out = torch.zeros(size).to(self.device)\n for DP in self.DPs:\n out += DP.generate(condition=emb, step_list=step_list, initial_noise=torch.randn_like(out))\n return out\n\n def re_eq(self, wav: torch.Tensor, ref: torch.Tensor, n_bands: int = 32, strictness: float = 1):\n \"\"\"match the eq to the encodec output by matching the standard deviation of some frequency bands\n Args:\n wav (torch.Tensor): audio to equalize\n ref (torch.Tensor):refenrence audio from which we match the spectrogram.\n n_bands (int): number of bands of the eq\n strictness (float): how strict the the matching. 
0 is no matching, 1 is exact matching.\n \"\"\"\n split = julius.SplitBands(n_bands=n_bands, sample_rate=self.codec_model.sample_rate).to(wav.device)\n bands = split(wav)\n bands_ref = split(ref)\n out = torch.zeros_like(ref)\n for i in range(n_bands):\n out += bands[i] * (bands_ref[i].std() / bands[i].std()) ** strictness\n return out\n\n def regenerate(self, wav: torch.Tensor, sample_rate: int):\n \"\"\"Regenerate a wavform through compression and diffusion regeneration.\n Args:\n wav (torch.Tensor): Original 'ground truth' audio\n sample_rate (int): sample rate of the input (and output) wav\n \"\"\"\n if sample_rate != self.codec_model.sample_rate:\n wav = julius.resample_frac(wav, sample_rate, self.codec_model.sample_rate)\n emb = self.get_condition(wav, sample_rate=self.codec_model.sample_rate)\n size = wav.size()\n out = self.generate(emb, size=size)\n if sample_rate != self.codec_model.sample_rate:\n out = julius.resample_frac(out, self.codec_model.sample_rate, sample_rate)\n return out\n\n def tokens_to_wav(self, tokens: torch.Tensor, n_bands: int = 32):\n \"\"\"Generate Waveform audio with diffusion from the discrete codes.\n Args:\n tokens (torch.Tensor): discrete codes\n n_bands (int): bands for the eq matching.\n \"\"\"\n wav_encodec = self.codec_model.decode(tokens)\n condition = self.get_emb(tokens)\n wav_diffusion = self.generate(emb=condition, size=wav_encodec.size())\n return self.re_eq(wav=wav_diffusion, ref=wav_encodec, n_bands=n_bands)" }, { "identifier": "MusicGen", "path": "audiocraft/models/musicgen.py", "snippet": "class MusicGen:\n \"\"\"MusicGen main model with convenient generation API.\n\n Args:\n name (str): name of the model.\n compression_model (CompressionModel): Compression model\n used to map audio to invertible discrete representations.\n lm (LMModel): Language model over discrete representations.\n max_duration (float, optional): maximum duration the model can produce,\n otherwise, inferred from the training params.\n \"\"\"\n def __init__(self, name: str, compression_model: CompressionModel, lm: LMModel,\n max_duration: tp.Optional[float] = None):\n self.name = name\n self.compression_model = compression_model\n self.lm = lm\n self.cfg: tp.Optional[omegaconf.DictConfig] = None\n # Just to be safe, let's put everything in eval mode.\n self.compression_model.eval()\n self.lm.eval()\n\n if hasattr(lm, 'cfg'):\n cfg = lm.cfg\n assert isinstance(cfg, omegaconf.DictConfig)\n self.cfg = cfg\n\n if self.cfg is not None:\n self.compression_model = get_wrapped_compression_model(self.compression_model, self.cfg)\n\n if max_duration is None:\n if self.cfg is not None:\n max_duration = lm.cfg.dataset.segment_duration # type: ignore\n else:\n raise ValueError(\"You must provide max_duration when building directly MusicGen\")\n assert max_duration is not None\n self.max_duration: float = max_duration\n self.device = next(iter(lm.parameters())).device\n\n self.generation_params: dict = {}\n self.set_generation_params(duration=15) # 15 seconds by default\n self._progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None\n if self.device.type == 'cpu':\n self.autocast = TorchAutocast(enabled=False)\n else:\n self.autocast = TorchAutocast(\n enabled=True, device_type=self.device.type, dtype=torch.float16)\n\n @property\n def frame_rate(self) -> float:\n \"\"\"Roughly the number of AR steps per seconds.\"\"\"\n return self.compression_model.frame_rate\n\n @property\n def sample_rate(self) -> int:\n \"\"\"Sample rate of the generated audio.\"\"\"\n return 
self.compression_model.sample_rate\n\n @property\n def audio_channels(self) -> int:\n \"\"\"Audio channels of the generated audio.\"\"\"\n return self.compression_model.channels\n\n @staticmethod\n def get_pretrained(name: str = 'facebook/musicgen-melody', device=None):\n \"\"\"Return pretrained model, we provide four models:\n - facebook/musicgen-small (300M), text to music,\n # see: https://huggingface.co/facebook/musicgen-small\n - facebook/musicgen-medium (1.5B), text to music,\n # see: https://huggingface.co/facebook/musicgen-medium\n - facebook/musicgen-melody (1.5B) text to music and text+melody to music,\n # see: https://huggingface.co/facebook/musicgen-melody\n - facebook/musicgen-large (3.3B), text to music,\n # see: https://huggingface.co/facebook/musicgen-large\n \"\"\"\n if device is None:\n if torch.cuda.device_count():\n device = 'cuda'\n else:\n device = 'cpu'\n\n if name == 'debug':\n # used only for unit tests\n compression_model = get_debug_compression_model(device)\n lm = get_debug_lm_model(device)\n return MusicGen(name, compression_model, lm, max_duration=30)\n\n if name in _HF_MODEL_CHECKPOINTS_MAP:\n warnings.warn(\n \"MusicGen pretrained model relying on deprecated checkpoint mapping. \" +\n f\"Please use full pre-trained id instead: facebook/musicgen-{name}\")\n name = _HF_MODEL_CHECKPOINTS_MAP[name]\n\n lm = load_lm_model(name, device=device)\n compression_model = load_compression_model(name, device=device)\n if 'self_wav' in lm.condition_provider.conditioners:\n lm.condition_provider.conditioners['self_wav'].match_len_on_eval = True\n lm.condition_provider.conditioners['self_wav']._use_masking = False\n\n return MusicGen(name, compression_model, lm)\n\n def set_generation_params(self, use_sampling: bool = True, top_k: int = 250,\n top_p: float = 0.0, temperature: float = 1.0,\n duration: float = 30.0, cfg_coef: float = 3.0,\n two_step_cfg: bool = False, extend_stride: float = 18):\n \"\"\"Set the generation parameters for MusicGen.\n\n Args:\n use_sampling (bool, optional): Use sampling if True, else do argmax decoding. Defaults to True.\n top_k (int, optional): top_k used for sampling. Defaults to 250.\n top_p (float, optional): top_p used for sampling, when set to 0 top_k is used. Defaults to 0.0.\n temperature (float, optional): Softmax temperature parameter. Defaults to 1.0.\n duration (float, optional): Duration of the generated waveform. Defaults to 30.0.\n cfg_coef (float, optional): Coefficient used for classifier free guidance. Defaults to 3.0.\n two_step_cfg (bool, optional): If True, performs 2 forward for Classifier Free Guidance,\n instead of batching together the two. This has some impact on how things\n are padded but seems to have little impact in practice.\n extend_stride: when doing extended generation (i.e. more than 30 seconds), by how much\n should we extend the audio each time. 
Larger values will mean less context is\n preserved, and shorter value will require extra computations.\n \"\"\"\n assert extend_stride < self.max_duration, \"Cannot stride by more than max generation duration.\"\n self.extend_stride = extend_stride\n self.duration = duration\n self.generation_params = {\n 'use_sampling': use_sampling,\n 'temp': temperature,\n 'top_k': top_k,\n 'top_p': top_p,\n 'cfg_coef': cfg_coef,\n 'two_step_cfg': two_step_cfg,\n }\n\n def set_custom_progress_callback(self, progress_callback: tp.Optional[tp.Callable[[int, int], None]] = None):\n \"\"\"Override the default progress callback.\"\"\"\n self._progress_callback = progress_callback\n\n def generate_unconditional(self, num_samples: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples in an unconditional manner.\n\n Args:\n num_samples (int): Number of samples to be generated.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n descriptions: tp.List[tp.Optional[str]] = [None] * num_samples\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate(self, descriptions: tp.List[str], progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_with_chroma(self, descriptions: tp.List[str], melody_wavs: MelodyType,\n melody_sample_rate: int, progress: bool = False,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=melody_wavs)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation(self, prompt: torch.Tensor, prompt_sample_rate: int,\n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, prompt)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_continuation_with_audio_token(self, prompt, \n descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions, None)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt, melody_wavs=melody_wavs)\n assert prompt_tokens is not None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_audio_chroma(self, prompt, melody_wavs: MelodyType,\n melody_sample_rate: int, descriptions: tp.Optional[tp.List[tp.Optional[str]]] = None,\n progress: bool = False, return_tokens: bool = False) \\\n -> tp.Union[torch.Tensor, tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on audio prompts.\n\n Args:\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n Prompt should be [B, C, T], or [C, T] if only one sample is generated.\n prompt_sample_rate (int): Sampling rate of the given audio waveforms.\n descriptions (list of str, optional): A list of strings used as text conditioning. Defaults to None.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n \"\"\"\n if isinstance(melody_wavs, torch.Tensor):\n if melody_wavs.dim() == 2:\n melody_wavs = melody_wavs[None]\n if melody_wavs.dim() != 3:\n raise ValueError(\"Melody wavs should have a shape [B, C, T].\")\n melody_wavs = list(melody_wavs)\n else:\n for melody in melody_wavs:\n if melody is not None:\n assert melody.dim() == 2, \"One melody in the list has the wrong number of dims.\"\n\n melody_wavs = [\n convert_audio(wav, melody_sample_rate, self.sample_rate, self.audio_channels)\n if wav is not None else None\n for wav in melody_wavs]\n \n if descriptions is None:\n descriptions = [None] * len(prompt)\n \n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None, melody_wavs=melody_wavs)\n assert prompt_tokens is None\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_text_chroma(self, prompt: torch.Tensor, prompt_sample_rate: int, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if prompt.dim() == 2:\n prompt = prompt[None]\n if prompt.dim() != 3:\n raise ValueError(\"prompt should have 3 dimensions: [B, C, T] (C = 1).\")\n prompt = convert_audio(prompt, prompt_sample_rate, self.sample_rate, self.audio_channels)\n\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=prompt,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n\n def generate_continuation_with_audio_tokens_and_text_chroma(self, prompt, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. 
It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n \n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n prompt_tokens = prompt\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n def generate_with_text_chroma(self, descriptions: tp.List[str], chord_texts: tp.Union[tp.List[str],str],\n progress: bool = False, bpm: tp.Union[float,int,tp.List[float],tp.List[int]] = 120, meter: tp.Optional[tp.Union[int,tp.List[int]]] = 4,\n return_tokens: bool = False) -> tp.Union[torch.Tensor,\n tp.Tuple[torch.Tensor, torch.Tensor]]:\n \"\"\"Generate samples conditioned on text and melody.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n melody_wavs: (torch.Tensor or list of Tensor): A batch of waveforms used as\n melody conditioning. Should have shape [B, C, T] with B matching the description length,\n C=1 or 2. It can be [C, T] if there is a single description. It can also be\n a list of [C, T] tensors.\n melody_sample_rate: (int): Sample rate of the melody waveforms.\n progress (bool, optional): Flag to display progress of the generation process. Defaults to False.\n \"\"\"\n if isinstance(chord_texts, str):\n chord_texts = [chord_texts]\n\n attributes, prompt_tokens = self._prepare_tokens_and_attributes(descriptions=descriptions, prompt=None,\n melody_wavs=chord_texts, bpm=bpm, meter=meter)\n assert prompt_tokens is None\n tokens = self._generate_tokens(attributes, prompt_tokens, progress)\n if return_tokens:\n return self.generate_audio(tokens), tokens\n return self.generate_audio(tokens)\n \n @torch.no_grad()\n def _prepare_tokens_and_attributes(\n self,\n descriptions: tp.Sequence[tp.Optional[str]],\n prompt: tp.Optional[torch.Tensor],\n melody_wavs: tp.Optional[tp.Union[MelodyList,tp.List[str]]] = None, bpm: tp.Optional[tp.Union[float,int,tp.List[float],tp.List[int]]] = None, meter:tp.Optional[tp.Union[int,tp.List[int]]] = None\n ) -> tp.Tuple[tp.List[ConditioningAttributes], tp.Optional[torch.Tensor]]:\n \"\"\"Prepare model inputs.\n\n Args:\n descriptions (list of str): A list of strings used as text conditioning.\n prompt (torch.Tensor): A batch of waveforms used for continuation.\n melody_wavs (torch.Tensor, optional): A batch of waveforms\n used as melody conditioning. Defaults to None.\n \"\"\"\n attributes = [\n ConditioningAttributes(text={'description': description})\n for description in descriptions]\n\n if melody_wavs is None:\n for attr in attributes:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n else:\n if 'self_wav' not in self.lm.condition_provider.conditioners:\n raise RuntimeError(\"This model doesn't support melody conditioning. \"\n \"Use the `melody` model.\")\n assert len(melody_wavs) == len(descriptions), \\\n f\"number of melody wavs must match number of descriptions! 
\" \\\n f\"got melody len={len(melody_wavs)}, and descriptions len={len(descriptions)}\"\n\n if bpm is not None and (isinstance(bpm, int) or isinstance(bpm, float)):\n bpm = [bpm for i in range(len(melody_wavs))]\n elif bpm is not None and isinstance(bpm, tp.List):\n assert len(melody_wavs) == len(bpm)\n\n if meter is not None and (isinstance(meter, int) or isinstance(meter, float)):\n meter = [meter for i in range(len(melody_wavs))]\n elif meter is not None and isinstance(meter, tp.List):\n assert len(melody_wavs) == len(meter)\n\n for attr, melody, i in zip(attributes, melody_wavs, range(len(melody_wavs))):\n if melody is None:\n attr.wav['self_wav'] = WavCondition(\n torch.zeros((1, 1, 1), device=self.device),\n torch.tensor([0], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None])\n elif isinstance(melody, torch.Tensor):\n attr.wav['self_wav'] = WavCondition(\n melody[None].to(device=self.device),\n torch.tensor([melody.shape[-1]], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n )\n else :\n attr.wav['self_wav'] = WavChordTextCondition(\n [melody],\n torch.tensor([self.duration*self.sample_rate], device=self.device),\n sample_rate=[self.sample_rate],\n path=[None],\n bpm = [bpm[i]],\n meter = [meter[i]]\n )\n\n if prompt is not None:\n if descriptions is not None:\n assert len(descriptions) == len(prompt), \"Prompt and nb. descriptions doesn't match\"\n prompt = prompt.to(self.device)\n prompt_tokens, scale = self.compression_model.encode(prompt)\n assert scale is None\n else:\n prompt_tokens = None\n return attributes, prompt_tokens\n\n def _generate_tokens(self, attributes: tp.List[ConditioningAttributes],\n prompt_tokens: tp.Optional[torch.Tensor], progress: bool = False) -> torch.Tensor:\n \"\"\"Generate discrete audio tokens given audio prompt and/or conditions.\n\n Args:\n attributes (list of ConditioningAttributes): Conditions used for generation (text/melody).\n prompt_tokens (torch.Tensor, optional): Audio prompt used for continuation.\n progress (bool, optional): Flag to display progress of the generation process. 
Defaults to False.\n Returns:\n torch.Tensor: Generated audio, of shape [B, C, T], T is defined by the generation params.\n \"\"\"\n total_gen_len = int(self.duration * self.frame_rate)\n max_prompt_len = int(min(self.duration, self.max_duration) * self.frame_rate)\n current_gen_offset: int = 0\n\n def _progress_callback(generated_tokens: int, tokens_to_generate: int):\n generated_tokens += current_gen_offset\n if self._progress_callback is not None:\n # Note that total_gen_len might be quite wrong depending on the\n # codebook pattern used, but with delay it is almost accurate.\n self._progress_callback(generated_tokens, total_gen_len)\n else:\n print(f'{generated_tokens: 6d} / {total_gen_len: 6d}', end='\\r')\n\n if prompt_tokens is not None:\n assert max_prompt_len >= prompt_tokens.shape[-1], \\\n \"Prompt is longer than audio to generate\"\n\n callback = None\n if progress:\n callback = _progress_callback\n\n if self.duration <= self.max_duration:\n # generate by sampling from LM, simple case.\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=total_gen_len, **self.generation_params)\n\n else:\n # now this gets a bit messier, we need to handle prompts,\n # melody conditioning etc.\n ref_wavs = [attr.wav['self_wav'] for attr in attributes]\n all_tokens = []\n if prompt_tokens is None:\n prompt_length = 0\n else:\n all_tokens.append(prompt_tokens)\n prompt_length = prompt_tokens.shape[-1]\n\n stride_tokens = int(self.frame_rate * self.extend_stride)\n step = 0\n\n while current_gen_offset + prompt_length < total_gen_len:\n self.lm.condition_provider.conditioners['self_wav'].set_continuation_count(self.extend_stride/self.max_duration, step) #For text based chord conditioning\n time_offset = current_gen_offset / self.frame_rate\n chunk_duration = min(self.duration - time_offset, self.max_duration)\n max_gen_len = int(chunk_duration * self.frame_rate)\n for attr, ref_wav in zip(attributes, ref_wavs):\n if isinstance(ref_wav, WavCondition):\n wav_length = ref_wav.length.item()\n if wav_length == 0:\n continue\n # We will extend the wav periodically if it not long enough.\n # we have to do it here rather than in conditioners.py as otherwise\n # we wouldn't have the full wav.\n initial_position = int(time_offset * self.sample_rate)\n wav_target_length = int(self.max_duration * self.sample_rate)\n positions = torch.arange(initial_position,\n initial_position + wav_target_length, device=self.device)\n attr.wav['self_wav'] = WavCondition(\n ref_wav[0][..., positions % wav_length],\n torch.full_like(ref_wav[1], wav_target_length),\n [self.sample_rate] * ref_wav[0].size(0),\n [None], [0.])\n with self.autocast:\n gen_tokens = self.lm.generate(\n prompt_tokens, attributes,\n callback=callback, max_gen_len=max_gen_len, **self.generation_params)\n if prompt_tokens is None:\n all_tokens.append(gen_tokens)\n else:\n all_tokens.append(gen_tokens[:, :, prompt_tokens.shape[-1]:])\n prompt_tokens = gen_tokens[:, :, stride_tokens:]\n prompt_length = prompt_tokens.shape[-1]\n current_gen_offset += stride_tokens\n step = step + 1\n\n gen_tokens = torch.cat(all_tokens, dim=-1)\n return gen_tokens\n\n def generate_audio(self, gen_tokens: torch.Tensor):\n \"\"\"Generate Audio from tokens\"\"\"\n assert gen_tokens.dim() == 3\n with torch.no_grad():\n gen_audio = self.compression_model.decode(gen_tokens, None)\n return gen_audio" }, { "identifier": "CompressionSolver", "path": "audiocraft/solvers/compression.py", "snippet": "class 
CompressionSolver(base.StandardSolver):\n \"\"\"Solver for compression task.\n\n The compression task combines a set of perceptual and objective losses\n to train an EncodecModel (composed of an encoder-decoder and a quantizer)\n to perform high fidelity audio reconstruction.\n \"\"\"\n def __init__(self, cfg: omegaconf.DictConfig):\n super().__init__(cfg)\n self.rng: torch.Generator # set at each epoch\n self.adv_losses = builders.get_adversarial_losses(self.cfg)\n self.aux_losses = nn.ModuleDict()\n self.info_losses = nn.ModuleDict()\n assert not cfg.fsdp.use, \"FSDP not supported by CompressionSolver.\"\n loss_weights = dict()\n for loss_name, weight in self.cfg.losses.items():\n if loss_name in ['adv', 'feat']:\n for adv_name, _ in self.adv_losses.items():\n loss_weights[f'{loss_name}_{adv_name}'] = weight\n elif weight > 0:\n self.aux_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n loss_weights[loss_name] = weight\n else:\n self.info_losses[loss_name] = builders.get_loss(loss_name, self.cfg)\n self.balancer = builders.get_balancer(loss_weights, self.cfg.balancer)\n self.register_stateful('adv_losses')\n\n @property\n def best_metric_name(self) -> tp.Optional[str]:\n # best model is the last for the compression model\n return None\n\n def build_model(self):\n \"\"\"Instantiate model and optimizer.\"\"\"\n # Model and optimizer\n self.model = models.builders.get_compression_model(self.cfg).to(self.device)\n self.optimizer = builders.get_optimizer(self.model.parameters(), self.cfg.optim)\n self.register_stateful('model', 'optimizer')\n self.register_best_state('model')\n self.register_ema('model')\n\n def build_dataloaders(self):\n \"\"\"Instantiate audio dataloaders for each stage.\"\"\"\n self.dataloaders = builders.get_audio_datasets(self.cfg)\n\n def show(self):\n \"\"\"Show the compression model and employed adversarial loss.\"\"\"\n self.logger.info(f\"Compression model with {self.model.quantizer.total_codebooks} codebooks:\")\n self.log_model_summary(self.model)\n self.logger.info(\"Adversarial loss:\")\n self.log_model_summary(self.adv_losses)\n self.logger.info(\"Auxiliary losses:\")\n self.logger.info(self.aux_losses)\n self.logger.info(\"Info losses:\")\n self.logger.info(self.info_losses)\n\n def run_step(self, idx: int, batch: torch.Tensor, metrics: dict):\n \"\"\"Perform one training or valid step on a given batch.\"\"\"\n x = batch.to(self.device)\n y = x.clone()\n\n qres = self.model(x)\n assert isinstance(qres, quantization.QuantizedResult)\n y_pred = qres.x\n # Log bandwidth in kb/s\n metrics['bandwidth'] = qres.bandwidth.mean()\n\n if self.is_training:\n d_losses: dict = {}\n if len(self.adv_losses) > 0 and torch.rand(1, generator=self.rng).item() <= 1 / self.cfg.adversarial.every:\n for adv_name, adversary in self.adv_losses.items():\n disc_loss = adversary.train_adv(y_pred, y)\n d_losses[f'd_{adv_name}'] = disc_loss\n metrics['d_loss'] = torch.sum(torch.stack(list(d_losses.values())))\n metrics.update(d_losses)\n\n balanced_losses: dict = {}\n other_losses: dict = {}\n\n # penalty from quantization\n if qres.penalty is not None and qres.penalty.requires_grad:\n other_losses['penalty'] = qres.penalty # penalty term from the quantizer\n\n # adversarial losses\n for adv_name, adversary in self.adv_losses.items():\n adv_loss, feat_loss = adversary(y_pred, y)\n balanced_losses[f'adv_{adv_name}'] = adv_loss\n balanced_losses[f'feat_{adv_name}'] = feat_loss\n\n # auxiliary losses\n for loss_name, criterion in self.aux_losses.items():\n loss = criterion(y_pred, 
y)\n balanced_losses[loss_name] = loss\n\n # weighted losses\n metrics.update(balanced_losses)\n metrics.update(other_losses)\n metrics.update(qres.metrics)\n\n if self.is_training:\n # backprop losses that are not handled by balancer\n other_loss = torch.tensor(0., device=self.device)\n if 'penalty' in other_losses:\n other_loss += other_losses['penalty']\n if other_loss.requires_grad:\n other_loss.backward(retain_graph=True)\n ratio1 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio1, torch.Tensor)\n metrics['ratio1'] = ratio1.sqrt()\n\n # balancer losses backward, returns effective training loss\n # with effective weights at the current batch.\n metrics['g_loss'] = self.balancer.backward(balanced_losses, y_pred)\n # add metrics corresponding to weight ratios\n metrics.update(self.balancer.metrics)\n ratio2 = sum(p.grad.data.norm(p=2).pow(2)\n for p in self.model.parameters() if p.grad is not None)\n assert isinstance(ratio2, torch.Tensor)\n metrics['ratio2'] = ratio2.sqrt()\n\n # optim\n flashy.distrib.sync_model(self.model)\n if self.cfg.optim.max_norm:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.cfg.optim.max_norm\n )\n self.optimizer.step()\n self.optimizer.zero_grad()\n\n # informative losses only\n info_losses: dict = {}\n with torch.no_grad():\n for loss_name, criterion in self.info_losses.items():\n loss = criterion(y_pred, y)\n info_losses[loss_name] = loss\n\n metrics.update(info_losses)\n\n # aggregated GAN losses: this is useful to report adv and feat across different adversarial loss setups\n adv_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('adv')]\n if len(adv_losses) > 0:\n metrics['adv'] = torch.sum(torch.stack(adv_losses))\n feat_losses = [loss for loss_name, loss in metrics.items() if loss_name.startswith('feat')]\n if len(feat_losses) > 0:\n metrics['feat'] = torch.sum(torch.stack(feat_losses))\n\n return metrics\n\n def run_epoch(self):\n # reset random seed at the beginning of the epoch\n self.rng = torch.Generator()\n self.rng.manual_seed(1234 + self.epoch)\n # run epoch\n super().run_epoch()\n\n def evaluate(self):\n \"\"\"Evaluate stage. 
Runs audio reconstruction evaluation.\"\"\"\n self.model.eval()\n evaluate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['evaluate']\n updates = len(loader)\n lp = self.log_progress(f'{evaluate_stage_name} inference', loader, total=updates, updates=self.log_updates)\n average = flashy.averager()\n\n pendings = []\n ctx = multiprocessing.get_context('spawn')\n with get_pool_executor(self.cfg.evaluate.num_workers, mp_context=ctx) as pool:\n for idx, batch in enumerate(lp):\n x = batch.to(self.device)\n with torch.no_grad():\n qres = self.model(x)\n\n y_pred = qres.x.cpu()\n y = batch.cpu() # should already be on CPU but just in case\n pendings.append(pool.submit(evaluate_audio_reconstruction, y_pred, y, self.cfg))\n\n metrics_lp = self.log_progress(f'{evaluate_stage_name} metrics', pendings, updates=self.log_updates)\n for pending in metrics_lp:\n metrics = pending.result()\n metrics = average(metrics)\n\n metrics = flashy.distrib.average_metrics(metrics, len(loader))\n return metrics\n\n def generate(self):\n \"\"\"Generate stage.\"\"\"\n self.model.eval()\n sample_manager = SampleManager(self.xp, map_reference_to_sample_id=True)\n generate_stage_name = str(self.current_stage)\n\n loader = self.dataloaders['generate']\n updates = len(loader)\n lp = self.log_progress(generate_stage_name, loader, total=updates, updates=self.log_updates)\n\n for batch in lp:\n reference, _ = batch\n reference = reference.to(self.device)\n with torch.no_grad():\n qres = self.model(reference)\n assert isinstance(qres, quantization.QuantizedResult)\n\n reference = reference.cpu()\n estimate = qres.x.cpu()\n sample_manager.add_samples(estimate, self.epoch, ground_truth_wavs=reference)\n\n flashy.distrib.barrier()\n\n def load_from_pretrained(self, name: str) -> dict:\n model = models.CompressionModel.get_pretrained(name)\n if isinstance(model, models.DAC):\n raise RuntimeError(\"Cannot fine tune a DAC model.\")\n elif isinstance(model, models.HFEncodecCompressionModel):\n self.logger.warning('Trying to automatically convert a HuggingFace model '\n 'to AudioCraft, this might fail!')\n state = model.model.state_dict()\n new_state = {}\n for k, v in state.items():\n if k.startswith('decoder.layers') and '.conv.' in k and '.block.' 
not in k:\n # We need to determine if this a convtr or a regular conv.\n layer = int(k.split('.')[2])\n if isinstance(model.model.decoder.layers[layer].conv, torch.nn.ConvTranspose1d):\n\n k = k.replace('.conv.', '.convtr.')\n k = k.replace('encoder.layers.', 'encoder.model.')\n k = k.replace('decoder.layers.', 'decoder.model.')\n k = k.replace('conv.', 'conv.conv.')\n k = k.replace('convtr.', 'convtr.convtr.')\n k = k.replace('quantizer.layers.', 'quantizer.vq.layers.')\n k = k.replace('.codebook.', '._codebook.')\n new_state[k] = v\n state = new_state\n elif isinstance(model, models.EncodecModel):\n state = model.state_dict()\n else:\n raise RuntimeError(f\"Cannot fine tune model type {type(model)}.\")\n return {\n 'best_state': {'model': state}\n }\n\n @staticmethod\n def model_from_checkpoint(checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a CompressionModel from a given checkpoint path or dora sig.\n This method is a convenient endpoint to load a CompressionModel to use in other solvers.\n\n Args:\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n This also supports pre-trained models by using a path of the form //pretrained/NAME.\n See `model_from_pretrained` for a list of supported pretrained models.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n checkpoint_path = str(checkpoint_path)\n if checkpoint_path.startswith('//pretrained/'):\n name = checkpoint_path.split('/', 3)[-1]\n return models.CompressionModel.get_pretrained(name, device)\n logger = logging.getLogger(__name__)\n logger.info(f\"Loading compression model from checkpoint: {checkpoint_path}\")\n _checkpoint_path = checkpoint.resolve_checkpoint_path(checkpoint_path, use_fsdp=False)\n assert _checkpoint_path is not None, f\"Could not resolve compression model checkpoint path: {checkpoint_path}\"\n state = checkpoint.load_checkpoint(_checkpoint_path)\n assert state is not None and 'xp.cfg' in state, f\"Could not load compression model from ckpt: {checkpoint_path}\"\n cfg = state['xp.cfg']\n cfg.device = device\n compression_model = models.builders.get_compression_model(cfg).to(device)\n assert compression_model.sample_rate == cfg.sample_rate, \"Compression model sample rate should match\"\n\n assert 'best_state' in state and state['best_state'] != {}\n assert 'exported' not in state, \"When loading an exported checkpoint, use the //pretrained/ prefix.\"\n compression_model.load_state_dict(state['best_state']['model'])\n compression_model.eval()\n logger.info(\"Compression model loaded!\")\n return compression_model\n\n @staticmethod\n def wrapped_model_from_checkpoint(cfg: omegaconf.DictConfig,\n checkpoint_path: tp.Union[Path, str],\n device: tp.Union[torch.device, str] = 'cpu') -> models.CompressionModel:\n \"\"\"Instantiate a wrapped CompressionModel from a given checkpoint path or dora sig.\n\n Args:\n cfg (omegaconf.DictConfig): Configuration to read from for wrapped mode.\n checkpoint_path (Path or str): Path to checkpoint or dora sig from where the checkpoint is resolved.\n use_ema (bool): Use EMA variant of the model instead of the actual model.\n device (torch.device or str): Device on which the model is loaded.\n \"\"\"\n compression_model = CompressionSolver.model_from_checkpoint(checkpoint_path, device)\n compression_model = 
models.builders.get_wrapped_compression_model(compression_model, cfg)\n return compression_model" }, { "identifier": "load_compression_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_compression_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_compression_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n if 'pretrained' in pkg:\n return CompressionModel.get_pretrained(pkg['pretrained'], device=device)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n model = builders.get_compression_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n return model" }, { "identifier": "load_lm_model", "path": "audiocraft/models/loaders.py", "snippet": "def load_lm_model(file_or_url_or_id: tp.Union[Path, str], device='cpu', cache_dir: tp.Optional[str] = None):\n pkg = load_lm_model_ckpt(file_or_url_or_id, cache_dir=cache_dir)\n cfg = OmegaConf.create(pkg['xp.cfg'])\n cfg.device = str(device)\n if cfg.device == 'cpu':\n cfg.dtype = 'float32'\n else:\n cfg.dtype = 'float16'\n _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')\n _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')\n _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')\n _delete_param(cfg, 'conditioners.args.drop_desc_p')\n model = builders.get_lm_model(cfg)\n model.load_state_dict(pkg['best_state'])\n model.eval()\n model.cfg = cfg\n return model" }, { "identifier": "audio_write", "path": "audiocraft/data/audio.py", "snippet": "def audio_write(stem_name: tp.Union[str, Path],\n wav: torch.Tensor, sample_rate: int,\n format: str = 'wav', mp3_rate: int = 320, ogg_rate: tp.Optional[int] = None,\n normalize: bool = True, strategy: str = 'peak', peak_clip_headroom_db: float = 1,\n rms_headroom_db: float = 18, loudness_headroom_db: float = 14,\n loudness_compressor: bool = False,\n log_clipping: bool = True, make_parent_dir: bool = True,\n add_suffix: bool = True) -> Path:\n \"\"\"Convenience function for saving audio to disk. Returns the filename the audio was written to.\n\n Args:\n stem_name (str or Path): Filename without extension which will be added automatically.\n wav (torch.Tensor): Audio data to save.\n sample_rate (int): Sample rate of audio data.\n format (str): Either \"wav\", \"mp3\", \"ogg\", or \"flac\".\n mp3_rate (int): kbps when using mp3s.\n ogg_rate (int): kbps when using ogg/vorbis. If not provided, let ffmpeg decide for itself.\n normalize (bool): if `True` (default), normalizes according to the prescribed\n strategy (see after). If `False`, the strategy is only used in case clipping\n would happen.\n strategy (str): Can be either 'clip', 'peak', or 'rms'. Default is 'peak',\n i.e. audio is normalized by its largest value. RMS normalizes by root-mean-square\n with extra headroom to avoid clipping. 'clip' just clips.\n peak_clip_headroom_db (float): Headroom in dB when doing 'peak' or 'clip' strategy.\n rms_headroom_db (float): Headroom in dB when doing 'rms' strategy. 
This must be much larger\n than the `peak_clip` one to avoid further clipping.\n loudness_headroom_db (float): Target loudness for loudness normalization.\n loudness_compressor (bool): Uses tanh for soft clipping when strategy is 'loudness'.\n when strategy is 'loudness' log_clipping (bool): If True, basic logging on stderr when clipping still\n occurs despite strategy (only for 'rms').\n make_parent_dir (bool): Make parent directory if it doesn't exist.\n Returns:\n Path: Path of the saved audio.\n \"\"\"\n assert wav.dtype.is_floating_point, \"wav is not floating point\"\n if wav.dim() == 1:\n wav = wav[None]\n elif wav.dim() > 2:\n raise ValueError(\"Input wav should be at most 2 dimension.\")\n assert wav.isfinite().all()\n wav = normalize_audio(wav, normalize, strategy, peak_clip_headroom_db,\n rms_headroom_db, loudness_headroom_db, loudness_compressor,\n log_clipping=log_clipping, sample_rate=sample_rate,\n stem_name=str(stem_name))\n if format == 'mp3':\n suffix = '.mp3'\n flags = ['-f', 'mp3', '-c:a', 'libmp3lame', '-b:a', f'{mp3_rate}k']\n elif format == 'wav':\n suffix = '.wav'\n flags = ['-f', 'wav', '-c:a', 'pcm_s16le']\n elif format == 'ogg':\n suffix = '.ogg'\n flags = ['-f', 'ogg', '-c:a', 'libvorbis']\n if ogg_rate is not None:\n flags += ['-b:a', f'{ogg_rate}k']\n elif format == 'flac':\n suffix = '.flac'\n flags = ['-f', 'flac']\n else:\n raise RuntimeError(f\"Invalid format {format}. Only wav or mp3 are supported.\")\n if not add_suffix:\n suffix = ''\n path = Path(str(stem_name) + suffix)\n if make_parent_dir:\n path.parent.mkdir(exist_ok=True, parents=True)\n try:\n _piping_to_ffmpeg(path, wav, sample_rate, flags)\n except Exception:\n if path.exists():\n # we do not want to leave half written files around.\n path.unlink()\n raise\n return path" }, { "identifier": "get_lm_model", "path": "audiocraft/models/builders.py", "snippet": "def get_lm_model(cfg: omegaconf.DictConfig) -> LMModel:\n \"\"\"Instantiate a transformer LM.\"\"\"\n if cfg.lm_model == 'transformer_lm':\n kwargs = dict_from_config(getattr(cfg, 'transformer_lm'))\n n_q = kwargs['n_q']\n q_modeling = kwargs.pop('q_modeling', None)\n codebooks_pattern_cfg = getattr(cfg, 'codebooks_pattern')\n attribute_dropout = dict_from_config(getattr(cfg, 'attribute_dropout'))\n cls_free_guidance = dict_from_config(getattr(cfg, 'classifier_free_guidance'))\n cfg_prob, cfg_coef = cls_free_guidance['training_dropout'], cls_free_guidance['inference_coef']\n fuser = get_condition_fuser(cfg)\n condition_provider = get_conditioner_provider(kwargs[\"dim\"], cfg).to(cfg.device)\n if len(fuser.fuse2cond['cross']) > 0: # enforce cross-att programmatically\n kwargs['cross_attention'] = True\n if codebooks_pattern_cfg.modeling is None:\n assert q_modeling is not None, \\\n \"LM model should either have a codebook pattern defined or transformer_lm.q_modeling\"\n codebooks_pattern_cfg = omegaconf.OmegaConf.create(\n {'modeling': q_modeling, 'delay': {'delays': list(range(n_q))}}\n )\n pattern_provider = get_codebooks_pattern_provider(n_q, codebooks_pattern_cfg)\n return LMModel(\n pattern_provider=pattern_provider,\n condition_provider=condition_provider,\n fuser=fuser,\n cfg_dropout=cfg_prob,\n cfg_coef=cfg_coef,\n attribute_dropout=attribute_dropout,\n dtype=getattr(torch, cfg.dtype),\n device=cfg.device,\n **kwargs\n ).to(cfg.device)\n else:\n raise KeyError(f\"Unexpected LM model {cfg.lm_model}\")" } ]
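The context snippets above document the end-to-end generation API that the target file builds on: MusicGen.get_pretrained loads the language model together with its compression model, set_generation_params configures sampling, generate returns a [B, C, T] waveform tensor, and audio_write saves it to disk. For orientation, a minimal sketch of that flow follows, assuming the audiocraft package is installed; the model name, prompt, and output stem are illustrative only, not taken from this record.

# Minimal sketch of the MusicGen flow documented in the snippets above.
# Illustrative only: model name, prompt, and output stem are placeholders.
from audiocraft.models import MusicGen
from audiocraft.data.audio import audio_write

model = MusicGen.get_pretrained('facebook/musicgen-small')        # loads LM + compression model
model.set_generation_params(duration=8, top_k=250, cfg_coef=3.0)  # sampling settings
wavs = model.generate(['warm lo-fi hip hop with soft piano'])     # tensor of shape [B, C, T]
for i, wav in enumerate(wavs):
    # audio_write appends the file extension and peak-normalizes by default
    audio_write(f'sample_{i}', wav.cpu(), model.sample_rate, strategy='peak')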
import os
import random
import torchaudio
import typing as tp
import numpy as np
import torch
import subprocess
from typing import Optional
from cog import BasePredictor, Input, Path
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models import MusicGen, MultiBandDiffusion
from audiocraft.solvers.compression import CompressionSolver
from audiocraft.models.loaders import (
    load_compression_model,
    load_lm_model,
)
from audiocraft.data.audio import audio_write
from audiocraft.models.builders import get_lm_model
from omegaconf import OmegaConf
17,926
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.
MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports
def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)

def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
    compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
    return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm)

class Predictor(BasePredictor):
    def setup(self, weights: Optional[Path] = None):
        """Load the model into memory to make running multiple predictions efficient"""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.mbd = MultiBandDiffusion.get_mbd_musicgen()
        if str(weights) == "weights":
            weights = None
        if weights is not None:
            print("Fine-tuned model weights loaded!")
            self.model = load_ckpt(weights, self.device, url=True)

    def _load_model(
        self,
        model_path: str,
        cls: Optional[any] = None,
        load_args: Optional[dict] = {},
        model_id: Optional[str] = None,
        device: Optional[str] = None,
    ) -> MusicGen:

        if device is None:
            device = self.device
# Prediction interface for Cog ⚙️
# https://github.com/replicate/cog/blob/main/docs/python.md
# We need to set `TRANSFORMERS_CACHE` before any imports, which is why this is up here.
MODEL_PATH = "/src/models/"
os.environ["TRANSFORMERS_CACHE"] = MODEL_PATH
os.environ["TORCH_HOME"] = MODEL_PATH

# Model specific imports
def _delete_param(cfg, full_name: str):
    parts = full_name.split('.')
    for part in parts[:-1]:
        if part in cfg:
            cfg = cfg[part]
        else:
            return
    OmegaConf.set_struct(cfg, False)
    if parts[-1] in cfg:
        del cfg[parts[-1]]
    OmegaConf.set_struct(cfg, True)

def load_ckpt(path, device, url=False):
    if url:
        loaded = torch.hub.load_state_dict_from_url(str(path))
    else:
        loaded = torch.load(str(path))
    cfg = OmegaConf.create(loaded['xp.cfg'])
    cfg.device = str(device)
    if cfg.device == 'cpu':
        cfg.dtype = 'float32'
    else:
        cfg.dtype = 'float16'
    _delete_param(cfg, 'conditioners.self_wav.chroma_chord.cache_path')
    _delete_param(cfg, 'conditioners.self_wav.chroma_stem.cache_path')
    _delete_param(cfg, 'conditioners.args.merge_text_conditions_p')
    _delete_param(cfg, 'conditioners.args.drop_desc_p')
    lm = get_lm_model(loaded['xp.cfg'])
    lm.load_state_dict(loaded['model'])
    lm.eval()
    lm.cfg = cfg
    compression_model = CompressionSolver.model_from_checkpoint(cfg.compression_model_checkpoint, device=device)
    return MusicGen(f"{os.getenv('COG_USERNAME')}/musicgen-chord", compression_model, lm)

class Predictor(BasePredictor):
    def setup(self, weights: Optional[Path] = None):
        """Load the model into memory to make running multiple predictions efficient"""
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.mbd = MultiBandDiffusion.get_mbd_musicgen()
        if str(weights) == "weights":
            weights = None
        if weights is not None:
            print("Fine-tuned model weights loaded!")
            self.model = load_ckpt(weights, self.device, url=True)

    def _load_model(
        self,
        model_path: str,
        cls: Optional[any] = None,
        load_args: Optional[dict] = {},
        model_id: Optional[str] = None,
        device: Optional[str] = None,
    ) -> MusicGen:

        if device is None:
            device = self.device
compression_model = load_compression_model(
4
2023-10-09 09:52:24+00:00
24k
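The predictor code earlier in this record loads fine-tuned weights by rebuilding the language model from the checkpoint's stored config (load_ckpt) and pairing it with a compression model resolved through CompressionSolver.model_from_checkpoint. A hedged sketch of exercising that path outside the Cog runtime is shown below; the checkpoint URL and prompt are placeholders, not real artifacts from this record, and in production Cog supplies the weights argument itself.

# Hypothetical driver for the Predictor defined in this record's code.
# The checkpoint URL and prompt are placeholders; a real fine-tuned
# checkpoint URL would be required for this to download anything.
predictor = Predictor()
predictor.setup(weights="https://example.com/checkpoints/musicgen_chord.th")
predictor.model.set_generation_params(duration=10)
audio = predictor.model.generate(["upbeat pop with a simple chord progression"])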
zhijie-group/LOVECon
test_lovecon.py
[ { "identifier": "UNetPseudo3DConditionModel", "path": "video_diffusion/models/unet_3d_condition.py", "snippet": "class UNetPseudo3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n mid_block_type: str = \"UNetMidBlockPseudo3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n \"CrossAttnUpBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n **kwargs\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n \n # input\n self.conv_in = PseudoConv3d(in_channels, block_out_channels[0], \n kernel_size=3, padding=(1, 1), model_config=kwargs)\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n kwargs_copy=copy.deepcopy(kwargs)\n temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n and (not is_final_block))\n kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n # kwargs_copy.update({'SparseCausalAttention_index': temporal_downsample_i} )\n if temporal_downsample_i:\n print(f'Initialize model temporal downsample at layer {i}')\n down_block = get_down_block(\n down_block_type,\n 
num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n # mid\n if mid_block_type == \"UNetMidBlockPseudo3DCrossAttn\":\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n model_config=kwargs\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n \n kwargs_copy=copy.deepcopy(kwargs)\n kwargs_copy.update({'temporal_downsample': \n i < (self.temporal_downsample_time-1)})\n if i < (self.temporal_downsample_time-1):\n print(f'Initialize model temporal updample at layer {i}')\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n model_config=kwargs_copy\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n self.conv_out = PseudoConv3d(block_out_channels[0], out_channels, \n kernel_size=3, padding=1, model_config=kwargs)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input 
tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = (\n num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n )\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(\n module,\n (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D, CrossAttnUpBlockPseudo3D, UpBlockPseudo3D),\n ):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None, # None\n attention_mask: Optional[torch.Tensor] = None, # None\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNetPseudo3DConditionOutput, Tuple]:\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when 
sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None: # None\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample: # False\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n new_down_block_res_samples += (down_block_res_sample + down_block_additional_residual,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. 
mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n # for i in down_block_res_samples: print(i.shape) \n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 16, 64, 64])\n # torch.Size([1, 320, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 8, 32, 32])\n # torch.Size([1, 640, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 4, 16, 16])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n # torch.Size([1, 1280, 2, 8, 8])\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n \n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n )\n # 6. post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNetPseudo3DConditionOutput(sample=sample)\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n config[\"up_block_types\"] = [convert_2d_to_3d_block(block) for block in config[\"up_block_types\"]]\n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n 
continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ControlNetPseudo3DModel", "path": "video_diffusion/models/controlnet_3d_condition.py", "snippet": "class ControlNetPseudo3DModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n **kwargs\n ):\n super().__init__()\n\n if 'temporal_downsample' in kwargs and kwargs['temporal_downsample'] is True:\n kwargs['temporal_downsample_time'] = 3\n self.temporal_downsample_time = kwargs.get('temporal_downsample_time', 0)\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n # self.conv_in = PseudoConv3d(\n # in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n # )\n self.conv_in = InflatedConv3d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetPseudo3DConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n #non temperal \n # kwargs_copy=copy.deepcopy(kwargs)\n # temporal_downsample_i = ((i >= (len(down_block_types)-self.temporal_downsample_time))\n # and (not is_final_block))\n # kwargs_copy.update({'temporal_downsample': temporal_downsample_i} )\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n 
cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n # model_config=kwargs_copy\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n # controlnet_block = PseudoConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n # controlnet_block = PseudoConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = InflatedConv3d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlockPseudo3DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n # model_config=kwargs\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlockPseudo3D, DownBlockPseudo3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetPseudo3DOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n \n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb)\n\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n # print(sample.shape,controlnet_cond.shape)\n sample += controlnet_cond\n \n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n\n # 5. 
Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetPseudo3DOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, control_temporal_idx=None, control_mid_temporal=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"CrossAttnDownBlockPseudo3D\",\n \"DownBlockPseudo3D\"\n ]\n # config[\"control_temporal_idx\"] = control_temporal_idx\n # config[\"control_mid_temporal\"] = control_mid_temporal\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n\n state_dict = torch.load(model_file, map_location=\"cpu\")\n for k, v in model.state_dict().items():\n if '_temp.' 
in k:\n if 'conv' in k:\n state_dict.update({k: v})\n else:\n copyk = k\n copyk = copyk.replace('_temp.', '1.')\n state_dict.update({k: state_dict[copyk]})\n model.load_state_dict(state_dict)\n\n return model\n\n\n @classmethod\n def from_2d_model(cls, model_path, model_config):\n config_path = os.path.join(model_path, \"config.json\")\n if not os.path.isfile(config_path):\n raise RuntimeError(f\"{config_path} does not exist\")\n with open(config_path, \"r\") as f:\n config = json.load(f)\n\n config.pop(\"_class_name\")\n config.pop(\"_diffusers_version\")\n\n block_replacer = {\n \"CrossAttnDownBlock2D\": \"CrossAttnDownBlockPseudo3D\",\n \"DownBlock2D\": \"DownBlockPseudo3D\",\n \"UpBlock2D\": \"UpBlockPseudo3D\",\n \"CrossAttnUpBlock2D\": \"CrossAttnUpBlockPseudo3D\",\n }\n\n def convert_2d_to_3d_block(block):\n return block_replacer[block] if block in block_replacer else block\n\n config[\"down_block_types\"] = [\n convert_2d_to_3d_block(block) for block in config[\"down_block_types\"]\n ]\n \n if model_config is not None:\n config.update(model_config)\n\n model = cls(**config)\n\n state_dict_path_condidates = glob.glob(os.path.join(model_path, \"*.bin\"))\n if state_dict_path_condidates:\n state_dict = torch.load(state_dict_path_condidates[0], map_location=\"cpu\")\n model.load_2d_state_dict(state_dict=state_dict)\n\n return model\n\n def load_2d_state_dict(self, state_dict, **kwargs):\n state_dict_3d = self.state_dict()\n\n for k, v in state_dict.items():\n if k not in state_dict_3d:\n raise KeyError(f\"2d state_dict key {k} does not exist in 3d model\")\n elif v.shape != state_dict_3d[k].shape:\n raise ValueError(f\"state_dict shape mismatch, 2d {v.shape}, 3d {state_dict_3d[k].shape}\")\n\n for k, v in state_dict_3d.items():\n if \"_temporal\" in k:\n continue\n if k not in state_dict:\n raise KeyError(f\"3d state_dict key {k} does not exist in 2d model\")\n\n state_dict_3d.update(state_dict)\n self.load_state_dict(state_dict_3d, **kwargs)" }, { "identifier": "ImageSequenceDataset", "path": "video_diffusion/data/dataset.py", "snippet": "class ImageSequenceDataset(Dataset):\n def __init__(\n self,\n path: str,\n prompt_ids: torch.Tensor,\n prompt: str,\n start_sample_frame: int=0,\n n_sample_frame: int = 8,\n sampling_rate: int = 1,\n stride: int = -1, # only used during tuning to sample a long video\n image_mode: str = \"RGB\",\n image_size: int = 512,\n crop: str = \"center\",\n \n class_data_root: str = None,\n class_prompt_ids: torch.Tensor = None,\n \n offset: dict = {\n \"left\": 0,\n \"right\": 0,\n \"top\": 0,\n \"bottom\": 0\n },\n **args\n \n ):\n self.path = path\n self.images = self.get_image_list(path)\n self.n_images = len(self.images)\n self.offset = offset\n self.start_sample_frame = start_sample_frame\n if n_sample_frame < 0:\n n_sample_frame = len(self.images) \n self.n_sample_frame = n_sample_frame\n # local sampling rate from the video\n self.sampling_rate = sampling_rate\n\n self.sequence_length = (n_sample_frame - 1) * sampling_rate + 1\n if self.n_images < self.sequence_length:\n raise ValueError(f\"self.n_images {self.n_images } < self.sequence_length {self.sequence_length}: Required number of frames {self.sequence_length} larger than total frames in the dataset {self.n_images }\")\n \n # During tuning if video is too long, we sample the long video every self.stride globally\n self.stride = stride if stride > 0 else (self.n_images+1)\n self.video_len = (self.n_images - self.sequence_length) // self.stride + 1\n\n self.image_mode = image_mode\n self.image_size 
= image_size\n crop_methods = {\n \"center\": center_crop,\n \"random\": random_crop,\n }\n if crop not in crop_methods:\n raise ValueError\n self.crop = crop_methods[crop]\n\n self.prompt = prompt\n self.prompt_ids = prompt_ids\n # Negative prompt for regularization to avoid overfitting during one-shot tuning\n if class_data_root is not None:\n self.class_data_root = Path(class_data_root)\n self.class_images_path = sorted(list(self.class_data_root.iterdir()))\n self.num_class_images = len(self.class_images_path)\n self.class_prompt_ids = class_prompt_ids\n \n \n def __len__(self):\n max_len = (self.n_images - self.sequence_length) // self.stride + 1\n \n if hasattr(self, 'num_class_images'):\n max_len = max(max_len, self.num_class_images)\n \n return max_len\n\n def __getitem__(self, index):\n return_batch = {}\n frame_indices = self.get_frame_indices(index%self.video_len)\n frames = [self.load_frame(i) for i in frame_indices]\n frames = self.transform(frames)\n\n return_batch.update(\n {\n \"images\": frames,\n \"prompt_ids\": self.prompt_ids,\n }\n )\n\n if hasattr(self, 'class_data_root'):\n class_index = index % (self.num_class_images - self.n_sample_frame)\n class_indices = self.get_class_indices(class_index) \n frames = [self.load_class_frame(i) for i in class_indices]\n return_batch[\"class_images\"] = self.tensorize_frames(frames)\n return_batch[\"class_prompt_ids\"] = self.class_prompt_ids\n return return_batch\n \n def transform(self, frames):\n frames = self.tensorize_frames(frames)\n frames = offset_crop(frames, **self.offset)\n frames = short_size_scale(frames, size=self.image_size)\n frames = self.crop(frames, height=self.image_size, width=self.image_size)\n return frames\n\n @staticmethod\n def tensorize_frames(frames):\n frames = rearrange(np.stack(frames), \"f h w c -> c f h w\")\n return torch.from_numpy(frames).div(255) * 2 - 1\n\n def load_frame(self, index):\n image_path = os.path.join(self.path, self.images[index])\n return Image.open(image_path).convert(self.image_mode)\n\n def load_class_frame(self, index):\n image_path = self.class_images_path[index]\n return Image.open(image_path).convert(self.image_mode)\n\n def get_frame_indices(self, index):\n if self.start_sample_frame is not None:\n frame_start = self.start_sample_frame + self.stride * index\n else:\n frame_start = self.stride * index\n return (frame_start + i * self.sampling_rate for i in range(self.n_sample_frame))\n\n def get_class_indices(self, index):\n frame_start = index\n return (frame_start + i for i in range(self.n_sample_frame))\n\n @staticmethod\n def get_image_list(path):\n images = []\n for file in sorted(os.listdir(path)):\n if file.endswith(IMAGE_EXTENSION):\n images.append(file)\n return images" }, { "identifier": "get_time_string", "path": "video_diffusion/common/util.py", "snippet": "def get_time_string() -> str:\n x = datetime.datetime.now()\n return f\"{(x.year - 2000):02d}{x.month:02d}{x.day:02d}-{x.hour:02d}{x.minute:02d}{x.second:02d}\"" }, { "identifier": "get_function_args", "path": "video_diffusion/common/util.py", "snippet": "def get_function_args() -> Dict:\n frame = sys._getframe(1)\n args, _, _, values = inspect.getargvalues(frame)\n args_dict = copy.deepcopy({arg: values[arg] for arg in args})\n\n return args_dict" }, { "identifier": "get_logger_config_path", "path": "video_diffusion/common/logger.py", "snippet": "def get_logger_config_path(logdir):\n # accelerate handles the logger in multiprocessing\n logger = get_logger(__name__)\n logging.basicConfig(\n level=logging.INFO, 
\n format='%(asctime)s:%(levelname)s : %(message)s', \n datefmt='%a, %d %b %Y %H:%M:%S', \n filename=os.path.join(logdir, 'log.log'),\n filemode='w')\n chlr = logging.StreamHandler()\n chlr.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s : %(message)s'))\n logger.logger.addHandler(chlr)\n return logger" }, { "identifier": "log_train_samples", "path": "video_diffusion/common/image_util.py", "snippet": "def log_train_samples(\n train_dataloader,\n save_path,\n num_batch: int = 4,\n):\n train_samples = []\n for idx, batch in enumerate(train_dataloader):\n if idx >= num_batch:\n break\n train_samples.append(batch[\"images\"])\n\n train_samples = torch.cat(train_samples).numpy()\n train_samples = rearrange(train_samples, \"b c f h w -> b f h w c\")\n train_samples = (train_samples * 0.5 + 0.5).clip(0, 1)\n train_samples = numpy_batch_seq_to_pil(train_samples)\n train_samples = [make_grid(images, cols=int(np.ceil(np.sqrt(len(train_samples))))) for images in zip(*train_samples)]\n # save_images_as_gif(train_samples, save_path)\n save_gif_mp4_folder_type(train_samples, save_path)" }, { "identifier": "instantiate_from_config", "path": "video_diffusion/common/instantiate_from_config.py", "snippet": "def instantiate_from_config(config:dict, **args_from_code):\n \"\"\"Util funciton to decompose differenct modules using config\n\n Args:\n config (dict): with key of \"target\" and \"params\", better from yaml\n static \n args_from_code: additional con\n\n\n Returns:\n a validation/training pipeline, a module\n \"\"\"\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()), **args_from_code)" }, { "identifier": "P2pSampleLogger", "path": "video_diffusion/pipelines/p2p_validation_loop_controlnet.py", "snippet": "class P2pSampleLogger:\n def __init__(\n self,\n editing_prompts: List[str],\n clip_length: int,\n logdir: str,\n subdir: str = \"sample\",\n num_samples_per_prompt: int = 1,\n sample_seeds: List[int] = None,\n num_inference_steps: int = 20,\n guidance_scale: float = 7,\n strength: float = None,\n annotate: bool = False,\n annotate_size: int = 15,\n use_make_grid: bool = True,\n grid_column_size: int = 2,\n prompt2prompt_edit: bool=False,\n p2p_config: dict = None,\n use_inversion_attention: bool = True,\n source_prompt: str = None,\n traverse_p2p_config: bool = False,\n **args\n ) -> None:\n self.editing_prompts = editing_prompts\n self.clip_length = clip_length\n self.guidance_scale = guidance_scale\n self.num_inference_steps = num_inference_steps\n self.strength = strength\n\n if sample_seeds is None:\n max_num_samples_per_prompt = int(1e5)\n if num_samples_per_prompt > max_num_samples_per_prompt:\n raise ValueError\n sample_seeds = torch.randint(0, max_num_samples_per_prompt, (num_samples_per_prompt,))\n sample_seeds = sorted(sample_seeds.numpy().tolist())\n self.sample_seeds = sample_seeds\n\n self.logdir = os.path.join(logdir, subdir)\n os.makedirs(self.logdir)\n\n self.annotate = annotate\n self.annotate_size = annotate_size\n self.make_grid = use_make_grid\n self.grid_column_size = grid_column_size\n self.prompt2prompt_edit = prompt2prompt_edit\n self.p2p_config = p2p_config\n self.use_inversion_attention = use_inversion_attention\n self.source_prompt = source_prompt\n self.traverse_p2p_config =traverse_p2p_config\n\n def log_sample_images(\n self, pipeline: 
DiffusionPipeline,\n device: torch.device, step: int,\n image: Union[torch.FloatTensor, PIL.Image.Image] = None,\n control_image: torch.FloatTensor = None,\n latents: torch.FloatTensor = None,\n mask:torch.FloatTensor = None,\n editing_type:str = \"attribute\",\n uncond_embeddings_list: List[torch.FloatTensor] = None,\n save_dir = None,\n duration = 100,\n fps = 10,\n use_interpolater = True\n ):\n torch.cuda.empty_cache()\n samples_all = []\n attention_all = []\n # handle input image\n if image is not None:\n input_pil_images = pipeline.numpy_to_pil(tensor_to_numpy(image))[0]\n if self.annotate :\n samples_all.append([\n annotate_image(image, \"input sequence\", font_size=self.annotate_size) for image in input_pil_images\n ])\n else:\n samples_all.append(input_pil_images)\n if isinstance(self.editing_prompts,str):\n self.editing_prompts = [self.editing_prompts]\n for idx, prompt in enumerate(tqdm(self.editing_prompts, desc=\"Generating sample images\")):\n # if self.prompt2prompt_edit:\n # if self.traverse_p2p_config:\n # p2p_config_now = copy.deepcopy(self.p2p_config[idx])\n # else:\n # p2p_config_now = copy.deepcopy(self.p2p_config[idx])\n\n # if idx == 0 and not self.use_inversion_attention:\n # edit_type = 'save'\n # p2p_config_now.update({'save_self_attention': True})\n # print('Reflash the attention map in pipeline')\n\n # else:\n # edit_type = 'swap'\n # p2p_config_now.update({'save_self_attention': False})\n\n # p2p_config_now.update({'use_inversion_attention': self.use_inversion_attention})\n # else:\n # edit_type = None\n\n input_prompt = prompt\n\n # generator = torch.Generator(device=device)\n # generator.manual_seed(seed)\n generator = None\n sequence = []\n window = 8\n window = min(window,self.clip_length)\n start_frame = 0\n end_frame = window\n patch_index = 0\n while start_frame < self.clip_length:\n torch.cuda.empty_cache()\n if patch_index == 0:\n sequence_return = pipeline(\n prompt=input_prompt,\n source_prompt = self.editing_prompts[0] if self.source_prompt is None else self.source_prompt,\n # edit_type = edit_type,\n image=image[[0] + [0] + list(range(start_frame,min(self.clip_length,end_frame))),], # torch.Size([8, 3, 512, 512])\n strength=self.strength,\n generator=generator,\n # window = 1,\n num_inference_steps=self.num_inference_steps,\n guidance_scale=self.guidance_scale,\n num_images_per_prompt=1,\n # used in null inversion\n editing_type = editing_type,\n latents = [timestep_latent[:, :,[0] + [0] + list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n mask = mask[:,:, [0] + [0] + list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n # latents = [timestep_latent[:, :,list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n # mask = mask[:,:, list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n uncond_embeddings_list = uncond_embeddings_list,\n save_path = save_dir,\n # **p2p_config_now,\n )\n else:\n sequence_return = pipeline(\n prompt=input_prompt,\n reference_global_latents = reference_global_latents,\n reference_latents = reference_latents,\n source_prompt = self.editing_prompts[0] if self.source_prompt is None else self.source_prompt,\n # edit_type = edit_type,\n image=image[[0] + list(range(start_frame - 1,min(self.clip_length,end_frame))),], # torch.Size([8, 3, 512, 512])\n strength=self.strength,\n generator=generator,\n # window = window,\n 
num_inference_steps=self.num_inference_steps,\n guidance_scale=self.guidance_scale,\n num_images_per_prompt=1,\n # used in null inversion\n editing_type = editing_type,\n latents = [timestep_latent[:, :,[0] + list(range(start_frame-1,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n mask = mask[:,:, [0] + list(range(start_frame-1, min(self.clip_length,end_frame))),] if mask is not None else None,\n # latents = [timestep_latent[:, :,list(range(start_frame,min(self.clip_length,end_frame))), :, :] for timestep_latent in latents],\n # mask = mask[:,:, list(range(start_frame, min(self.clip_length,end_frame))),] if mask is not None else None,\n uncond_embeddings_list = uncond_embeddings_list,\n save_path = save_dir,\n # **p2p_config_now,\n )\n start_frame = end_frame\n end_frame = end_frame + window\n if patch_index == 0:\n reference_global_latents = sequence_return['reference_global_latents']\n reference_latents = sequence_return['reference_latents']\n patch_index = patch_index + 1\n # if self.prompt2prompt_edit:\n # sequence_temp = sequence_return['sdimage_output'].images[0]\n # # attention_output = sequence_return['attention_output']\n # else:\n # sequence_temp = sequence_return.images[0]\n sequence_temp = sequence_return['sdimage_output'].images[0]\n sequence = sequence + sequence_temp\n torch.cuda.empty_cache()\n # sequence = torch.cat(sequence,dim = 2)\n\n if self.annotate:\n images = [\n annotate_image(image, prompt, font_size=self.annotate_size) for image in sequence\n ]\n else:\n images = sequence\n control_images = []\n for i in range(control_image.shape[2]):\n control_images.append(Image.fromarray((control_image[0,:,i]*255).cpu().numpy().transpose(1,2,0).astype(np.uint8)))\n #smoother start\n if use_interpolater:\n for i in range(len(images)):\n images[i] = np.array(images[i]).transpose(2,0,1)[None:]/255\n frames = torch.from_numpy(np.stack(images, axis= 0)).cuda()\n f, C, H, W = frames.shape\n ph = ((H - 1) // 32 + 1) * 32\n pw = ((W - 1) // 32 + 1) * 32\n padding = (0, pw - W, 0, ph - H)\n frames = F.pad(frames,padding)\n smoother = Model()\n smoother.load_model('RIFEModel', -1)\n print('using smoother')\n with torch.no_grad():\n for i in range(f - 2):\n img0 = frames[i:i+1].float()\n img1 = frames[i+2:i+3].float()\n mid = smoother.inference(img0,img1)\n mid_padded = F.pad(mid,padding)\n frames[i+1:i+2,] = (frames[i+1:i+2,] + mid_padded[None:])/2\n torch.cuda.empty_cache()\n images = []\n for i in range(len(frames)):\n images.append(Image.fromarray((frames[i] * 255).cpu().numpy().astype(np.uint8).transpose(1,2,0)))\n # smoother end\n if self.make_grid:\n samples_all.append(control_images)\n samples_all.append(images)\n # if self.prompt2prompt_edit:\n # if attention_output is not None:\n # attention_all.append(attention_output)\n\n save_path = os.path.join(self.logdir, f\"step_{step}_{idx}.gif\")\n save_gif_mp4_folder_type(images, save_path,duration = duration,fps = fps)\n\n # if self.prompt2prompt_edit:\n\n # if attention_output is not None:\n # save_gif_mp4_folder_type(attention_output, save_path.replace('.gif', 'atten.gif'),duration = duration,fps = fps)\n\n if self.make_grid:\n samples_all = [make_grid(images, cols=int(len(samples_all))) for images in zip(*samples_all)]\n save_path = os.path.join(self.logdir, f\"step_{step}.gif\")\n save_gif_mp4_folder_type(samples_all, save_path,duration = duration,fps = fps)\n if self.prompt2prompt_edit:\n if len(attention_all) > 0 :\n attention_all = [make_grid(images, cols=1) for images in zip(*attention_all)]\n if 
len(attention_all) > 0:\n save_gif_mp4_folder_type(attention_all, save_path.replace('.gif', 'atten.gif'),duration = duration,fps = fps)\n return samples_all" }, { "identifier": "get_control", "path": "annotator/util.py", "snippet": "def get_control(type):\n if type == 'canny':\n from .canny import CannyDetector\n apply_control = CannyDetector()\n elif type == 'openpose':\n from .openpose import OpenposeDetector\n apply_control = OpenposeDetector()\n elif type == 'depth' or type == 'normal':\n from .midas import MidasDetector\n apply_control = MidasDetector()\n elif type == 'hed':\n from .hed import HEDdetector\n apply_control = HEDdetector()\n elif type == 'scribble':\n apply_control = None\n elif type == 'seg':\n from .uniformer import UniformerDetector\n apply_control = UniformerDetector()\n elif type == 'mlsd':\n from .mlsd import MLSDdetector\n apply_control = MLSDdetector()\n else:\n raise TypeError(type)\n return apply_control" }, { "identifier": "DDIMInterpolationScheduler", "path": "video_diffusion/pipelines/DDIMInterpolationScheduler.py", "snippet": "class DDIMInterpolationScheduler(DDIMScheduler):\n \"\"\"\n Denoising diffusion implicit models is a scheduler that extends the denoising procedure introduced in denoising\n diffusion probabilistic models (DDPMs) with non-Markovian guidance.\n\n [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__`\n function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`.\n [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and\n [`~SchedulerMixin.from_pretrained`] functions.\n\n For more details, see the original paper: https://arxiv.org/abs/2010.02502\n\n Args:\n num_train_timesteps (`int`): number of diffusion steps used to train the model.\n beta_start (`float`): the starting `beta` value of inference.\n beta_end (`float`): the final `beta` value.\n beta_schedule (`str`):\n the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from\n `linear`, `scaled_linear`, or `squaredcos_cap_v2`.\n trained_betas (`np.ndarray`, optional):\n option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc.\n clip_sample (`bool`, default `True`):\n option to clip predicted sample between -1 and 1 for numerical stability.\n set_alpha_to_one (`bool`, default `True`):\n each diffusion step uses the value of alphas product at that step and at the previous one. For the final\n step there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,\n otherwise it uses the value of alpha at step 0.\n steps_offset (`int`, default `0`):\n an offset added to the inference steps. 
You can use a combination of `offset=1` and\n `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in\n stable diffusion.\n prediction_type (`str`, default `epsilon`, optional):\n prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion\n process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4\n https://imagen.research.google/video/paper.pdf)\n \"\"\"\n\n _compatibles = _COMPATIBLE_STABLE_DIFFUSION_SCHEDULERS.copy()\n _deprecated_kwargs = [\"predict_epsilon\"]\n order = 1\n\n def set_model(self,vae,interpolater):\n self.interpolater = interpolater\n self.vae = vae\n \n \n def decode_latents(self, latents):\n is_video = (latents.dim() == 5)\n b = latents.shape[0]\n latents = 1 / 0.18215 * latents\n \n if is_video:\n latents = rearrange(latents, \"b c f h w -> (b f) c h w\") # torch.Size([70, 4, 64, 64])\n\n latents_split = torch.split(latents, 16, dim=0)\n image = torch.cat([self.vae.decode(l).sample for l in latents_split], dim=0)\n \n # image_full = self.vae.decode(latents).sample\n # RuntimeError: upsample_nearest_nhwc only supports output tensors with less than INT_MAX elements\n # Pytorch upsample alogrithm not work for batch size 32 -> 64 \n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16\n\n # image = image.cpu().float().numpy()\n # if is_video:\n # image = rearrange(image, \"(b f) c h w -> b f h w c\", b=b)\n # else:\n # image = rearrange(image, \"b c h w -> b h w c\", b=b)\n return image\n def encode_latents(self,images,generator = None):\n if len(images.shape) == 4:\n images = images[None:]\n images = ((images - 0.5) * 2 ) \n latents = self.vae.encode(images).latent_dist.sample(generator)\n latents = latents * 0.18215\n return latents\n\n def step(\n self,\n model_output: torch.FloatTensor,\n timestep: int,\n sample: torch.FloatTensor,\n eta: float = 0.0,\n use_clipped_model_output: bool = False,\n generator=None,\n variance_noise: Optional[torch.FloatTensor] = None,\n return_dict: bool = True,\n ) -> Union[DDIMSchedulerOutput, Tuple]:\n \"\"\"\n Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion\n process from the learned model outputs (most often the predicted noise).\n\n Args:\n model_output (`torch.FloatTensor`): direct output from learned diffusion model.\n timestep (`int`): current discrete timestep in the diffusion chain.\n sample (`torch.FloatTensor`):\n current instance of sample being created by diffusion process.\n eta (`float`): weight of noise for added noise in diffusion step.\n use_clipped_model_output (`bool`): if `True`, compute \"corrected\" `model_output` from the clipped\n predicted original sample. Necessary because predicted original sample is clipped to [-1, 1] when\n `self.config.clip_sample` is `True`. If no clipping has happened, \"corrected\" `model_output` would\n coincide with the one provided as input and `use_clipped_model_output` will have not effect.\n generator: random number generator.\n variance_noise (`torch.FloatTensor`): instead of generating noise for the variance using `generator`, we\n can directly provide the noise for the variance itself. This is useful for methods such as\n CycleDiffusion. 
(https://arxiv.org/abs/2210.05559)\n return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class\n\n Returns:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:\n [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When\n returning a tuple, the first element is the sample tensor.\n\n \"\"\"\n if self.num_inference_steps is None:\n raise ValueError(\n \"Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler\"\n )\n\n # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf\n # Ideally, read DDIM paper in-detail understanding\n\n # Notation (<variable name> -> <name in paper>\n # - pred_noise_t -> e_theta(x_t, t)\n # - pred_original_sample -> f_theta(x_t, t) or x_0\n # - std_dev_t -> sigma_t\n # - eta -> η\n # - pred_sample_direction -> \"direction pointing to x_t\"\n # - pred_prev_sample -> \"x_t-1\"\n\n # 1. get previous step value (=t-1)\n prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps\n\n # 2. compute alphas, betas\n alpha_prod_t = self.alphas_cumprod[timestep]\n alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod\n\n beta_prod_t = 1 - alpha_prod_t\n\n # 3. compute predicted original sample from predicted noise also called\n # \"predicted x_0\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n if self.config.prediction_type == \"epsilon\":\n pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)\n elif self.config.prediction_type == \"sample\":\n pred_original_sample = model_output\n elif self.config.prediction_type == \"v_prediction\":\n pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output\n # predict V\n model_output = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample\n else:\n raise ValueError(\n f\"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or\"\n \" `v_prediction`\"\n )\n\n # # add a interpolater\n images = self.decode_latents(pred_original_sample)\n\n f , C, H, W = images.shape\n # images = torch.from_numpy(images).cuda()\n ph = ((H - 1) // 32 + 1) * 32\n pw = ((W - 1) // 32 + 1) * 32\n padding = (0, pw - W, 0, ph - H)\n images= F.pad(images,padding).float()\n for i in range(1,f-2):\n img0 = images[i:i+1]\n img1 = images[i+2:i+3] \n inference_img = self.interpolater.inference(img0,img1)\n images[i+1:i+2] = inference_img\n pred_original_sample = self.encode_latents(images.to(self.vae.dtype),generator)\n pred_original_sample = rearrange(pred_original_sample[None], 'b f c h w -> b c f h w') \n\n \n # 4. Clip \"predicted x_0\"\n if self.config.clip_sample:\n pred_original_sample = torch.clamp(pred_original_sample, -1, 1)\n\n # 5. compute variance: \"sigma_t(η)\" -> see formula (16)\n # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)\n variance = self._get_variance(timestep, prev_timestep)\n std_dev_t = eta * variance ** (0.5)\n\n if use_clipped_model_output:\n # the model_output is always re-derived from the clipped x_0 in Glide\n model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)\n\n # 6. compute \"direction pointing to x_t\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output\n\n # 7. 
compute x_t without \"random noise\" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf\n prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction\n\n if eta > 0:\n # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072\n device = model_output.device\n if variance_noise is not None and generator is not None:\n raise ValueError(\n \"Cannot pass both generator and variance_noise. Please make sure that either `generator` or\"\n \" `variance_noise` stays `None`.\"\n )\n\n if variance_noise is None:\n if device.type == \"mps\":\n # randn does not work reproducibly on mps\n variance_noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator)\n variance_noise = variance_noise.to(device)\n else:\n variance_noise = torch.randn(\n model_output.shape, generator=generator, device=device, dtype=model_output.dtype\n )\n variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * variance_noise\n\n prev_sample = prev_sample + variance\n\n if not return_dict:\n return (prev_sample,)\n\n return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)" }, { "identifier": "Model", "path": "RIFEModel/RIFE_HDv3.py", "snippet": "class Model:\n def __init__(self, local_rank=-1):\n self.flownet = IFNet()\n self.device()\n self.optimG = AdamW(self.flownet.parameters(), lr=1e-6, weight_decay=1e-4)\n self.epe = EPE()\n # self.vgg = VGGPerceptualLoss().to(device)\n self.sobel = SOBEL()\n if local_rank != -1:\n self.flownet = DDP(self.flownet, device_ids=[local_rank], output_device=local_rank)\n\n def train(self):\n self.flownet.train()\n\n def eval(self):\n self.flownet.eval()\n\n def device(self):\n self.flownet.to(device)\n\n def load_model(self, path, rank=0):\n def convert(param):\n if rank == -1:\n return {\n k.replace(\"module.\", \"\"): v\n for k, v in param.items()\n if \"module.\" in k\n }\n else:\n return param\n if rank <= 0:\n if torch.cuda.is_available():\n self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path))))\n else:\n self.flownet.load_state_dict(convert(torch.load('{}/flownet.pkl'.format(path), map_location ='cpu')))\n \n def save_model(self, path, rank=0):\n if rank == 0:\n torch.save(self.flownet.state_dict(),'{}/flownet.pkl'.format(path))\n\n def inference(self, img0, img1, scale=1.0):\n imgs = torch.cat((img0, img1), 1)\n scale_list = [4/scale, 2/scale, 1/scale]\n flow, mask, merged = self.flownet(imgs, scale_list)\n return merged[2]\n \n def update(self, imgs, gt, learning_rate=0, mul=1, training=True, flow_gt=None):\n for param_group in self.optimG.param_groups:\n param_group['lr'] = learning_rate\n img0 = imgs[:, :3]\n img1 = imgs[:, 3:]\n if training:\n self.train()\n else:\n self.eval()\n scale = [4, 2, 1]\n flow, mask, merged = self.flownet(torch.cat((imgs, gt), 1), scale=scale, training=training)\n loss_l1 = (merged[2] - gt).abs().mean()\n loss_smooth = self.sobel(flow[2], flow[2]*0).mean()\n # loss_vgg = self.vgg(merged[2], gt)\n if training:\n self.optimG.zero_grad()\n loss_G = loss_cons + loss_smooth * 0.1\n loss_G.backward()\n self.optimG.step()\n else:\n flow_teacher = flow[2]\n return merged[2], {\n 'mask': mask,\n 'flow': flow[2][:, :2],\n 'loss_l1': loss_l1,\n 'loss_cons': loss_cons,\n 'loss_smooth': loss_smooth,\n }" } ]
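A note on the two snippets directly above: DDIMInterpolationScheduler and the RIFE Model are combined to temporally smooth decoded frames. Both follow the same recipe, namely pad each frame so height and width are multiples of 32, ask RIFE (Model.inference(img0, img1)) to re-synthesize each interior frame from its two neighbours, and put the result back (the sampler averages it with the original frame, the scheduler substitutes it). The sketch below isolates that loop; the interpolate callable is a stand-in for the RIFE model (here just an average of the two neighbours), so it runs without the RIFEModel weights.

import torch
import torch.nn.functional as F

def smooth_frames(frames: torch.Tensor, interpolate) -> torch.Tensor:
    """Blend each interior frame with a version re-synthesized from its neighbours.

    frames: (f, c, h, w) in [0, 1]; interpolate(img0, img1) -> middle frame,
    e.g. RIFE's Model.inference in the repository.
    """
    f, c, h, w = frames.shape
    # RIFE expects spatial sizes that are multiples of 32, so pad right/bottom.
    ph = ((h - 1) // 32 + 1) * 32
    pw = ((w - 1) // 32 + 1) * 32
    padding = (0, pw - w, 0, ph - h)
    padded = F.pad(frames, padding)
    for i in range(f - 2):
        mid = interpolate(padded[i:i + 1], padded[i + 2:i + 3])
        # Average the predicted middle frame with the original one (as the sampler does).
        padded[i + 1:i + 2] = (padded[i + 1:i + 2] + mid) / 2
    return padded[:, :, :h, :w]

# Stand-in interpolator: a plain average of the two neighbouring frames.
fake_rife = lambda img0, img1: (img0 + img1) / 2
video = torch.rand(8, 3, 100, 60)   # 8 frames, sizes deliberately not multiples of 32
smoothed = smooth_frames(video, fake_rife)
print(smoothed.shape)               # torch.Size([8, 3, 100, 60])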
import os
import copy
import click
import re
import numpy as np
import torch
import torch.utils.data
import torch.utils.checkpoint
import decord
import shutil
from glob import glob
from typing import Optional, Dict
from tqdm.auto import tqdm
from omegaconf import OmegaConf
from PIL import Image
from accelerate import Accelerator
from accelerate.logging import get_logger
from accelerate.utils import set_seed
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
)
from diffusers.utils.import_utils import is_xformers_available
from transformers import AutoTokenizer, CLIPTextModel
from einops import rearrange
from video_diffusion.models.unet_3d_condition import UNetPseudo3DConditionModel
from video_diffusion.models.controlnet_3d_condition import ControlNetPseudo3DModel
from video_diffusion.data.dataset import ImageSequenceDataset
from video_diffusion.common.util import get_time_string, get_function_args
from video_diffusion.common.logger import get_logger_config_path
from video_diffusion.common.image_util import log_train_samples
from video_diffusion.common.instantiate_from_config import instantiate_from_config
from video_diffusion.pipelines.p2p_validation_loop_controlnet import P2pSampleLogger
from annotator.util import get_control
from video_diffusion.pipelines.DDIMInterpolationScheduler import DDIMInterpolationScheduler
from RIFEModel.RIFE_HDv3 import Model
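The imported UNetPseudo3DConditionModel and ControlNetPseudo3DModel are built from 2D checkpoints via from_2d_model, shown in the context above: read the 2D config.json, rename the 2D block types to their Pseudo3D counterparts, then copy the 2D weights into the inflated model while the newly added "_temporal" layers keep their fresh initialization. The sketch below factors out that renaming and state-dict merge on plain dictionaries so it runs without any diffusers checkpoint on disk; the toy keys at the bottom are illustrative, not real parameter names.

import copy

BLOCK_REPLACER = {
    "CrossAttnDownBlock2D": "CrossAttnDownBlockPseudo3D",
    "DownBlock2D": "DownBlockPseudo3D",
    "UpBlock2D": "UpBlockPseudo3D",
    "CrossAttnUpBlock2D": "CrossAttnUpBlockPseudo3D",
}

def convert_2d_config(config_2d: dict) -> dict:
    """Rename 2D block types to their Pseudo3D counterparts (as in from_2d_model)."""
    config = copy.deepcopy(config_2d)
    for key in ("down_block_types", "up_block_types"):
        if key in config:
            config[key] = [BLOCK_REPLACER.get(b, b) for b in config[key]]
    return config

def inflate_state_dict(state_dict_2d: dict, state_dict_3d: dict) -> dict:
    """Copy 2D weights over the 3D keys; '_temporal' layers keep their fresh init."""
    merged = dict(state_dict_3d)
    for k in state_dict_2d:
        if k not in merged:
            raise KeyError(f"2d key {k} missing in 3d model")
    for k in merged:
        if "_temporal" not in k and k not in state_dict_2d:
            raise KeyError(f"3d key {k} missing in 2d model")
    merged.update(state_dict_2d)
    return merged

# Toy example (keys are illustrative only).
cfg = {"down_block_types": ["CrossAttnDownBlock2D", "DownBlock2D"]}
print(convert_2d_config(cfg)["down_block_types"])
sd2d = {"conv_in.weight": 1.0}
sd3d = {"conv_in.weight": 0.0, "conv_in_temporal.weight": 0.0}
print(inflate_state_dict(sd2d, sd3d))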
20,217
shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) #加载unet报错 unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids) train_dataloader = torch.utils.data.DataLoader( video_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn, ) train_sample_save_path = os.path.join(logdir, "train_samples.gif")
decord.bridge.set_bridge('torch') # from video_diffusion.pipelines.p2p_validation_loop_controlnet_ablation import P2pSampleLogger # logger = get_logger(__name__) def collate_fn(examples): """Concat a batch of sampled image in dataloader """ batch = { "prompt_ids": torch.cat([example["prompt_ids"] for example in examples], dim=0), "images": torch.stack([example["images"] for example in examples]), } return batch def test( config: str, pretrained_model_path: str, control_type:str, pretrained_controlnet_model_path :str, dataset_config: Dict, logdir: str = None, editing_config: Optional[Dict] = None, test_pipeline_config: Optional[Dict] = None, gradient_accumulation_steps: int = 1, seed: Optional[int] = None, mixed_precision: Optional[str] = "fp16", batch_size: int = 1, model_config: dict={}, verbose: bool=True, **kwargs ): args = get_function_args() vr = decord.VideoReader(dataset_config.video_path) fps = vr.get_avg_fps() duration = len(vr) / fps print("There are {} frames in the video but we take {} frames".format(len(vr), dataset_config.n_sample_frame)) if dataset_config.n_sample_frame <= 50: duration = 100 fps = 10 sample_index = list(range(0,len(vr), 1))[:dataset_config.n_sample_frame] video = vr.get_batch(sample_index) video_name_match = re.search(r"(.*)/(.*).mp4", dataset_config.video_path) video_name = video_name_match.group(2) video_frame_folder = os.path.join('data',video_name) if os.path.exists(video_frame_folder): shutil.rmtree(video_frame_folder) os.makedirs(video_frame_folder,exist_ok=True) for i in range(video.shape[0]): frame = video[i] frame_path = os.path.join(video_frame_folder,f'frame-{i:04}.jpg') frame = Image.fromarray(frame.numpy().astype(np.uint8)) frame.save(frame_path) dataset_config.update({'path': video_frame_folder} ) time_string = get_time_string() if logdir is None: logdir = config.replace('config', 'result').replace('.yml', '').replace('.yaml', '') logdir += f"_{time_string}" accelerator = Accelerator( gradient_accumulation_steps=gradient_accumulation_steps, mixed_precision=mixed_precision, ) if accelerator.is_main_process: os.makedirs(logdir, exist_ok=True) OmegaConf.save(args, os.path.join(logdir, "config.yml")) logger = get_logger_config_path(logdir) if seed is not None: set_seed(seed) # Load the tokenizer tokenizer = AutoTokenizer.from_pretrained( pretrained_model_path, subfolder="tokenizer", use_fast=False, ) # Load models and create wrapper for stable diffusion text_encoder = CLIPTextModel.from_pretrained( pretrained_model_path, subfolder="text_encoder", ) vae = AutoencoderKL.from_pretrained( pretrained_model_path, subfolder="vae", ) #加载unet报错 unet = UNetPseudo3DConditionModel.from_2d_model( os.path.join(pretrained_model_path, "unet"), model_config=model_config ) controlnet = ControlNetPseudo3DModel.from_2d_model( pretrained_controlnet_model_path, model_config=model_config ) if 'target' not in test_pipeline_config: test_pipeline_config['target'] = 'video_diffusion.pipelines.stable_diffusion.SpatioTemporalStableDiffusionControlPipeline' scheduler = DDIMScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) pipeline = instantiate_from_config( test_pipeline_config, vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, control_type = control_type, editing_type = editing_config.editing_type, dilation_kernel = editing_config.dilation_kernel, disk_store=kwargs.get('disk_store', False) ) pipeline.scheduler.set_timesteps(editing_config['num_inference_steps']) if 
editing_config.use_interpolater: new_scheduler = DDIMInterpolationScheduler.from_pretrained( pretrained_model_path, subfolder="scheduler", ) interpolater = Model() interpolater.load_model('RIFEModel', -1) new_scheduler.set_model(vae,interpolater) print('using interpolater') pipeline.add_new_scheduler(new_scheduler) pipeline.new_scheduler.set_timesteps(editing_config['num_inference_steps']) pipeline.set_progress_bar_config(disable=True) # pipeline.print_pipeline(logger) if is_xformers_available(): try: pipeline.enable_xformers_memory_efficient_attention() except Exception as e: logger.warning( "Could not enable memory efficient attention. Make sure xformers is installed" f" correctly and a GPU is available: {e}" ) vae.requires_grad_(False) unet.requires_grad_(False) text_encoder.requires_grad_(False) prompt_ids = tokenizer( dataset_config["prompt"], truncation=True, padding="max_length", max_length=tokenizer.model_max_length, return_tensors="pt", ).input_ids video_dataset = ImageSequenceDataset(**dataset_config, prompt_ids=prompt_ids) train_dataloader = torch.utils.data.DataLoader( video_dataset, batch_size=batch_size, shuffle=True, num_workers=4, collate_fn=collate_fn, ) train_sample_save_path = os.path.join(logdir, "train_samples.gif")
log_train_samples(save_path=train_sample_save_path, train_dataloader=train_dataloader)
6
2023-10-09 14:38:28+00:00
24k
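
The row above is one complete example: a list of retrieved context snippets (each carrying an "identifier", "path", and "snippet"), an import block, a token count, a truncated code prefix, the full file, the single gold next line, an integer index into the snippet list, a creation timestamp, and a context-length bucket ("24k"). A minimal sketch of how such a row could be consumed for next-line evaluation, assuming the dump is stored as JSON Lines and using hypothetical top-level field names ("cropped_code", "next_line") that are not taken from a published schema:

import json
from typing import Callable, Dict, List


def load_records(path: str) -> List[Dict]:
    # One record per line; the JSON Lines layout is an assumption about the dump.
    with open(path, "r", encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]


def evaluate(records: List[Dict], predict_next_line: Callable[[str], str]) -> float:
    # `predict_next_line` maps a code prefix to one generated line of code,
    # e.g. a thin wrapper around a language-model call supplied by the caller.
    hits = 0
    for rec in records:
        pred = predict_next_line(rec["cropped_code"])            # assumed field name
        hits += int(pred.strip() == rec["next_line"].strip())    # assumed field name
    return hits / max(len(records), 1)

Exact string match is deliberately strict here; looser metrics such as edit similarity or identifier overlap could be substituted without changing the loop.
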
ielab/llm-rankers
run.py
[ { "identifier": "SearchResult", "path": "rankers/rankers.py", "snippet": "class SearchResult:\n docid: str\n score: float\n text: str" }, { "identifier": "PointwiseLlmRanker", "path": "rankers/pointwise.py", "snippet": "class PointwiseLlmRanker(LlmRanker):\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, method=\"qlm\", batch_size=1, cache_dir=None):\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.device = device\n self.method = method\n self.batch_size = batch_size\n\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n if self.method == \"qlm\":\n prompt = \"Passage: {text}\\nPlease write a question based on this passage.\"\n data = [prompt.format(text=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n labels = self.tokenizer.encode(f\"<pad> {query}\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_labels = labels if labels.shape[0] == len(batch_inputs['input_ids']) \\\n else labels[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n self.total_prompt_tokens += batch_labels.shape[0] * batch_labels.shape[\n 1] # we count decoder inputs as part of prompt.\n\n batch_inputs = batch_inputs.to(self.llm.device)\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n labels=batch_labels).logits\n\n loss_fct = torch.nn.CrossEntropyLoss(reduction=\"none\")\n scores = loss_fct(logits.view(-1, logits.size(-1)), batch_labels.view(-1))\n scores = -1 * scores.view(-1, batch_labels.size(-1)).sum(dim=1) # neg log prob\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n elif self.method == \"yes_no\":\n prompt = \"Passage: {text}\\nQuery: {query}\\nDoes the passage answer the query? 
Answer 'Yes' or 'No'\"\n yes_id = self.tokenizer.encode(\"Yes\", add_special_tokens=False)[0]\n no_id = self.tokenizer.encode(\"No\", add_special_tokens=False)[0]\n data = [prompt.format(text=doc.text, query=query) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.tokenizer.pad_token_id]).to(self.llm.device, dtype=torch.long).repeat(self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n yes_scores = logits[:, :, yes_id]\n no_scores = logits[:, :, no_id]\n batch_scores = torch.cat((yes_scores, no_scores), dim=1)\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 0]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "MonoT5LlmRanker", "path": "rankers/pointwise.py", "snippet": "class MonoT5LlmRanker(PointwiseLlmRanker):\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n prompt = \"Query: {query} Document: {document} Relevant:\"\n data = [prompt.format(query=query, document=doc.text) for doc in ranking]\n dataset = Text2TextGenerationDataset(data, self.tokenizer)\n loader = DataLoader(\n dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n decoder_input_ids = torch.Tensor([self.llm.config.decoder_start_token_id]).to(self.llm.device, dtype=torch.long).repeat(\n self.batch_size, 1)\n current_id = 0\n with torch.no_grad():\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_inputs = batch_inputs.to(self.llm.device)\n\n batch_decoder_input_ids = decoder_input_ids if decoder_input_ids.shape[0] == len(\n batch_inputs['input_ids']) \\\n else decoder_input_ids[:len(batch_inputs['input_ids']), :] # last batch might be smaller\n\n self.total_prompt_tokens += batch_decoder_input_ids.shape[0] * batch_decoder_input_ids.shape[\n 1]\n\n logits = self.llm(input_ids=batch_inputs['input_ids'],\n attention_mask=batch_inputs['attention_mask'],\n decoder_input_ids=batch_decoder_input_ids).logits\n\n # 6136 and 1176 are the indexes of the tokens false and true in 
T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n scores = batch_scores[:, 1]\n for score in scores:\n ranking[current_id].score = score.item()\n current_id += 1\n\n ranking = sorted(ranking, key=lambda x: x.score, reverse=True)\n return ranking" }, { "identifier": "SetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class SetwiseLlmRanker(LlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\", \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self,\n model_name_or_path,\n tokenizer_name_or_path,\n device,\n num_child=3,\n k=10,\n scoring='generation',\n method=\"heapsort\",\n num_permutation=1,\n cache_dir=None):\n\n self.device = device\n self.num_child = num_child\n self.num_permutation = num_permutation\n self.k = k\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path,\n cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n\n test = []\n for i in range(len(self.CHARACTERS)):\n test.append(f'<pad> Passage {self.CHARACTERS[i]}')\n\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n\n self.scoring = scoring\n self.method = method\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1 if self.num_permutation == 1 else self.num_permutation\n\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n\n if self.num_permutation == 1:\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n output = output[-1]\n else:\n id_passage = [(i, p) for i, p in enumerate(docs)]\n labels = [self.CHARACTERS[i] for i in range(len(docs))]\n batch_data = []\n for _ in range(self.num_permutation):\n batch_data.append([random.sample(id_passage, len(id_passage)),\n random.sample(labels, len(labels))])\n\n batch_ref = []\n input_text = []\n for batch in batch_data:\n ref = []\n passages = []\n characters = []\n for p, c in zip(batch[0], batch[1]):\n ref.append(p[0])\n passages.append(p[1].text)\n characters.append(c)\n batch_ref.append((ref, characters))\n passages = \"\\n\\n\".join([f'Passage {characters[i]}: \"{passages[i]}\"' for i in range(len(passages))])\n input_text.append(f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:')\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1] * input_ids.shape[0]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids.repeat(input_ids.shape[0], 1),\n max_new_tokens=2)\n output = self.tokenizer.batch_decode(output_ids[:, self.decoder_input_ids.shape[1]:],\n skip_special_tokens=True)\n\n # vote\n candidates = []\n for ref, result in zip(batch_ref, output):\n result = result.strip().upper()\n docids, characters = ref\n if len(result) != 1 or result not in characters:\n print(f\"Unexpected output: {result}\")\n continue\n win_doc = docids[characters.index(result)]\n candidates.append(win_doc)\n\n if len(candidates) == 0:\n print(f\"Unexpected voting: {output}\")\n output = \"Unexpected voting.\"\n else:\n # handle tie\n 
candidate_counts = Counter(candidates)\n max_count = max(candidate_counts.values())\n most_common_candidates = [candidate for candidate, count in candidate_counts.items() if\n count == max_count]\n if len(most_common_candidates) == 1:\n output = self.CHARACTERS[most_common_candidates[0]]\n else:\n output = self.CHARACTERS[random.choice(most_common_candidates)]\n\n elif self.config.model_type == 'llama':\n conversation = [{\"role\": \"user\", \"content\": input_text}]\n\n prompt = self.tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)\n prompt += \" Passage:\"\n\n input_ids = self.tokenizer(prompt, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)[0]\n\n self.total_completion_tokens += output_ids.shape[0]\n\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n else:\n raise NotImplementedError\n\n elif self.scoring == 'likelihood':\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip(self.CHARACTERS[:len(docs)], scores), key=lambda x: x[1], reverse=True)\n output = ranked[0][0]\n\n else:\n raise NotImplementedError\n\n if len(output) == 1 and output in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n\n return output\n\n def heapify(self, arr, n, i, query):\n # Find largest among root and children\n if self.num_child * i + 1 < n: # if there are children\n docs = [arr[i]] + arr[self.num_child * i + 1: min((self.num_child * (i + 1) + 1), n)]\n inds = [i] + list(range(self.num_child * i + 1, min((self.num_child * (i + 1) + 1), n)))\n output = self.compare(query, docs)\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n try:\n largest = inds[best_ind]\n except IndexError:\n largest = i\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest, query)\n\n def heapSort(self, arr, query, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // self.num_child, -1, -1):\n self.heapify(arr, n, i, query)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0, query)\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n self.heapSort(ranking, query, self.k)\n ranking = list(reversed(ranking))\n\n # elif self.method == \"bubblesort\":\n # for i in range(k):\n # start_ind = len(ranking) - (self.num_child + 1)\n # end_ind = len(ranking)\n # while True:\n # if start_ind < i:\n # start_ind = i\n # output = self.compare(query, ranking[start_ind:end_ind])\n # try:\n # best_ind = self.CHARACTERS.index(output)\n # except ValueError:\n # best_ind = 0\n # if best_ind != 0:\n # ranking[start_ind], 
ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n #\n # if start_ind == i:\n # break\n #\n # start_ind -= self.num_child\n # end_ind -= self.num_child\n elif self.method == \"bubblesort\":\n last_start = len(ranking) - (self.num_child + 1)\n\n for i in range(self.k):\n start_ind = last_start\n end_ind = last_start + (self.num_child + 1)\n is_change = False\n while True:\n if start_ind < i:\n start_ind = i\n output = self.compare(query, ranking[start_ind:end_ind])\n try:\n best_ind = self.CHARACTERS.index(output)\n except ValueError:\n best_ind = 0\n if best_ind != 0:\n ranking[start_ind], ranking[start_ind + best_ind] = ranking[start_ind + best_ind], ranking[start_ind]\n if not is_change:\n is_change = True\n if last_start != len(ranking) - (self.num_child + 1) \\\n and best_ind == len(ranking[start_ind:end_ind])-1:\n last_start += len(ranking[start_ind:end_ind])-1\n\n if start_ind == i:\n break\n\n if not is_change:\n last_start -= self.num_child\n\n start_ind -= self.num_child\n end_ind -= self.num_child\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "OpenAiSetwiseLlmRanker", "path": "rankers/setwise.py", "snippet": "class OpenAiSetwiseLlmRanker(SetwiseLlmRanker):\n def __init__(self, model_name_or_path, api_key, num_child=3, method='heapsort', k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.num_child = num_child\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pool of passages based on their relevance to the query.\"\n openai.api_key = api_key\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage.'\n\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-Z])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. 
retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "PairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class PairwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path,\n tokenizer_name_or_path,\n device,\n method=\"allpair\",\n batch_size=2,\n k=10,\n cache_dir=None\n ):\n self.device = device\n self.method = method\n self.batch_size = batch_size\n self.k = k\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n\nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.llm.device)\n self.decoder_input_ids = self.decoder_input_ids.repeat(self.batch_size, 1)\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.tokenizer.pad_token = \"[PAD]\"\n self.tokenizer.padding_side = \"left\"\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n if self.config.model_type == 't5':\n input_ids = self.tokenizer(input_texts,\n padding='longest',\n return_tensors=\"pt\").input_ids.to(self.llm.device)\n\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n decoder_input_ids=self.decoder_input_ids,\n max_new_tokens=2)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output = self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)\n\n elif self.config.model_type == 'llama':\n conversation0 = [{\"role\": \"user\", \"content\": input_texts[0]}]\n conversation1 = [{\"role\": \"user\", \"content\": input_texts[1]}]\n\n prompt0 = self.tokenizer.apply_chat_template(conversation0, tokenize=False, add_generation_prompt=True)\n prompt0 += \" Passage:\"\n prompt1 = self.tokenizer.apply_chat_template(conversation1, tokenize=False, add_generation_prompt=True)\n prompt1 += \" Passage:\"\n\n input_ids = self.tokenizer([prompt0, prompt1], return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[0] * input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids,\n do_sample=False,\n temperature=0.0,\n top_p=None,\n max_new_tokens=1)\n\n self.total_completion_tokens += output_ids.shape[0] * output_ids.shape[1]\n\n output0 = self.tokenizer.decode(output_ids[0][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n output1 = self.tokenizer.decode(output_ids[1][input_ids.shape[1]:],\n skip_special_tokens=True).strip().upper()\n return [f'Passage {output0}', f'Passage {output1}']\n else:\n raise NotImplementedError\n\n return output\n\n def heapify(self, arr, n, i):\n # Find largest among root and children\n largest = i\n l = 2 * i + 1\n r = 2 * i + 2\n if l < n and arr[l] > arr[i]:\n largest = l\n\n if r < n and arr[r] > arr[largest]:\n largest = r\n\n # If root is not largest, swap with largest and continue heapifying\n if largest != i:\n arr[i], arr[largest] = arr[largest], arr[i]\n self.heapify(arr, n, largest)\n\n def heapSort(self, arr, k):\n n = len(arr)\n ranked = 0\n # Build max heap\n for i in range(n // 2, -1, -1):\n self.heapify(arr, n, i)\n for i in range(n - 1, 0, -1):\n # Swap\n arr[i], arr[0] = arr[0], arr[i]\n ranked += 1\n if ranked == k:\n break\n # Heapify root element\n self.heapify(arr, i, 0)\n\n def rerank(self, query: str, ranking: 
List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"allpair\":\n doc_pairs = list(combinations(ranking, 2))\n allpairs = []\n for doc1, doc2 in tqdm(doc_pairs):\n allpairs.append(self.prompt.format(query=query, doc1=doc1.text, doc2=doc2.text))\n allpairs.append(self.prompt.format(query=query, doc1=doc2.text, doc2=doc1.text))\n\n allpairs_dataset = Text2TextGenerationDataset(allpairs, self.tokenizer)\n\n loader = DataLoader(\n allpairs_dataset,\n batch_size=self.batch_size,\n collate_fn=DataCollatorWithPadding(\n self.tokenizer,\n max_length=512,\n padding='longest',\n ),\n shuffle=False,\n drop_last=False,\n num_workers=4\n )\n\n outputs = []\n for batch_inputs in tqdm(loader):\n self.total_compare += 1\n self.total_prompt_tokens += batch_inputs['input_ids'].shape[0] * batch_inputs['input_ids'].shape[1]\n\n batch_outputs = self.llm.generate(batch_inputs['input_ids'].to(self.llm.device),\n decoder_input_ids=self.decoder_input_ids\n if self.decoder_input_ids.shape[0] == len(batch_inputs['input_ids'])\n else self.decoder_input_ids[:len(batch_inputs['input_ids']), :], # last batch might be smaller\n max_new_tokens=2)\n self.total_completion_tokens += batch_outputs.shape[0] * batch_outputs.shape[1]\n outputs.extend(batch_outputs.cpu().numpy())\n\n outputs = self.tokenizer.batch_decode(outputs, skip_special_tokens=True)\n scores = defaultdict(float)\n for i in range(0, len(outputs), 2):\n doc1, doc2 = doc_pairs[i//2]\n output1 = outputs[i]\n output2 = outputs[i + 1]\n if output1 == \"Passage A\" and output2 == \"Passage B\":\n scores[doc1.docid] += 1\n elif output1 == \"Passage B\" and output2 == \"Passage A\":\n scores[doc2.docid] += 1\n else: # conflict\n scores[doc1.docid] += 0.5\n scores[doc2.docid] += 0.5\n\n ranking = sorted([SearchResult(docid=docid, score=score, text=None) for docid, score in scores.items()],\n key=lambda x: x.score, reverse=True)\n\n elif self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n out = self.ranker.compare(query, [self.text, other.text])\n if out[0] == \"Passage A\" and out[1] == \"Passage B\":\n return True\n else:\n return False\n\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n #\n # elif self.method == \"bubblesort\":\n # k = min(k, len(ranking))\n # for i in range(k):\n # current_ind = len(ranking) - 1\n # while True:\n # if current_ind == i:\n # break\n # doc1 = ranking[current_ind]\n # doc2 = ranking[current_ind - 1]\n # output = self.compare(query, [doc1.text, doc2.text])\n # if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n # ranking[current_ind - 1], ranking[current_ind] = ranking[current_ind], ranking[current_ind - 1]\n # current_ind -= 1\n elif self.method == \"bubblesort\":\n k = min(self.k, len(ranking))\n\n last_end = len(ranking) - 1\n for i in range(k):\n current_ind = last_end\n is_change = False\n while True:\n if current_ind <= i:\n break\n doc1 = ranking[current_ind]\n doc2 = ranking[current_ind - 1]\n output = self.compare(query, [doc1.text, doc2.text])\n if output[0] == \"Passage A\" and output[1] == \"Passage B\":\n ranking[current_ind - 1], ranking[current_ind] = 
ranking[current_ind], ranking[current_ind - 1]\n\n if not is_change:\n is_change = True\n if last_end != len(ranking) - 1: # skip unchanged pairs at the bottom\n last_end += 1\n if not is_change:\n last_end -= 1\n current_ind -= 1\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" }, { "identifier": "DuoT5LlmRanker", "path": "rankers/pairwise.py", "snippet": "class DuoT5LlmRanker(PairwiseLlmRanker):\n def compare(self, query: str, docs: List[str]) -> bool:\n self.total_compare += 1\n self.prompt = 'Query: {query} Document0: {doc1} Document1: {doc2} Relevant:'\n\n inputs = [self.prompt.format(query=query, doc1=docs[0], doc2=docs[1]),\n self.prompt.format(query=query, doc1=docs[1], doc2=docs[0])]\n inputs = self.tokenizer(inputs, padding=True, truncation=True, return_tensors=\"pt\").to(self.llm.device)\n decode_ids = torch.full((2, 1),\n self.llm.config.decoder_start_token_id,\n dtype=torch.long, device=self.llm.device)\n\n self.total_prompt_tokens += inputs['input_ids'].shape[0] * inputs['input_ids'].shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=inputs['input_ids'],\n attention_mask=inputs['attention_mask'],\n decoder_input_ids=decode_ids).logits\n # 6136 and 1176 are the indexes of the tokens false and true in T5.\n batch_scores = logits[:, 0, [6136, 1176]]\n batch_scores = torch.nn.functional.softmax(batch_scores, dim=1)\n batch_probs = batch_scores[:, 1]\n return batch_probs[0] > batch_probs[1]\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n original_ranking = copy.deepcopy(ranking)\n self.total_compare = 0\n self.total_completion_tokens = 0\n self.total_prompt_tokens = 0\n if self.method == \"heapsort\":\n class ComparableDoc:\n def __init__(self, docid, text, ranker):\n self.docid = docid\n self.text = text\n self.ranker = ranker\n\n def __gt__(self, other):\n return self.ranker.compare(query, [self.text, other.text])\n arr = [ComparableDoc(docid=doc.docid, text=doc.text, ranker=self) for doc in ranking]\n self.heapSort(arr, self.k)\n ranking = [SearchResult(docid=doc.docid, score=-i, text=None) for i, doc in enumerate(reversed(arr))]\n\n else:\n raise NotImplementedError(f'Method {self.method} is not implemented.')\n\n results = []\n top_doc_ids = set()\n rank = 1\n for i, doc in enumerate(ranking[:self.k]):\n top_doc_ids.add(doc.docid)\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n for doc in original_ranking:\n if doc.docid not in top_doc_ids:\n results.append(SearchResult(docid=doc.docid, score=-rank, text=None))\n rank += 1\n return results" }, { "identifier": "OpenAiPairwiseLlmRanker", "path": "rankers/pairwise.py", "snippet": "class OpenAiPairwiseLlmRanker(PairwiseLlmRanker):\n def __init__(self,\n model_name_or_path,\n api_key,\n method=\"heapsort\",\n batch_size=2,\n k=10):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.method = method\n self.k = k\n self.total_compare = 0\n self.total_prompt_tokens = 
0\n self.total_completion_tokens = 0\n self.CHARACTERS = [\"A\", \"B\"]\n self.system_prompt = \"You are RankGPT, an intelligent assistant specialized in selecting the most relevant passage from a pair of passages based on their relevance to the query.\"\n self.prompt = \"\"\"Given a query \"{query}\", which of the following two passages is more relevant to the query?\n \nPassage A: \"{doc1}\"\n\nPassage B: \"{doc2}\"\n\nOutput Passage A or Passage B:\"\"\"\n openai.api_key = api_key\n\n def _get_response(self, input_text):\n while True:\n try:\n response = openai.ChatCompletion.create(\n model=self.llm,\n messages=[\n {\"role\": \"system\", \"content\": self.system_prompt},\n {\"role\": \"user\", \"content\": input_text},\n ],\n temperature=0.0,\n request_timeout=15\n )\n self.total_completion_tokens += int(response['usage']['completion_tokens'])\n self.total_prompt_tokens += int(response['usage']['prompt_tokens'])\n\n output = response['choices'][0]['message']['content']\n matches = re.findall(r\"(Passage [A-B])\", output, re.MULTILINE)\n if matches:\n output = matches[0][8]\n elif output.strip() in self.CHARACTERS:\n pass\n else:\n print(f\"Unexpected output: {output}\")\n output = \"A\"\n return output\n\n except openai.error.APIError as e:\n # Handle API error here, e.g. retry or log\n print(f\"OpenAI API returned an API Error: {e}\")\n time.sleep(5)\n continue\n except openai.error.APIConnectionError as e:\n # Handle connection error here\n print(f\"Failed to connect to OpenAI API: {e}\")\n time.sleep(5)\n continue\n except openai.error.RateLimitError as e:\n # Handle rate limit error (we recommend using exponential backoff)\n print(f\"OpenAI API request exceeded rate limit: {e}\")\n time.sleep(5)\n continue\n except openai.error.InvalidRequestError as e:\n # Handle invalid request error\n print(f\"OpenAI API request was invalid: {e}\")\n raise e\n except openai.error.AuthenticationError as e:\n # Handle authentication error\n print(f\"OpenAI API request failed authentication: {e}\")\n raise e\n except openai.error.Timeout as e:\n # Handle timeout error\n print(f\"OpenAI API request timed out: {e}\")\n time.sleep(5)\n continue\n except openai.error.ServiceUnavailableError as e:\n # Handle service unavailable error\n print(f\"OpenAI API request failed with a service unavailable error: {e}\")\n time.sleep(5)\n continue\n except Exception as e:\n print(f\"Unknown error: {e}\")\n raise e\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n doc1, doc2 = docs[0], docs[1]\n input_texts = [self.prompt.format(query=query, doc1=doc1, doc2=doc2),\n self.prompt.format(query=query, doc1=doc2, doc2=doc1)]\n\n return [f'Passage {self._get_response(input_texts[0])}', f'Passage {self._get_response(input_texts[1])}']\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "OpenAiListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class OpenAiListwiseLlmRanker(LlmRanker):\n def __init__(self, model_name_or_path, api_key, window_size, step_size, num_repeat):\n self.llm = model_name_or_path\n self.tokenizer = tiktoken.encoding_for_model(model_name_or_path)\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n openai.api_key = api_key\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n messages = create_permutation_instruction_chat(query, docs, 
self.llm)\n while True:\n try:\n completion = openai.ChatCompletion.create(\n model=self.llm,\n messages=messages,\n temperature=0.0,\n request_timeout=15)\n self.total_completion_tokens += int(completion['usage']['completion_tokens'])\n self.total_prompt_tokens += int(completion['usage']['prompt_tokens'])\n return completion['choices'][0]['message']['content']\n except Exception as e:\n print(str(e))\n if \"This model's maximum context length is\" in str(e):\n print('reduce_length')\n return 'ERROR::reduce_length'\n\n def rerank(self, query: str, ranking: List[SearchResult]) -> List[SearchResult]:\n self.total_compare = 0\n self.total_prompt_tokens = 0\n self.total_completion_tokens = 0\n\n for _ in range(self.num_repeat):\n ranking = copy.deepcopy(ranking)\n end_pos = len(ranking)\n start_pos = end_pos - self.window_size\n while start_pos >= 0:\n start_pos = max(start_pos, 0)\n result = self.compare(query, ranking[start_pos: end_pos])\n ranking = receive_permutation(ranking, result, start_pos, end_pos)\n end_pos = end_pos - self.step_size\n start_pos = start_pos - self.step_size\n\n for i, doc in enumerate(ranking):\n doc.score = -i\n return ranking\n\n def truncate(self, text, length):\n return self.tokenizer.decode(self.tokenizer.encode(text)[:length])" }, { "identifier": "ListwiseLlmRanker", "path": "rankers/listwise.py", "snippet": "class ListwiseLlmRanker(OpenAiListwiseLlmRanker):\n CHARACTERS = [\"A\", \"B\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"J\", \"K\", \"L\",\n \"M\", \"N\", \"O\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"U\", \"V\",\n \"W\"] # \"Passage X\" and \"Passage Y\" will be tokenized into 3 tokens, so we dont use for now\n\n def __init__(self, model_name_or_path, tokenizer_name_or_path, device, window_size, step_size,\n scoring='generation', num_repeat=1, cache_dir=None):\n\n self.scoring = scoring\n self.device = device\n self.window_size = window_size\n self.step_size = step_size\n self.num_repeat = num_repeat\n self.config = AutoConfig.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n\n if self.config.model_type == 't5':\n self.tokenizer = T5Tokenizer.from_pretrained(tokenizer_name_or_path\n if tokenizer_name_or_path is not None else\n model_name_or_path, cache_dir=cache_dir)\n self.llm = T5ForConditionalGeneration.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir)\n\n self.decoder_input_ids = self.tokenizer.encode(\"<pad> Passage\",\n return_tensors=\"pt\",\n add_special_tokens=False).to(self.device) if self.tokenizer else None\n self.target_token_ids = self.tokenizer.batch_encode_plus([f'<pad> Passage {self.CHARACTERS[i]}'\n for i in range(len(self.CHARACTERS))],\n return_tensors=\"pt\",\n add_special_tokens=False,\n padding=True).input_ids[:, -1]\n elif self.config.model_type == 'llama':\n self.tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, cache_dir=cache_dir)\n self.tokenizer.use_default_system_prompt = False\n if 'vicuna' and 'v1.5' in model_name_or_path:\n self.tokenizer.chat_template = \"{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = 'A chat between a curious user and an artificial intelligence assistant. The assistant gives helpful, detailed, and polite answers to the user\\\\'s questions.' 
%}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 %}{{ system_message }}{% endif %}{% if message['role'] == 'user' %}{{ ' USER: ' + message['content'].strip() }}{% elif message['role'] == 'assistant' %}{{ ' ASSISTANT: ' + message['content'].strip() + eos_token }}{% endif %}{% endfor %}{% if add_generation_prompt %}{{ ' ASSISTANT:' }}{% endif %}\"\n\n self.llm = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map='auto',\n torch_dtype=torch.float16 if device == 'cuda'\n else torch.float32,\n cache_dir=cache_dir).eval()\n else:\n raise NotImplementedError\n\n def compare(self, query: str, docs: List):\n self.total_compare += 1\n if self.scoring == 'generation':\n if self.config.model_type == 't5':\n input_text = create_permutation_instruction_complete(query, docs)\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\", truncation=True).input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids,\n skip_special_tokens=True).strip()\n elif self.config.model_type == 'llama':\n input_text = create_permutation_instruction_chat(query, docs, model_name=None)\n input_ids = self.tokenizer.apply_chat_template(input_text, return_tensors=\"pt\",\n add_generation_prompt=True).to(self.device)\n\n self.total_prompt_tokens += input_ids.shape[1]\n\n output_ids = self.llm.generate(input_ids)[0]\n self.total_completion_tokens += output_ids.shape[0]\n output = self.tokenizer.decode(output_ids[input_ids.shape[1]:],\n skip_special_tokens=True).strip()\n\n elif self.scoring == 'likelihood':\n passages = \"\\n\\n\".join([f'Passage {self.CHARACTERS[i]}: \"{doc.text}\"' for i, doc in enumerate(docs)])\n input_text = f'Given a query \"{query}\", which of the following passages is the most relevant one to the query?\\n\\n' \\\n + passages + '\\n\\nOutput only the passage label of the most relevant passage:'\n\n input_ids = self.tokenizer(input_text, return_tensors=\"pt\").input_ids.to(self.device)\n self.total_prompt_tokens += input_ids.shape[1]\n\n with torch.no_grad():\n logits = self.llm(input_ids=input_ids, decoder_input_ids=self.decoder_input_ids).logits[0][-1]\n distributions = torch.softmax(logits, dim=0)\n scores = distributions[self.target_token_ids[:len(docs)]]\n ranked = sorted(zip([f\"[{str(i+1)}]\" for i in range(len(docs))], scores), key=lambda x: x[1], reverse=True)\n output = '>'.join(ranked[i][0] for i in range(len(ranked)))\n\n return output\n\n def truncate(self, text, length):\n return self.tokenizer.convert_tokens_to_string(self.tokenizer.tokenize(text)[:length])" } ]
import logging import ir_datasets import argparse import sys import json import time import random from pyserini.search.lucene import LuceneSearcher from pyserini.search._base import get_topics from rankers.rankers import SearchResult from rankers.pointwise import PointwiseLlmRanker, MonoT5LlmRanker from rankers.setwise import SetwiseLlmRanker, OpenAiSetwiseLlmRanker from rankers.pairwise import PairwiseLlmRanker, DuoT5LlmRanker, OpenAiPairwiseLlmRanker from rankers.listwise import OpenAiListwiseLlmRanker, ListwiseLlmRanker from tqdm import tqdm
14,507
if 'monot5' in args.run.model_name_or_path: ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) else: ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pointwise.method, batch_size=args.pointwise.batch_size) elif args.setwise: if args.run.openai_key: ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, num_child=args.setwise.num_child, method=args.setwise.method, k=args.setwise.k) else: ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, num_child=args.setwise.num_child, scoring=args.run.scoring, method=args.setwise.method, num_permutation=args.setwise.num_permutation, k=args.setwise.k) elif args.pairwise: if args.pairwise.method != 'allpair': args.pairwise.batch_size = 2 logger.info(f'Setting batch_size to 2.') if args.run.openai_key: ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, method=args.pairwise.method, k=args.pairwise.k) elif 'duot5' in args.run.model_name_or_path: ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) else: ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, method=args.pairwise.method, batch_size=args.pairwise.batch_size, k=args.pairwise.k) elif args.listwise: if args.run.openai_key: ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, api_key=args.run.openai_key, window_size=args.listwise.window_size, step_size=args.listwise.step_size, num_repeat=args.listwise.num_repeat) else: ranker = ListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path, tokenizer_name_or_path=args.run.tokenizer_name_or_path, device=args.run.device, cache_dir=args.run.cache_dir, window_size=args.listwise.window_size, step_size=args.listwise.step_size, scoring=args.run.scoring, num_repeat=args.listwise.num_repeat) else: raise ValueError('Must specify either --pointwise, --setwise, --pairwise or --listwise.') query_map = {} if args.run.ir_dataset_name is not None: dataset = ir_datasets.load(args.run.ir_dataset_name) for query in dataset.queries_iter(): qid = query.query_id text = query.text query_map[qid] = ranker.truncate(text, args.run.query_length) dataset = ir_datasets.load(args.run.ir_dataset_name) docstore = dataset.docs_store() else: topics = get_topics(args.run.pyserini_index+'-test') for topic_id in list(topics.keys()): text = topics[topic_id]['title'] query_map[str(topic_id)] = ranker.truncate(text, args.run.query_length) docstore = LuceneSearcher.from_prebuilt_index(args.run.pyserini_index+'.flat') logger.info(f'Loading first stage run from {args.run.run_path}.') first_stage_rankings = [] with open(args.run.run_path, 'r') as f: current_qid = None current_ranking = [] for line in tqdm(f): qid, _, docid, _, score, _ = 
line.strip().split() if qid != current_qid: if current_qid is not None: first_stage_rankings.append((current_qid, query_map[current_qid], current_ranking[:args.run.hits])) current_ranking = [] current_qid = qid if len(current_ranking) >= args.run.hits: continue if args.run.ir_dataset_name is not None: text = docstore.get(docid).text if 'title' in dir(docstore.get(docid)): text = f'{docstore.get(docid).title} {text}' else: data = json.loads(docstore.doc(docid).raw()) text = data['text'] if 'title' in data: text = f'{data["title"]} {text}' text = ranker.truncate(text, args.run.passage_length)
random.seed(929)
logger = logging.getLogger(__name__)


def parse_args(parser, commands):
    # Divide argv by commands
    split_argv = [[]]
    for c in sys.argv[1:]:
        if c in commands.choices:
            split_argv.append([c])
        else:
            split_argv[-1].append(c)
    # Initialize namespace
    args = argparse.Namespace()
    for c in commands.choices:
        setattr(args, c, None)
    # Parse each command
    parser.parse_args(split_argv[0], namespace=args)  # Without command
    for argv in split_argv[1:]:  # Commands
        n = argparse.Namespace()
        setattr(args, argv[0], n)
        parser.parse_args(argv, namespace=n)
    return args


def write_run_file(path, results, tag):
    with open(path, 'w') as f:
        for qid, _, ranking in results:
            rank = 1
            for doc in ranking:
                docid = doc.docid
                score = doc.score
                f.write(f"{qid}\tQ0\t{docid}\t{rank}\t{score}\t{tag}\n")
                rank += 1


def main(args):
    if args.pointwise:
        if 'monot5' in args.run.model_name_or_path:
            ranker = MonoT5LlmRanker(model_name_or_path=args.run.model_name_or_path,
                                     tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                     device=args.run.device,
                                     cache_dir=args.run.cache_dir,
                                     method=args.pointwise.method,
                                     batch_size=args.pointwise.batch_size)
        else:
            ranker = PointwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                        tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                        device=args.run.device,
                                        cache_dir=args.run.cache_dir,
                                        method=args.pointwise.method,
                                        batch_size=args.pointwise.batch_size)
    elif args.setwise:
        if args.run.openai_key:
            ranker = OpenAiSetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                            api_key=args.run.openai_key,
                                            num_child=args.setwise.num_child,
                                            method=args.setwise.method,
                                            k=args.setwise.k)
        else:
            ranker = SetwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                      tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                      device=args.run.device,
                                      cache_dir=args.run.cache_dir,
                                      num_child=args.setwise.num_child,
                                      scoring=args.run.scoring,
                                      method=args.setwise.method,
                                      num_permutation=args.setwise.num_permutation,
                                      k=args.setwise.k)
    elif args.pairwise:
        if args.pairwise.method != 'allpair':
            args.pairwise.batch_size = 2
            logger.info(f'Setting batch_size to 2.')
        if args.run.openai_key:
            ranker = OpenAiPairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                             api_key=args.run.openai_key,
                                             method=args.pairwise.method,
                                             k=args.pairwise.k)
        elif 'duot5' in args.run.model_name_or_path:
            ranker = DuoT5LlmRanker(model_name_or_path=args.run.model_name_or_path,
                                    tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                    device=args.run.device,
                                    cache_dir=args.run.cache_dir,
                                    method=args.pairwise.method,
                                    batch_size=args.pairwise.batch_size,
                                    k=args.pairwise.k)
        else:
            ranker = PairwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                       tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                       device=args.run.device,
                                       cache_dir=args.run.cache_dir,
                                       method=args.pairwise.method,
                                       batch_size=args.pairwise.batch_size,
                                       k=args.pairwise.k)
    elif args.listwise:
        if args.run.openai_key:
            ranker = OpenAiListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                             api_key=args.run.openai_key,
                                             window_size=args.listwise.window_size,
                                             step_size=args.listwise.step_size,
                                             num_repeat=args.listwise.num_repeat)
        else:
            ranker = ListwiseLlmRanker(model_name_or_path=args.run.model_name_or_path,
                                       tokenizer_name_or_path=args.run.tokenizer_name_or_path,
                                       device=args.run.device,
                                       cache_dir=args.run.cache_dir,
                                       window_size=args.listwise.window_size,
                                       step_size=args.listwise.step_size,
                                       scoring=args.run.scoring,
                                       num_repeat=args.listwise.num_repeat)
    else:
        raise ValueError('Must specify either --pointwise, --setwise, --pairwise or --listwise.')

    query_map = {}
    if args.run.ir_dataset_name is not None:
        dataset = ir_datasets.load(args.run.ir_dataset_name)
        for query in dataset.queries_iter():
            qid = query.query_id
            text = query.text
            query_map[qid] = ranker.truncate(text, args.run.query_length)
        dataset = ir_datasets.load(args.run.ir_dataset_name)
        docstore = dataset.docs_store()
    else:
        topics = get_topics(args.run.pyserini_index+'-test')
        for topic_id in list(topics.keys()):
            text = topics[topic_id]['title']
            query_map[str(topic_id)] = ranker.truncate(text, args.run.query_length)
        docstore = LuceneSearcher.from_prebuilt_index(args.run.pyserini_index+'.flat')

    logger.info(f'Loading first stage run from {args.run.run_path}.')
    first_stage_rankings = []
    with open(args.run.run_path, 'r') as f:
        current_qid = None
        current_ranking = []
        for line in tqdm(f):
            qid, _, docid, _, score, _ = line.strip().split()
            if qid != current_qid:
                if current_qid is not None:
                    first_stage_rankings.append((current_qid, query_map[current_qid], current_ranking[:args.run.hits]))
                current_ranking = []
                current_qid = qid
            if len(current_ranking) >= args.run.hits:
                continue
            if args.run.ir_dataset_name is not None:
                text = docstore.get(docid).text
                if 'title' in dir(docstore.get(docid)):
                    text = f'{docstore.get(docid).title} {text}'
            else:
                data = json.loads(docstore.doc(docid).raw())
                text = data['text']
                if 'title' in data:
                    text = f'{data["title"]} {text}'
            text = ranker.truncate(text, args.run.passage_length)
current_ranking.append(SearchResult(docid=docid, score=float(score), text=text))
0
2023-10-14 01:39:38+00:00
24k
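The code in the record above both writes and re-loads rankings in the six-column TREC run format (qid Q0 docid rank score tag, whitespace separated): write_run_file emits it, and the first-stage loading loop unpacks it line by line. Below is a minimal, self-contained sketch of that round trip; the file name, query/document ids, scores, and run tag are made up for illustration and are not taken from the record.

# Minimal sketch of the TREC run format used by write_run_file() and by the
# first-stage loading loop in the record above. The ids, scores, file name
# and tag below are illustrative placeholders.
from collections import namedtuple

Doc = namedtuple('Doc', ['docid', 'score'])

ranking = [Doc('doc3', 12.5), Doc('doc1', 11.0), Doc('doc7', 9.8)]

# Write: one line per (query, document) pair, rank starting at 1.
with open('example.run', 'w') as f:
    rank = 1
    for doc in ranking:
        f.write(f"q1\tQ0\t{doc.docid}\t{rank}\t{doc.score}\texample-tag\n")
        rank += 1

# Read back with the same whitespace-split unpacking the record uses when
# loading a first-stage run (columns: qid, Q0, docid, rank, score, tag).
with open('example.run') as f:
    for line in f:
        qid, _, docid, _, score, _ = line.strip().split()
        print(qid, docid, float(score))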
LiYunfengLYF/LightFC
lib/train/data/base_functions.py
[ { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\r\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\r\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\r\n train_cls=False, pos_prob=0.5):\r\n def __len__(self):\r\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\r\n allow_invisible=False, force_invisible=False):\r\n def __getitem__(self, index):\r\n def getitem(self):\r\n def getitem_cls(self):\r\n def get_center_box(self, H, W, ratio=1 / 8):\r\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\r\n def get_one_search(self):\r\n def get_frame_ids_trident(self, visible):\r\n def get_frame_ids_stark(self, visible, valid):\r\nclass TrackingSampler(torch.utils.data.Dataset):\r\n H, W, _ = template_frames[0].shape\r\n H, W, _ = template_frames[0].shape\r\n H, W, _ = search_frames[0].shape\r" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\r\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None,\r\n joint_transform=None):\r\n def __call__(self, data: TensorDict):\r\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\r\n mode='pair', settings=None, *args, **kwargs):\r\n def _get_jittered_box(self, box, mode):\r\n def __call__(self, data: TensorDict):\r\nclass BaseProcessing:\r\nclass STARKProcessing(BaseProcessing):\r" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\r\n \"\"\"\r\n Data loader. Combines a dataset and a sampler, and provides\r\n single- or multi-process iterators over the dataset.\r\n\r\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\r\n select along which dimension the data should be stacked to form a batch.\r\n\r\n Arguments:\r\n dataset (Dataset): dataset from which to load the data.\r\n batch_size (int, optional): how many samples per batch to load\r\n (default: 1).\r\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\r\n at every epoch (default: False).\r\n sampler (Sampler, optional): defines the strategy to draw samples from\r\n the dataset. If specified, ``shuffle`` must be False.\r\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\r\n indices at a time. Mutually exclusive with batch_size, shuffle,\r\n sampler, and drop_last.\r\n num_workers (int, optional): how many subprocesses to use for data\r\n loading. 0 means that the data will be loaded in the main process.\r\n (default: 0)\r\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\r\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\r\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\r\n into CUDA pinned memory before returning them.\r\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\r\n if the dataset size is not divisible by the batch size. If ``False`` and\r\n the size of dataset is not divisible by the batch size, then the last batch\r\n will be smaller. (default: False)\r\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\r\n from workers. Should always be non-negative. 
(default: 0)\r\n worker_init_fn (callable, optional): If not None, this will be called on each\r\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\r\n input, after seeding and before data loading. (default: None)\r\n\r\n .. note:: By default, each worker will have its PyTorch seed set to\r\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\r\n by main process using its RNG. However, seeds for other libraries\r\n may be duplicated upon initializing workers (w.g., NumPy), causing\r\n each worker to return identical random numbers. (See\r\n :ref:`dataloader-workers-random-seed` section in FAQ.) You may\r\n use ``torch.initial_seed()`` to access the PyTorch seed for each\r\n worker in :attr:`worker_init_fn`, and use it to set other seeds\r\n before data loading.\r\n\r\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\r\n unpicklable object, e.g., a lambda function.\r\n \"\"\"\r\n\r\n __initialized = False\r\n\r\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\r\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\r\n timeout=0, worker_init_fn=None):\r\n if collate_fn is None:\r\n if stack_dim == 0:\r\n collate_fn = ltr_collate\r\n elif stack_dim == 1:\r\n collate_fn = ltr_collate_stack1\r\n else:\r\n raise ValueError('Stack dim no supported. Must be 0 or 1.')\r\n\r\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\r\n num_workers, collate_fn, pin_memory, drop_last,\r\n timeout, worker_init_fn)\r\n\r\n self.name = name\r\n self.training = training\r\n self.epoch_interval = epoch_interval\r\n self.stack_dim = stack_dim\r" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\r\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\r\n try:\r\n im = cv.imread(path, cv.IMREAD_COLOR)\r\n\r\n # convert to rgb and return\r\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\r\n except Exception as e:\r\n print('ERROR: Could not read image \"{}\"'.format(path))\r\n print(e)\r\n return None\r" }, { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\r\n \"\"\" LaSOT dataset.\r\n\r\n Publication:\r\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\r\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\r\n CVPR, 2019\r\n https://arxiv.org/pdf/1809.07845.pdf\r\n\r\n Download the dataset from https://cis.temple.edu/lasot/download.html\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the lasot dataset.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\r\n videos with subscripts -1, -3, and -5 from each class will be used for training.\r\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\r\n vid_ids or split option can be used at a time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).lasot_dir if root is None else root\r\n super().__init__('LaSOT', root, image_loader)\r\n\r\n # Keep a list of all classes\r\n self.class_list = [f for f in os.listdir(self.root)]\r\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\r\n\r\n self.sequence_list = self._build_sequence_list(vid_ids, split)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_per_class = self._build_class_list()\r\n\r\n def _build_sequence_list(self, vid_ids=None, split=None):\r\n if split is not None:\r\n if vid_ids is not None:\r\n raise ValueError('Cannot set both split_name and vid_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\r\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\r\n elif vid_ids is not None:\r\n sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]\r\n else:\r\n raise ValueError('Set either split_name or vid_ids.')\r\n\r\n return sequence_list\r\n\r\n def _build_class_list(self):\r\n seq_per_class = {}\r\n for seq_id, seq_name in enumerate(self.sequence_list):\r\n class_name = seq_name.split('-')[0]\r\n if class_name in seq_per_class:\r\n seq_per_class[class_name].append(seq_id)\r\n else:\r\n seq_per_class[class_name] = [seq_id]\r\n\r\n return seq_per_class\r\n\r\n def get_name(self):\r\n return 'lasot'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\r\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\r\n\r\n with open(occlusion_file, 'r', newline='') as f:\r\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\r\n with open(out_of_view_file, 'r') as f:\r\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\r\n\r\n target_visible = ~occlusion & ~out_of_view\r\n\r\n return target_visible\r\n\r\n def _get_sequence_path(self, seq_id):\r\n seq_name = self.sequence_list[seq_id]\r\n class_name = seq_name.split('-')[0]\r\n vid_id = seq_name.split('-')[1]\r\n\r\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = self._read_target_visible(seq_path) & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def 
_get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\r\n\r\n def _get_class(self, seq_path):\r\n raw_class = seq_path.split('/')[-2]\r\n return raw_class\r\n\r\n def get_class_name(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_class = self._get_class(seq_path)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n\r\n obj_class = self._get_class(seq_path)\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\r\n \"\"\" GOT-10k dataset.\r\n\r\n Publication:\r\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\r\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\r\n arXiv:1810.11981, 2018\r\n https://arxiv.org/pdf/1810.11981.pdf\r\n\r\n Download dataset from http://got-10k.aitestunion.com/downloads\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\r\n not NOT the official got-10k validation split. To use the official validation split, provide that as\r\n the root folder instead.\r\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\r\n options can be used at the same time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).got10k_dir if root is None else root\r\n super().__init__('GOT10k', root, image_loader)\r\n\r\n # all folders inside the root\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n # seq_id is the index of the folder inside the got10k root path\r\n if split is not None:\r\n if seq_ids is not None:\r\n raise ValueError('Cannot set both split_name and seq_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\r\n elif split == 'val':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\r\n elif split == 'train_full':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\r\n elif split == 'vottrain':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\r\n elif split == 'votval':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\r\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\r\n elif seq_ids is None:\r\n seq_ids = list(range(0, len(self.sequence_list)))\r\n\r\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.sequence_meta_info = self._load_meta_info()\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def get_name(self):\r\n return 'got10k'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def _load_meta_info(self):\r\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\r\n return sequence_meta_info\r\n\r\n def _read_meta(self, seq_path):\r\n try:\r\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\r\n meta_info = f.readlines()\r\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\r\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\r\n 'major_class': meta_info[7].split(': ')[-1][:-1],\r\n 'root_class': meta_info[8].split(': ')[-1][:-1],\r\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n\r\n for i, s in enumerate(self.sequence_list):\r\n object_class = self.sequence_meta_info[s]['object_class_name']\r\n if object_class in seq_per_class:\r\n seq_per_class[object_class].append(i)\r\n else:\r\n seq_per_class[object_class] = [i]\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _get_sequence_list(self):\r\n with open(os.path.join(self.root, 'list.txt')) as f:\r\n dir_list = list(csv.reader(f))\r\n dir_list = [dir_name[0] for dir_name in dir_list]\r\n return dir_list\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt = 
pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"absence.label\")\r\n cover_file = os.path.join(seq_path, \"cover.label\")\r\n\r\n with open(occlusion_file, 'r', newline='') as f:\r\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\r\n with open(cover_file, 'r', newline='') as f:\r\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\r\n\r\n target_visible = ~occlusion & (cover > 0).byte()\r\n\r\n visible_ratio = cover.float() / 8\r\n return target_visible, visible_ratio\r\n\r\n def _get_sequence_path(self, seq_id):\r\n return os.path.join(self.root, self.sequence_list[seq_id])\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible, visible_ratio = self._read_target_visible(seq_path)\r\n visible = visible & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\r\n\r\n def get_class_name(self, seq_id):\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n return obj_meta['object_class_name']\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n return frame_list, anno_frames, obj_meta\r" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\r\n \"\"\" TrackingNet dataset.\r\n\r\n Publication:\r\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\r\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\r\n ECCV, 2018\r\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\r\n\r\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None, env_num=None):\r\n \"\"\"\r\n args:\r\n root - The path to the TrackingNet folder, containing the training sets.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\r\n sets (0 - 11) will be used.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).trackingnet_dir if root is None else root\r\n super().__init__('TrackingNet', root, image_loader)\r\n\r\n if set_ids is None:\r\n set_ids = [i for i in range(12)]\r\n\r\n self.set_ids = set_ids\r\n\r\n # Keep a list of all videos. Sequence list is a list of tuples (set_id, video_name) containing the set_id and\r\n # video_name for each sequence\r\n self.sequence_list = list_sequences(self.root, self.set_ids)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\r\n\r\n # we do not have the class_lists for the tracking net\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def _load_class_info(self):\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\r\n\r\n with open(class_map_path, 'r') as f:\r\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\r\n\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_to_class_map, seq_per_class\r\n\r\n def get_name(self):\r\n return 'trackingnet'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\r\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\r\n low_memory=False).values\r\n return torch.tensor(gt)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bbox = self._read_bb_anno(seq_id)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = valid.clone().byte()\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, seq_id, frame_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\r\n return self.image_loader(frame_path)\r\n\r\n def _get_class(self, seq_id):\r\n seq_name = self.sequence_list[seq_id][1]\r\n return self.seq_to_class_map[seq_name]\r\n\r\n def get_class_name(self, seq_id):\r\n obj_class = self._get_class(seq_id)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n obj_class = self._get_class(seq_id)\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class 
ImagenetVID(BaseVideoDataset):\r\n \"\"\" Imagenet VID dataset.\r\n\r\n Publication:\r\n ImageNet Large Scale Visual Recognition Challenge\r\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\r\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\r\n IJCV, 2015\r\n https://arxiv.org/pdf/1409.0575.pdf\r\n\r\n Download the dataset from http://image-net.org/\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1,env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the imagenet vid dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n min_length - Minimum allowed sequence length.\r\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\r\n which cover complete image.\r\n \"\"\"\r\n root = env_settings(env_num).imagenet_dir if root is None else root\r\n super().__init__(\"imagenetvid\", root, image_loader)\r\n\r\n cache_file = os.path.join(root, 'cache.json')\r\n if os.path.isfile(cache_file):\r\n # If available, load the pre-processed cache file containing meta-info for each sequence\r\n with open(cache_file, 'r') as f:\r\n sequence_list_dict = json.load(f)\r\n\r\n self.sequence_list = sequence_list_dict\r\n else:\r\n # Else process the imagenet annotations and generate the cache file\r\n self.sequence_list = self._process_anno(root)\r\n\r\n with open(cache_file, 'w') as f:\r\n json.dump(self.sequence_list, f)\r\n\r\n # Filter the sequences based on min_length and max_target_area in the first frame\r\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\r\n get_target_to_image_ratio(x) < max_target_area]\r\n\r\n def get_name(self):\r\n return 'imagenetvid'\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\r\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\r\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\r\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, sequence, frame_id):\r\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\r\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\r\n frame_number = frame_id + sequence['start_frame']\r\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\r\n '{:06d}.JPEG'.format(frame_number))\r\n return self.image_loader(frame_path)\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n sequence = self.sequence_list[seq_id]\r\n\r\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n # Create anno dict\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n # added the class info to the meta info\r\n object_meta = OrderedDict({'object_class': sequence['class_name'],\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r\n\r\n def _process_anno(self, root):\r\n # Builds individual tracklets\r\n base_vid_anno_path = os.path.join(root, 'Annotations', 
'VID', 'train')\r\n\r\n all_sequences = []\r\n for set in sorted(os.listdir(base_vid_anno_path)):\r\n set_id = int(set.split('_')[-1])\r\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\r\n\r\n vid_id = int(vid.split('_')[-1])\r\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\r\n\r\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\r\n image_size = [int(frame1_anno.find('size/width').text), int(frame1_anno.find('size/height').text)]\r\n\r\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\r\n for f in anno_files]\r\n\r\n tracklets = {}\r\n\r\n # Find all tracklets along with start frame\r\n for f_id, all_targets in enumerate(objects):\r\n for target in all_targets:\r\n tracklet_id = target.find('trackid').text\r\n if tracklet_id not in tracklets:\r\n tracklets[tracklet_id] = f_id\r\n\r\n for tracklet_id, tracklet_start in tracklets.items():\r\n tracklet_anno = []\r\n target_visible = []\r\n class_name_id = None\r\n\r\n for f_id in range(tracklet_start, len(objects)):\r\n found = False\r\n for target in objects[f_id]:\r\n if target.find('trackid').text == tracklet_id:\r\n if not class_name_id:\r\n class_name_id = target.find('name').text\r\n x1 = int(target.find('bndbox/xmin').text)\r\n y1 = int(target.find('bndbox/ymin').text)\r\n x2 = int(target.find('bndbox/xmax').text)\r\n y2 = int(target.find('bndbox/ymax').text)\r\n\r\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\r\n target_visible.append(target.find('occluded').text == '0')\r\n\r\n found = True\r\n break\r\n if not found:\r\n break\r\n\r\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\r\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\r\n 'target_visible': target_visible, 'image_size': image_size}\r\n all_sequences.append(new_sequence)\r\n\r\n return all_sequences\r" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\r\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\r\n\r\n Publication:\r\n Microsoft COCO: Common Objects in Context.\r\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\r\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\r\n ECCV, 2014\r\n https://arxiv.org/pdf/1405.0312.pdf\r\n\r\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\r\n organized as follows.\r\n - coco_root\r\n - annotations\r\n - instances_train2014.json\r\n - instances_train2017.json\r\n - images\r\n - train2014\r\n - train2017\r\n\r\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\",env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the coco dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\r\n images will be used\r\n split - 'train' or 'val'.\r\n version - version of coco dataset (2014 or 2017)\r\n \"\"\"\r\n root = env_settings(env_num).coco_dir if root is None else root\r\n super().__init__('COCO', root, image_loader)\r\n\r\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\r\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\r\n\r\n # Load the COCO set.\r\n self.coco_set = COCO(self.anno_path)\r\n\r\n self.cats = self.coco_set.cats\r\n\r\n self.class_list = self.get_class_list()\r\n\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n def _get_sequence_list(self):\r\n ann_list = list(self.coco_set.anns.keys())\r\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\r\n\r\n return seq_list\r\n\r\n def is_video_sequence(self):\r\n return False\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_name(self):\r\n return 'coco'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_class_list(self):\r\n class_list = []\r\n for cat_id in self.cats.keys():\r\n class_list.append(self.cats[cat_id]['name'])\r\n return class_list\r\n\r\n def has_segmentation_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def get_sequence_info(self, seq_id):\r\n anno = self._get_anno(seq_id)\r\n\r\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\r\n\r\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\r\n\r\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\r\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\r\n\r\n visible = valid.clone().byte()\r\n\r\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\r\n\r\n def _get_anno(self, seq_id):\r\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\r\n\r\n return anno\r\n\r\n def _get_frames(self, seq_id):\r\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\r\n img = self.image_loader(os.path.join(self.img_pth, path))\r\n return img\r\n\r\n def get_meta_info(self, seq_id):\r\n try:\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\r\n 'motion_class': None,\r\n 'major_class': cat_dict_current['supercategory'],\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n\r\n def get_class_name(self, seq_id):\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n return cat_dict_current['name']\r\n\r\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\r\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\r\n # list containing these replicated images.\r\n frame = self._get_frames(seq_id)\r\n\r\n frame_list = [frame.copy() for _ in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\r\n\r\n object_meta = self.get_meta_info(seq_id)\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\r\n not NOT the official got-10k validation split. To use the official validation split, provide that as\r\n the root folder instead.\r\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\r\n options can be used at the same time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n use_lmdb - whether the dataset is stored in lmdb format\r\n \"\"\"\r\n root = env_settings(env_num).got10k_lmdb_dir if root is None else root\r\n super().__init__('GOT10k_lmdb', root, image_loader)\r\n\r\n # all folders inside the root\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n # seq_id is the index of the folder inside the got10k root path\r\n if split is not None:\r\n if seq_ids is not None:\r\n raise ValueError('Cannot set both split_name and seq_ids.')\r\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\r\n elif split == 'val':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\r\n elif split == 'train_full':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\r\n elif split == 'vottrain':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\r\n elif split == 'votval':\r\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\r\n elif seq_ids is None:\r\n seq_ids = list(range(0, len(self.sequence_list)))\r\n\r\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.sequence_meta_info = self._load_meta_info()\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def get_name(self):\r\n return 'got10k_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def _load_meta_info(self):\r\n def _read_meta(meta_info):\r\n\r\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\r\n 'motion_class': meta_info[6].split(': ')[-1],\r\n 'major_class': meta_info[7].split(': ')[-1],\r\n 'root_class': meta_info[8].split(': ')[-1],\r\n 'motion_adverb': meta_info[9].split(': ')[-1]})\r\n\r\n return object_meta\r\n\r\n sequence_meta_info = {}\r\n for s in self.sequence_list:\r\n try:\r\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" % s)\r\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\r\n except:\r\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return sequence_meta_info\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n\r\n for i, s in enumerate(self.sequence_list):\r\n object_class = self.sequence_meta_info[s]['object_class_name']\r\n if object_class in seq_per_class:\r\n seq_per_class[object_class].append(i)\r\n else:\r\n seq_per_class[object_class] = [i]\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _get_sequence_list(self):\r\n dir_str = decode_str(self.root, 'train/list.txt')\r\n dir_list = dir_str.split('\\n')\r\n return dir_list\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in 
got10k is empty\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n\r\n return torch.tensor(gt_arr)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # full occlusion and out_of_view files\r\n occlusion_file = os.path.join(seq_path, \"absence.label\")\r\n cover_file = os.path.join(seq_path, \"cover.label\")\r\n # Read these files\r\n occ_list = list(\r\n map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\r\n occlusion = torch.ByteTensor(occ_list)\r\n cover_list = list(\r\n map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\r\n cover = torch.ByteTensor(cover_list)\r\n\r\n target_visible = ~occlusion & (cover > 0).byte()\r\n\r\n visible_ratio = cover.float() / 8\r\n return target_visible, visible_ratio\r\n\r\n def _get_sequence_path(self, seq_id):\r\n return os.path.join(\"train\", self.sequence_list[seq_id])\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible, visible_ratio = self._read_target_visible(seq_path)\r\n visible = visible & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\r\n\r\n def get_class_name(self, seq_id):\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n return obj_meta['object_class_name']\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\r\n\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n return frame_list, anno_frames, obj_meta\r" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None,\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the lasot dataset.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\r\n videos with subscripts -1, -3, and -5 from each class will be used for training.\r\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\r\n vid_ids or split option can be used at a time.\r\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).lasot_lmdb_dir if root is None else root\r\n super().__init__('LaSOT_lmdb', root, image_loader)\r\n\r\n self.sequence_list = self._build_sequence_list(vid_ids, split)\r\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\r\n self.class_list = []\r\n for ele in class_list:\r\n if ele not in self.class_list:\r\n self.class_list.append(ele)\r\n # Keep a list of all classes\r\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_per_class = self._build_class_list()\r\n\r\n def _build_sequence_list(self, vid_ids=None, split=None):\r\n if split is not None:\r\n if vid_ids is not None:\r\n raise ValueError('Cannot set both split_name and vid_ids.')\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n if split == 'train':\r\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\r\n else:\r\n raise ValueError('Unknown split name.')\r\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\r\n elif vid_ids is not None:\r\n sequence_list = [c + '-' + str(v) for c in self.class_list for v in vid_ids]\r\n else:\r\n raise ValueError('Set either split_name or vid_ids.')\r\n\r\n return sequence_list\r\n\r\n def _build_class_list(self):\r\n seq_per_class = {}\r\n for seq_id, seq_name in enumerate(self.sequence_list):\r\n class_name = seq_name.split('-')[0]\r\n if class_name in seq_per_class:\r\n seq_per_class[class_name].append(seq_id)\r\n else:\r\n seq_per_class[class_name] = [seq_id]\r\n\r\n return seq_per_class\r\n\r\n def get_name(self):\r\n return 'lasot_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def has_occlusion_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_path):\r\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\r\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n return torch.tensor(gt_arr)\r\n\r\n def _read_target_visible(self, seq_path):\r\n # Read full occlusion and out_of_view\r\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\r\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\r\n\r\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\r\n occlusion = torch.ByteTensor(occ_list)\r\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\r\n out_of_view = torch.ByteTensor(out_view_list)\r\n\r\n target_visible = ~occlusion & ~out_of_view\r\n\r\n return target_visible\r\n\r\n def _get_sequence_path(self, seq_id):\r\n seq_name = self.sequence_list[seq_id]\r\n class_name = seq_name.split('-')[0]\r\n vid_id = seq_name.split('-')[1]\r\n\r\n return os.path.join(class_name, class_name + '-' + vid_id)\r\n\r\n def get_sequence_info(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n bbox = self._read_bb_anno(seq_path)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n 
visible = self._read_target_visible(seq_path) & valid.byte()\r\n\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame_path(self, seq_path, frame_id):\r\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id + 1)) # frames start from 1\r\n\r\n def _get_frame(self, seq_path, frame_id):\r\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\r\n\r\n def _get_class(self, seq_path):\r\n raw_class = seq_path.split('/')[-2]\r\n return raw_class\r\n\r\n def get_class_name(self, seq_id):\r\n seq_path = self._get_sequence_path(seq_id)\r\n obj_class = self._get_class(seq_path)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n seq_path = self._get_sequence_path(seq_id)\r\n\r\n obj_class = self._get_class(seq_path)\r\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\r\n \"\"\" Imagenet VID dataset.\r\n\r\n Publication:\r\n ImageNet Large Scale Visual Recognition Challenge\r\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\r\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\r\n IJCV, 2015\r\n https://arxiv.org/pdf/1409.0575.pdf\r\n\r\n Download the dataset from http://image-net.org/\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1,env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the imagenet vid dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n min_length - Minimum allowed sequence length.\r\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\r\n which cover complete image.\r\n \"\"\"\r\n root = env_settings(env_num).imagenet_dir if root is None else root\r\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\r\n\r\n sequence_list_dict = decode_json(root, \"cache.json\")\r\n self.sequence_list = sequence_list_dict\r\n\r\n # Filter the sequences based on min_length and max_target_area in the first frame\r\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\r\n get_target_to_image_ratio(x) < max_target_area]\r\n\r\n def get_name(self):\r\n return 'imagenetvid_lmdb'\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\r\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\r\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\r\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, sequence, frame_id):\r\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\r\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\r\n frame_number = frame_id + sequence['start_frame']\r\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\r\n '{:06d}.JPEG'.format(frame_number))\r\n return decode_img(self.root, frame_path)\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n sequence = self.sequence_list[seq_id]\r\n\r\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n # Create anno dict\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n # added the class info to the meta info\r\n object_meta = OrderedDict({'object_class': sequence['class_name'],\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\r\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\r\n\r\n Publication:\r\n Microsoft COCO: Common Objects in Context.\r\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\r\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\r\n ECCV, 2014\r\n https://arxiv.org/pdf/1405.0312.pdf\r\n\r\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\r\n organized as follows.\r\n - coco_root\r\n - annotations\r\n - instances_train2014.json\r\n - instances_train2017.json\r\n - images\r\n - train2014\r\n - train2017\r\n\r\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\r\n \"\"\"\r\n\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\",\r\n env_num=None):\r\n \"\"\"\r\n args:\r\n root - path to the coco dataset.\r\n image_loader (default_image_loader) - The function to read the images. If installed,\r\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\r\n opencv's imread is used.\r\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\r\n images will be used\r\n split - 'train' or 'val'.\r\n version - version of coco dataset (2014 or 2017)\r\n \"\"\"\r\n root = env_settings(env_num).coco_dir if root is None else root\r\n super().__init__('COCO_lmdb', root, image_loader)\r\n self.root = root\r\n self.img_pth = 'images/{}{}/'.format(split, version)\r\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\r\n\r\n # Load the COCO set.\r\n print('loading annotations into memory...')\r\n tic = time.time()\r\n coco_json = decode_json(root, self.anno_path)\r\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\r\n\r\n self.coco_set = COCO(coco_json)\r\n\r\n self.cats = self.coco_set.cats\r\n\r\n self.class_list = self.get_class_list()\r\n\r\n self.sequence_list = self._get_sequence_list()\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n self.seq_per_class = self._build_seq_per_class()\r\n\r\n def _get_sequence_list(self):\r\n ann_list = list(self.coco_set.anns.keys())\r\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\r\n\r\n return seq_list\r\n\r\n def is_video_sequence(self):\r\n return False\r\n\r\n def get_num_classes(self):\r\n return len(self.class_list)\r\n\r\n def get_name(self):\r\n return 'coco_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_class_list(self):\r\n class_list = []\r\n for cat_id in self.cats.keys():\r\n class_list.append(self.cats[cat_id]['name'])\r\n return class_list\r\n\r\n def has_segmentation_info(self):\r\n return True\r\n\r\n def get_num_sequences(self):\r\n return len(self.sequence_list)\r\n\r\n def _build_seq_per_class(self):\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_per_class\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def get_sequence_info(self, seq_id):\r\n anno = self._get_anno(seq_id)\r\n\r\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\r\n\r\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\r\n\r\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\r\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\r\n\r\n visible = valid.clone().byte()\r\n\r\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\r\n\r\n def _get_anno(self, seq_id):\r\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\r\n\r\n return anno\r\n\r\n def _get_frames(self, seq_id):\r\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\r\n # img = self.image_loader(os.path.join(self.img_pth, path))\r\n img = decode_img(self.root, os.path.join(self.img_pth, path))\r\n return img\r\n\r\n def get_meta_info(self, seq_id):\r\n try:\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\r\n 'motion_class': None,\r\n 'major_class': cat_dict_current['supercategory'],\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n except:\r\n object_meta = OrderedDict({'object_class_name': None,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n return object_meta\r\n\r\n def get_class_name(self, seq_id):\r\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\r\n return cat_dict_current['name']\r\n\r\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\r\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\r\n # list containing these replicated images.\r\n frame = self._get_frames(seq_id)\r\n\r\n frame_list = [frame.copy() for _ in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\r\n\r\n object_meta = self.get_meta_info(seq_id)\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\r\n \"\"\" TrackingNet dataset.\r\n\r\n Publication:\r\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\r\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\r\n ECCV, 2018\r\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\r\n\r\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\r\n \"\"\"\r\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None,env_num=None):\r\n \"\"\"\r\n args:\r\n root - The path to the TrackingNet folder, containing the training sets.\r\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\r\n is used by default.\r\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\r\n sets (0 - 11) will be used.\r\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\r\n \"\"\"\r\n root = env_settings(env_num).trackingnet_lmdb_dir if root is None else root\r\n super().__init__('TrackingNet_lmdb', root, image_loader)\r\n\r\n if set_ids is None:\r\n set_ids = [i for i in range(12)]\r\n\r\n self.set_ids = set_ids\r\n\r\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\r\n # video_name for each sequence\r\n self.sequence_list = list_sequences(self.root)\r\n\r\n if data_fraction is not None:\r\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\r\n\r\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\r\n\r\n # we do not have the class_lists for the tracking net\r\n self.class_list = list(self.seq_per_class.keys())\r\n self.class_list.sort()\r\n\r\n def _load_class_info(self):\r\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\r\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\r\n\r\n with open(class_map_path, 'r') as f:\r\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\r\n\r\n seq_per_class = {}\r\n for i, seq in enumerate(self.sequence_list):\r\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\r\n if class_name not in seq_per_class:\r\n seq_per_class[class_name] = [i]\r\n else:\r\n seq_per_class[class_name].append(i)\r\n\r\n return seq_to_class_map, seq_per_class\r\n\r\n def get_name(self):\r\n return 'trackingnet_lmdb'\r\n\r\n def has_class_info(self):\r\n return True\r\n\r\n def get_sequences_in_class(self, class_name):\r\n return self.seq_per_class[class_name]\r\n\r\n def _read_bb_anno(self, seq_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\r\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\r\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\r\n gt_arr = np.array(gt_list).astype(np.float32)\r\n return torch.tensor(gt_arr)\r\n\r\n def get_sequence_info(self, seq_id):\r\n bbox = self._read_bb_anno(seq_id)\r\n\r\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\r\n visible = valid.clone().byte()\r\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\r\n\r\n def _get_frame(self, seq_id, frame_id):\r\n set_id = self.sequence_list[seq_id][0]\r\n vid_name = self.sequence_list[seq_id][1]\r\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\r\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\r\n\r\n def _get_class(self, seq_id):\r\n seq_name = self.sequence_list[seq_id][1]\r\n return self.seq_to_class_map[seq_name]\r\n\r\n def get_class_name(self, seq_id):\r\n obj_class = self._get_class(seq_id)\r\n\r\n return obj_class\r\n\r\n def get_frames(self, seq_id, frame_ids, anno=None):\r\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\r\n\r\n if anno is None:\r\n anno = self.get_sequence_info(seq_id)\r\n\r\n anno_frames = {}\r\n for key, value in anno.items():\r\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\r\n\r\n obj_class = self._get_class(seq_id)\r\n\r\n object_meta = OrderedDict({'object_class_name': obj_class,\r\n 'motion_class': None,\r\n 'major_class': None,\r\n 'root_class': None,\r\n 'motion_adverb': None})\r\n\r\n return frame_list, anno_frames, object_meta\r" }, { "identifier": "Adan", "path": "lib/train/optimizer/anan.py", "snippet": "class Adan(Optimizer):\r\n \"\"\"\r\n Implements a pytorch variant of Adan\r\n Adan was proposed in\r\n Adan: Adaptive Nesterov Momentum Algorithm for\r\n Faster Optimizing Deep Models[J].arXiv preprint arXiv:2208.06677, 2022.\r\n https://arxiv.org/abs/2208.06677\r\n Arguments:\r\n params (iterable): iterable of 
parameters to optimize or\r\n dicts defining parameter groups.\r\n lr (float, optional): learning rate. (default: 1e-3)\r\n betas (Tuple[float, float, flot], optional): coefficients used for\r\n first- and second-order moments. (default: (0.98, 0.92, 0.99))\r\n eps (float, optional): term added to the denominator to improve\r\n numerical stability. (default: 1e-8)\r\n weight_decay (float, optional): decoupled weight decay\r\n (L2 penalty) (default: 0)\r\n max_grad_norm (float, optional): value used to clip\r\n global grad norm (default: 0.0 no clip)\r\n no_prox (bool): how to perform the decoupled weight decay\r\n (default: False)\r\n foreach (bool): if True would use torch._foreach implementation.\r\n It's faster but uses slightly more memory. (default: True)\r\n fused (bool, optional): whether fused implementation is used.\r\n (default: False)\r\n\r\n VIT:\r\n 150\r\n lr 0.015\r\n betas (0.98, 0.92, 0.99)\r\n eps 1.0e-08\r\n weight_decay 0.02\r\n max_grad_norm 5.0\r\n no_prox\r\n foreach\r\n fused\r\n 300\r\n lr 0.015\r\n betas (0.98, 0.92, 0.99)\r\n eps 1.0e-08\r\n weight_decay 0.02\r\n max_grad_norm 5.0\r\n no_prox\r\n foreach\r\n fused\r\n \"\"\"\r\n def __init__(self,\r\n params,\r\n lr=1e-3,\r\n betas=(0.98, 0.92, 0.99),\r\n eps=1e-8,\r\n weight_decay=0.0,\r\n max_grad_norm=0.0,\r\n no_prox=False,\r\n foreach: bool = True,\r\n fused: bool = False):\r\n if not 0.0 <= max_grad_norm:\r\n raise ValueError('Invalid Max grad norm: {}'.format(max_grad_norm))\r\n if not 0.0 <= lr:\r\n raise ValueError('Invalid learning rate: {}'.format(lr))\r\n if not 0.0 <= eps:\r\n raise ValueError('Invalid epsilon value: {}'.format(eps))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 0: {}'.format(\r\n betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 1: {}'.format(\r\n betas[1]))\r\n if not 0.0 <= betas[2] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 2: {}'.format(\r\n betas[2]))\r\n defaults = dict(lr=lr,\r\n betas=betas,\r\n eps=eps,\r\n weight_decay=weight_decay,\r\n max_grad_norm=max_grad_norm,\r\n no_prox=no_prox,\r\n foreach=foreach,\r\n fused=fused)\r\n super().__init__(params, defaults)\r\n\r\n def __setstate__(self, state):\r\n super(Adan, self).__setstate__(state)\r\n for group in self.param_groups:\r\n group.setdefault('no_prox', False)\r\n\r\n @torch.no_grad()\r\n def restart_opt(self):\r\n for group in self.param_groups:\r\n group['step'] = 0\r\n for p in group['params']:\r\n if p.requires_grad:\r\n state = self.state[p]\r\n # State initialization\r\n\r\n # Exponential moving average of gradient values\r\n state['exp_avg'] = torch.zeros_like(p)\r\n # Exponential moving average of squared gradient values\r\n state['exp_avg_sq'] = torch.zeros_like(p)\r\n # Exponential moving average of gradient difference\r\n state['exp_avg_diff'] = torch.zeros_like(p)\r\n\r\n @torch.no_grad()\r\n def step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\"\"\"\r\n\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n if self.defaults['max_grad_norm'] > 0:\r\n device = self.param_groups[0]['params'][0].device\r\n global_grad_norm = torch.zeros(1, device=device)\r\n\r\n max_grad_norm = torch.tensor(self.defaults['max_grad_norm'],\r\n device=device)\r\n for group in self.param_groups:\r\n\r\n for p in group['params']:\r\n if p.grad is not None:\r\n grad = p.grad\r\n global_grad_norm.add_(grad.pow(2).sum())\r\n\r\n global_grad_norm = 
torch.sqrt(global_grad_norm)\r\n\r\n clip_global_grad_norm = torch.clamp(\r\n max_grad_norm / (global_grad_norm + group['eps']),\r\n max=1.0).item()\r\n else:\r\n clip_global_grad_norm = 1.0\r\n\r\n for group in self.param_groups:\r\n params_with_grad = []\r\n grads = []\r\n exp_avgs = []\r\n exp_avg_sqs = []\r\n exp_avg_diffs = []\r\n neg_pre_grads = []\r\n\r\n beta1, beta2, beta3 = group['betas']\r\n # assume same step across group now to simplify things\r\n # per parameter step can be easily support\r\n # by making it tensor, or pass list into kernel\r\n if 'step' in group:\r\n group['step'] += 1\r\n else:\r\n group['step'] = 1\r\n\r\n bias_correction1 = 1.0 - beta1**group['step']\r\n bias_correction2 = 1.0 - beta2**group['step']\r\n bias_correction3 = 1.0 - beta3**group['step']\r\n\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n params_with_grad.append(p)\r\n grads.append(p.grad)\r\n\r\n state = self.state[p]\r\n if len(state) == 0:\r\n state['exp_avg'] = torch.zeros_like(p)\r\n state['exp_avg_sq'] = torch.zeros_like(p)\r\n state['exp_avg_diff'] = torch.zeros_like(p)\r\n\r\n if 'neg_pre_grad' not in state or group['step'] == 1:\r\n state['neg_pre_grad'] = p.grad.clone().mul_(\r\n -clip_global_grad_norm)\r\n\r\n exp_avgs.append(state['exp_avg'])\r\n exp_avg_sqs.append(state['exp_avg_sq'])\r\n exp_avg_diffs.append(state['exp_avg_diff'])\r\n neg_pre_grads.append(state['neg_pre_grad'])\r\n\r\n kwargs = dict(\r\n params=params_with_grad,\r\n grads=grads,\r\n exp_avgs=exp_avgs,\r\n exp_avg_sqs=exp_avg_sqs,\r\n exp_avg_diffs=exp_avg_diffs,\r\n neg_pre_grads=neg_pre_grads,\r\n beta1=beta1,\r\n beta2=beta2,\r\n beta3=beta3,\r\n bias_correction1=bias_correction1,\r\n bias_correction2=bias_correction2,\r\n bias_correction3_sqrt=math.sqrt(bias_correction3),\r\n lr=group['lr'],\r\n weight_decay=group['weight_decay'],\r\n eps=group['eps'],\r\n no_prox=group['no_prox'],\r\n clip_global_grad_norm=clip_global_grad_norm,\r\n )\r\n\r\n if group['foreach']:\r\n if group['fused']:\r\n if torch.cuda.is_available():\r\n _fused_adan_multi_tensor(**kwargs)\r\n else:\r\n raise ValueError('Fused Adan does not support CPU')\r\n else:\r\n _multi_tensor_adan(**kwargs)\r\n elif group['fused']:\r\n if torch.cuda.is_available():\r\n _fused_adan_single_tensor(**kwargs)\r\n else:\r\n raise ValueError('Fused Adan does not support CPU')\r\n else:\r\n _single_tensor_adan(**kwargs)\r\n\r\n return loss\r" }, { "identifier": "Lion", "path": "lib/train/optimizer/lion.py", "snippet": "class Lion(Optimizer):\r\n r\"\"\"Implements Lion algorithm.\"\"\"\r\n\r\n def __init__(self, params, lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0):\r\n \"\"\"Initialize the hyperparameters.\r\n\r\n Args:\r\n params (iterable): iterable of parameters to optimize or dicts defining\r\n parameter groups\r\n lr (float, optional): learning rate (default: 1e-4)\r\n betas (Tuple[float, float], optional): coefficients used for computing\r\n running averages of gradient and its square (default: (0.9, 0.99))\r\n weight_decay (float, optional): weight decay coefficient (default: 0)\r\n \"\"\"\r\n\r\n if not 0.0 <= lr:\r\n raise ValueError('Invalid learning rate: {}'.format(lr))\r\n if not 0.0 <= betas[0] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))\r\n if not 0.0 <= betas[1] < 1.0:\r\n raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))\r\n defaults = dict(lr=lr, betas=betas, weight_decay=weight_decay)\r\n super().__init__(params, defaults)\r\n\r\n @torch.no_grad()\r\n def 
step(self, closure=None):\r\n \"\"\"Performs a single optimization step.\r\n\r\n Args:\r\n closure (callable, optional): A closure that reevaluates the model\r\n and returns the loss.\r\n\r\n Returns:\r\n the loss.\r\n \"\"\"\r\n loss = None\r\n if closure is not None:\r\n with torch.enable_grad():\r\n loss = closure()\r\n\r\n for group in self.param_groups:\r\n for p in group['params']:\r\n if p.grad is None:\r\n continue\r\n\r\n # Perform stepweight decay\r\n p.data.mul_(1 - group['lr'] * group['weight_decay'])\r\n\r\n grad = p.grad\r\n state = self.state[p]\r\n # State initialization\r\n if len(state) == 0:\r\n # Exponential moving average of gradient values\r\n state['exp_avg'] = torch.zeros_like(p)\r\n\r\n exp_avg = state['exp_avg']\r\n beta1, beta2 = group['betas']\r\n\r\n # Weight update\r\n update = exp_avg * beta1 + grad * (1 - beta1)\r\n p.add_(torch.sign(update), alpha=-group['lr'])\r\n # Decay the momentum running average coefficient\r\n exp_avg.mul_(beta2).add_(grad, alpha=1 - beta2)\r\n\r\n return loss" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\r\n return get_rank() == 0\r" } ]
import torch
import lib.train.data.transforms as tfm
from torch.utils.data.distributed import DistributedSampler
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.optimizer.anan import Adan
from lib.train.optimizer.lion import Lion
from lib.utils.misc import is_main_process
21,064
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE settings.save_interval = cfg.TRAIN.SAVE_INTERVAL def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: # assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", # "COCO17", "VID", "TRACKINGNET"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append( Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_train_full": if settings.use_lmdb: print("Building got10k_train_full from lmdb") datasets.append( Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_votval": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader, env_num=settings.env_num)) if name == "GOT10K_official_val": if settings.use_lmdb: raise ValueError("Not implement") else: datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader, env_num=settings.env_num)) if name == "COCO17": if settings.use_lmdb: print("Building COCO2017 from lmdb") datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader, env_num=settings.env_num)) if name == "VID": if settings.use_lmdb: print("Building VID from lmdb") datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader, env_num=settings.env_num)) else: datasets.append(
ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader, env_num=settings.env_num))
7
2023-10-08 11:44:32+00:00
24k
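The Adan and Lion snippets in the context list of the row above document their constructor arguments and suggested defaults. The following is a minimal usage sketch, not part of the dataset row itself: it assumes the repository's lib.train.optimizer modules (imported exactly as in the import_statement field of this row) are available on the path; the model, inputs, and loss below are placeholders chosen only for illustration, and the hyperparameter values are taken from the snippets' docstrings.

import torch
import torch.nn as nn
from lib.train.optimizer.anan import Adan   # same imports as the row's import_statement field
from lib.train.optimizer.lion import Lion

# Placeholder model and data, purely for illustration.
model = nn.Linear(128, 10)
inputs = torch.randn(4, 128)
targets = torch.randint(0, 10, (4,))

# Adan constructed with the ViT settings listed in its docstring.
optimizer = Adan(model.parameters(), lr=1e-3, betas=(0.98, 0.92, 0.99),
                 weight_decay=0.02, max_grad_norm=5.0)
# Lion would be constructed analogously with its documented defaults:
# optimizer = Lion(model.parameters(), lr=1e-4, betas=(0.9, 0.99), weight_decay=0.0)

# Both classes subclass torch.optim.Optimizer, so the usual step()/zero_grad() loop applies.
loss = nn.functional.cross_entropy(model(inputs), targets)
loss.backward()
optimizer.step()
optimizer.zero_grad()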
LiyaoTang/ERDA
utils/trainer.py
[ { "identifier": "log_config", "path": "config/utils.py", "snippet": "def log_config(config, title='', f_out=None, prefix='', base=None):\n if f_out is None:\n f_out = sys.stdout\n if base is None:\n root = os.path.join(os.getcwd(), os.path.dirname(__file__), '../')\n sys.path += [] if root in sys.path or os.path.realpath(root) in sys.path else [root]\n from config.base import Base as base\n\n print(f'\\n{prefix}<<< ======= {config._cls} ======= {title if title else config.name}', file=f_out)\n max_len = max([len(k) for k in dir(config) if not k.startswith('_')] + [0])\n for k in config.keys(): # dir would sort\n # if k.startswith('_') or _is_method(getattr(config, k)):\n # continue\n cur_attr = getattr(config, k)\n if isinstance(cur_attr, list) and len(str(cur_attr)) > 200: # overlong list\n cur_attr = '[' + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in cur_attr]) + f'\\n{prefix}\\t]'\n\n print('\\t%s%s\\t= %s' % (prefix + k, ' ' * (max_len-len(k)), str(cur_attr)), file=f_out)\n if is_config(cur_attr, base=base):\n log_config(cur_attr, f_out=f_out, prefix=prefix+'\\t', base=base)\n print('\\n', file=f_out, flush=True)" }, { "identifier": "print_dict", "path": "utils/logger.py", "snippet": "def print_dict(d, prefix='', except_k=[], fn=None, head=None, dict_type=(dict,), list_type=(list, tuple), expand_len=120):\n if head is not None:\n d = {head: d}\n for k, v in d.items():\n if k in except_k:\n continue\n if isinstance(d[k], dict_type):\n print(f'{prefix}{str(k)}:')\n print_dict(d[k], prefix=f'{prefix}\\t', except_k=except_k, fn=fn, expand_len=120)\n else:\n if fn:\n rst = None\n try:\n if isinstance(v, list_type):\n rst = v.__class__([fn(vv) for vv in v])\n else:\n rst = fn(v)\n except:\n pass\n v = rst if rst else v\n line = f'{prefix}{str(k)}\\t{str(v)}'\n if isinstance(v, list_type) and expand_len and len(str(line)) > expand_len: # overlong\n line_pre = f'{prefix}{str(k)}\\t' + ('[' if isinstance(v, list) else '(')\n line_post = f'\\n{prefix}\\t' + (']' if isinstance(v, list) else ')')\n if set(dict_type).issuperset(set([type(s) for s in v])): # all dict in list\n print(line_pre)\n for s in v[:-1]:\n print_dict(s, prefix=f'{prefix}\\t\\t')\n print(f'{prefix}\\t\\t,')\n print_dict(v[-1], prefix=f'{prefix}\\t\\t')\n line = line_post\n else:\n line = line_pre + f'\\n{prefix}\\t\\t'.join([''] + [str(s) for s in v]) + line_post\n\n print(line)" }, { "identifier": "print_table", "path": "utils/logger.py", "snippet": "def print_table(t, prefix='', sep=' '): # assume a 2D-list\n max_len = np.array([[len(str(ii)) for ii in l] for l in t], dtype=int).max(axis=0)\n for line in t:\n print(prefix + sep.join([str(ii) + ' ' * (max_len[i] - len(str(ii))) for i, ii in enumerate(line)]))" }, { "identifier": "read_ply", "path": "utils/ply.py", "snippet": "def read_ply(filename, triangular_mesh=False):\n \"\"\"\n Read \".ply\" files\n\n Parameters\n ----------\n filename : string\n the name of the file to read.\n\n Returns\n -------\n result : array\n data stored in the file\n\n Examples\n --------\n Store data in file\n\n >>> points = np.random.rand(5, 3)\n >>> values = np.random.randint(2, size=10)\n >>> write_ply('example.ply', [points, values], ['x', 'y', 'z', 'values'])\n\n Read the file\n\n >>> data = read_ply('example.ply')\n >>> values = data['values']\n array([0, 0, 1, 1, 0])\n \n >>> points = np.vstack((data['x'], data['y'], data['z'])).T\n array([[ 0.466 0.595 0.324]\n [ 0.538 0.407 0.654]\n [ 0.850 0.018 0.988]\n [ 0.395 0.394 0.363]\n [ 0.873 0.996 0.092]])\n\n \"\"\"\n\n with 
open(filename, 'rb') as plyfile:\n\n\n # Check if the file start with ply\n if b'ply' not in plyfile.readline():\n raise ValueError('The file does not start whith the word ply')\n\n # get binary_little/big or ascii\n fmt = plyfile.readline().split()[1].decode()\n if fmt == \"ascii\":\n raise ValueError('The file is not binary')\n\n # get extension for building the numpy dtypes\n ext = valid_formats[fmt]\n\n # PointCloud reader vs mesh reader\n if triangular_mesh:\n\n # Parse header\n num_points, num_faces, properties = parse_mesh_header(plyfile, ext)\n\n # Get point data\n vertex_data = np.fromfile(plyfile, dtype=properties, count=num_points)\n\n # Get face data\n face_properties = [('k', ext + 'u1'),\n ('v1', ext + 'i4'),\n ('v2', ext + 'i4'),\n ('v3', ext + 'i4')]\n faces_data = np.fromfile(plyfile, dtype=face_properties, count=num_faces)\n\n # Return vertex data and concatenated faces\n faces = np.vstack((faces_data['v1'], faces_data['v2'], faces_data['v3'])).T\n data = [vertex_data, faces]\n\n else:\n\n # Parse header\n num_points, properties = parse_header(plyfile, ext)\n\n # Get data\n data = np.fromfile(plyfile, dtype=properties, count=num_points)\n\n return data" }, { "identifier": "write_ply", "path": "utils/ply.py", "snippet": "def write_ply(filename, field_list, field_names, triangular_faces=None):\n \"\"\"\n Write \".ply\" files\n\n Parameters\n ----------\n filename : string\n the name of the file to which the data is saved. A '.ply' extension will be appended to the \n file name if it does no already have one.\n\n field_list : list, tuple, numpy array\n the fields to be saved in the ply file. Either a numpy array, a list of numpy arrays or a \n tuple of numpy arrays. Each 1D numpy array and each column of 2D numpy arrays are considered \n as one field. \n\n field_names : list\n the name of each fields as a list of strings. 
Has to be the same length as the number of \n fields.\n\n Examples\n --------\n >>> points = np.random.rand(10, 3)\n >>> write_ply('example1.ply', points, ['x', 'y', 'z'])\n\n >>> values = np.random.randint(2, size=10)\n >>> write_ply('example2.ply', [points, values], ['x', 'y', 'z', 'values'])\n\n >>> colors = np.random.randint(255, size=(10,3), dtype=np.uint8)\n >>> field_names = ['x', 'y', 'z', 'red', 'green', 'blue', values']\n >>> write_ply('example3.ply', [points, colors, values], field_names)\n\n \"\"\"\n\n # Format list input to the right form\n field_list = list(field_list) if (type(field_list) == list or type(field_list) == tuple) else list((field_list,))\n for i, field in enumerate(field_list):\n if field.ndim < 2:\n field_list[i] = field.reshape(-1, 1)\n if field.ndim > 2:\n print('fields have more than 2 dimensions')\n return False \n\n # check all fields have the same number of data\n n_points = [field.shape[0] for field in field_list]\n if not np.all(np.equal(n_points, n_points[0])):\n print('wrong field dimensions')\n return False \n\n # Check if field_names and field_list have same nb of column\n n_fields = np.sum([field.shape[1] for field in field_list])\n if (n_fields != len(field_names)):\n print('wrong number of field names')\n return False\n\n # Add extension if not there\n if not filename.endswith('.ply'):\n filename += '.ply'\n\n # open in text mode to write the header\n with open(filename, 'w') as plyfile:\n\n # First magical word\n header = ['ply']\n\n # Encoding format\n header.append('format binary_' + sys.byteorder + '_endian 1.0')\n\n # Points properties description\n header.extend(header_properties(field_list, field_names))\n\n # Add faces if needded\n if triangular_faces is not None:\n header.append('element face {:d}'.format(triangular_faces.shape[0]))\n header.append('property list uchar int vertex_indices')\n\n # End of header\n header.append('end_header')\n\n # Write all lines\n for line in header:\n plyfile.write(\"%s\\n\" % line)\n\n # open in binary/append to use tofile\n with open(filename, 'ab') as plyfile:\n\n # Create a structured array\n i = 0\n type_list = []\n for fields in field_list:\n for field in fields.T:\n type_list += [(field_names[i], field.dtype.str)]\n i += 1\n data = np.empty(field_list[0].shape[0], dtype=type_list)\n i = 0\n for fields in field_list:\n for field in fields.T:\n data[field_names[i]] = field\n i += 1\n\n data.tofile(plyfile)\n\n if triangular_faces is not None:\n triangular_faces = triangular_faces.astype(np.int32)\n type_list = [('k', 'uint8')] + [(str(ind), 'int32') for ind in range(3)]\n data = np.empty(triangular_faces.shape[0], dtype=type_list)\n data['k'] = np.full((triangular_faces.shape[0],), 3, dtype=np.uint8)\n data['0'] = triangular_faces[:, 0]\n data['1'] = triangular_faces[:, 1]\n data['2'] = triangular_faces[:, 2]\n data.tofile(plyfile)\n\n return True" }, { "identifier": "ModelTester", "path": "utils/tester.py", "snippet": "class ModelTester:\n\n # Initiation methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def __init__(self, config, verbose=True):\n self.config = config\n self.verbose = verbose\n\n self.save_extra = {} # for saving with extra ops\n\n if config.dataset in ['S3DIS', 'ScanNet', 'SensatUrban']:\n self.val_running_vote = self.val_running_vote_seg\n self.val_vote = self.val_vote_seg\n self.test_vote = self.test_vote_seg\n else:\n raise NotImplementedError(f'not supported dataset: {config.dataset}')\n\n def 
init_pointcloud_log(self, dataset, split, d, dtype=np.float32, init_fn=np.zeros):\n shape = lambda l: [l, d] if d else [l] # d - size of last dimension => each point d-dim [N, d] (d = None to have [N])\n log = [init_fn(shape=shape(t.data.shape[0]), dtype=dtype) for t in dataset.input_trees[split]]\n return log\n\n def initialize(self, ops, dataset, model, split):\n # initialize cum_dict & ops\n config = self.config\n ncls = config.num_classes\n\n run_ops = {k: ops['result_dict'][k] for k in ['inputs', 'seg']} # assumes per-gpu rst - support multi-gpu\n cum_dict = {\n 'prob': self.init_pointcloud_log(dataset, split, ncls)\n }\n\n extra_ops = [k for k in config.extra_ops.split('-') if k]\n extra_ops_solved = extra_ops.copy()\n for k in extra_ops:\n if k in ['prob', 'conf']:\n continue\n else:\n raise ValueError(f'not supported extra ops k = {k} from {config.extra_ops}')\n\n return run_ops, cum_dict, extra_ops_solved\n\n # Val methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def val_running_vote_seg(self, sess, ops, dataset, model, validation_probs, epoch=1):\n \"\"\"\n One epoch validating - running voting used during training, main task results only\n \"\"\"\n\n val_smooth = 0.95 # Choose validation smoothing parameter (0 for no smothing, 0.99 for big smoothing)\n\n result_dict = {k: ops['result_dict'][k] for k in ['inputs', 'seg']} # result dict for seg\n val_ops = {'loss_dict': ops['loss_dict'], 'result_dict': result_dict}\n feed_dict = {ops['is_training']: False}\n\n # Initialise iterator\n sess.run(ops['val_init_op'])\n\n ep = 0\n loss_meter = {k: AverageMeter() for k in val_ops['loss_dict']} if 'loss_dict' in val_ops else{}\n cum_dict = {\n 'conf': 0, # conf from current validation\n 'prob': validation_probs, # accumulating probs\n }\n while ep < epoch:\n try:\n rst = sess.run(val_ops, feed_dict=feed_dict)\n\n loss_dict = rst['loss_dict'] if 'loss_dict' in rst else {}\n cur_rst = rst['result_dict'] # per-gpu result\n\n for k, v in loss_dict.items():\n loss_meter[k].update(v)\n\n # Stack all validation predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=val_smooth)\n\n except tf.errors.OutOfRangeError:\n ep += 1\n pass\n\n if loss_meter:\n print(f'val loss avg:', ' '.join([f'{loss_n} = {meter.avg:.3f}' for loss_n, meter in loss_meter.items()]))\n\n label_to_idx = dataset.label_to_idx\n proportions = dataset.val_proportions\n cur_m = metrics_from_confusions(cum_dict['conf'], proportions=proportions) # use sampled pred-label of current epoch\n vote_m = metrics_from_result(validation_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions) # use the accumulated per-point voting\n\n print(f'metrics - current {cur_m}\\n'\n f' - accumulated {vote_m}', flush=True)\n return cur_m\n\n\n def val_vote_seg(self, sess, ops, dataset, model, num_votes=20):\n \"\"\"\n Voting validating\n \"\"\"\n\n feed_dict = {ops['is_training']: False}\n\n # Smoothing parameter for votes\n val_smooth = 0.95\n\n # Initialise iterator with val data\n sess.run(ops['val_init_op'])\n\n # Initiate global prediction over val clouds\n label_to_idx = dataset.label_to_idx\n proportions = dataset.val_proportions\n val_ops, cum_dict, extra_ops = self.initialize(ops, dataset, model, 'validation')\n val_probs = cum_dict['prob']\n\n vote_ind = 0\n last_min = -0.5\n if self.config.debug:\n 
print_dict(val_ops, head='val_vote_seg - val_ops')\n while last_min < num_votes:\n try:\n cur_rst = sess.run(val_ops, feed_dict=feed_dict)\n # Stack all validation predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=val_smooth)\n\n except tf.errors.OutOfRangeError:\n new_min = np.min(dataset.min_potentials['validation'])\n if self.verbose:\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n if last_min + 1 < new_min:\n # Update last_min\n last_min += 1\n\n if self.verbose > 1:\n # Show vote results on subcloud (match original label to valid) => not the good values here\n vote_m = metrics_from_result(val_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions)\n print('==> Confusion on sub clouds: ', vote_m.scalar_str)\n\n if self.verbose > 1 and int(np.ceil(new_min)) % 2 == 0:\n # Project predictions\n vote_m = metrics_from_result(val_probs, dataset.validation_labels, dataset.num_classes, label_to_idx=label_to_idx, projections=dataset.validation_proj)\n print('==> Confusion on full clouds:', vote_m)\n\n sess.run(ops['val_init_op'])\n vote_ind += 1\n\n vote_m = metrics_from_result(val_probs, dataset.input_labels['validation'], dataset.num_classes, label_to_idx=label_to_idx, proportions=proportions)\n print('==> Confusion on sub clouds - final: ', vote_m.scalar_str)\n\n # Project predictions\n print('==> Confusion on full clouds - final:')\n vote_m = metrics_from_result(val_probs, dataset.validation_labels, dataset.num_classes, label_to_idx=label_to_idx, projections=dataset.validation_proj)\n vote_m.print()\n print('\\nfinished\\n', flush=True)\n\n return\n\n\n # Test methods\n # ------------------------------------------------------------------------------------------------------------------\n\n def test_classification(self, model, dataset, num_votes=100):\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n # Number of classes predicted by the model\n nc_model = config.num_classes\n\n # Initiate votes\n average_probs = np.zeros((len(dataset.input_labels['test']), nc_model))\n average_counts = np.zeros((len(dataset.input_labels['test']), nc_model))\n\n mean_dt = np.zeros(2)\n last_display = time.time()\n while np.min(average_counts) < num_votes:\n\n # Run model on all test examples\n # ******************************\n\n # Initiate result containers\n probs = []\n targets = []\n obj_inds = []\n count = 0\n\n while True:\n try:\n\n # Run one step of the model\n t = [time.time()]\n ops = (self.prob_logits, model.labels, model.inputs['object_inds'])\n prob, labels, inds = self.sess.run(ops, {model.dropout_prob: 1.0})\n t += [time.time()]\n\n # Get probs and labels\n probs += [prob]\n targets += [labels]\n obj_inds += [inds]\n count += prob.shape[0]\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > self.gap_display:\n last_display = t[-1]\n message = 'Vote {:.0f} : {:.1f}% (timings : {:4.2f} {:4.2f})'\n print(message.format(np.min(average_counts),\n 100 * count / dataset.num_test,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1])))\n\n except tf.errors.OutOfRangeError:\n break\n\n # Average votes\n # *************\n\n # Stack all validation predictions\n probs = np.vstack(probs)\n targets = np.hstack(targets)\n obj_inds = np.hstack(obj_inds)\n\n if 
np.any(dataset.input_labels['test'][obj_inds] != targets):\n raise ValueError('wrong object indices')\n\n # Compute incremental average (predictions are always ordered)\n average_counts[obj_inds] += 1\n average_probs[obj_inds] += (probs - average_probs[obj_inds]) / (average_counts[obj_inds])\n\n # Save/Display temporary results\n # ******************************\n\n test_labels = np.array(dataset.label_values)\n\n # Compute classification results\n C1 = confusion_matrix(dataset.input_labels['test'],\n np.argmax(average_probs, axis=1),\n test_labels)\n\n ACC = 100 * np.sum(np.diag(C1)) / (np.sum(C1) + 1e-6)\n print('Test Accuracy = {:.1f}%'.format(ACC))\n\n s = ''\n for cc in C1:\n for c in cc:\n s += '{:d} '.format(c)\n s += '\\n'\n print(s)\n\n\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n return\n\n def test_multi_segmentation(self, model, dataset, num_votes=100, num_saves=10):\n\n ##################\n # Pre-computations\n ##################\n\n print('Preparing test structures')\n t1 = time.time()\n\n # Collect original test file names\n original_path = join(dataset.path, 'test_ply')\n test_names = [f[:-4] for f in listdir(original_path) if f[-4:] == '.ply']\n test_names = np.sort(test_names)\n\n original_labels = []\n original_points = []\n projection_inds = []\n for i, cloud_name in enumerate(test_names):\n\n # Read data in ply file\n data = read_ply(join(original_path, cloud_name + '.ply'))\n points = np.vstack((data['x'], -data['z'], data['y'])).T\n original_labels += [data['label'] - 1]\n original_points += [points]\n\n # Create tree structure to compute neighbors\n tree = KDTree(dataset.input_points['test'][i])\n projection_inds += [np.squeeze(tree.query(points, return_distance=False))]\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n ##########\n # Initiate\n ##########\n\n # Test saving path\n if config.save_test:\n test_path = join(model.saving_path, 'test')\n if not exists(test_path):\n makedirs(test_path)\n else:\n test_path = None\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n # Initiate result containers\n average_predictions = [np.zeros((1, 1), dtype=np.float32) for _ in test_names]\n\n #####################\n # Network predictions\n #####################\n\n mean_dt = np.zeros(2)\n last_display = time.time()\n for v in range(num_votes):\n\n # Run model on all test examples\n # ******************************\n\n # Initiate result containers\n all_predictions = []\n all_obj_inds = []\n\n while True:\n try:\n\n # Run one step of the model\n t = [time.time()]\n ops = (self.prob_logits,\n model.labels,\n model.inputs['super_labels'],\n model.inputs['object_inds'],\n model.inputs['in_batches'])\n preds, labels, obj_labels, o_inds, batches = self.sess.run(ops, {model.dropout_prob: 1.0})\n t += [time.time()]\n\n # Stack all predictions for each class separately\n max_ind = np.max(batches)\n for b_i, b in enumerate(batches):\n\n # Eliminate shadow indices\n b = b[b < max_ind - 0.5]\n\n # Get prediction (only for the concerned parts)\n obj = obj_labels[b[0]]\n predictions = preds[b][:, :config.num_classes[obj]]\n\n # Stack all results\n all_predictions += [predictions]\n all_obj_inds += [o_inds[b_i]]\n\n # Average timing\n t += [time.time()]\n mean_dt = 0.95 * mean_dt + 0.05 * (np.array(t[1:]) - np.array(t[:-1]))\n\n # Display\n if (t[-1] - last_display) > self.gap_display:\n last_display = t[-1]\n message = 'Vote {:d} : {:.1f}% (timings : {:4.2f} {:4.2f})'\n print(message.format(v,\n 
100 * len(all_predictions) / dataset.num_test,\n 1000 * (mean_dt[0]),\n 1000 * (mean_dt[1])))\n\n except tf.errors.OutOfRangeError:\n break\n\n # Project predictions on original point clouds\n # ********************************************\n\n print('\\nGetting test confusions')\n t1 = time.time()\n\n for i, probs in enumerate(all_predictions):\n\n # Interpolate prediction from current positions to original points\n obj_i = all_obj_inds[i]\n proj_predictions = probs[projection_inds[obj_i]]\n\n # Average prediction across votes\n average_predictions[obj_i] = average_predictions[obj_i] + \\\n (proj_predictions - average_predictions[obj_i]) / (v + 1)\n\n Confs = []\n for obj_i, avg_probs in enumerate(average_predictions):\n\n # Compute confusion matrices\n parts = [j for j in range(avg_probs.shape[1])]\n Confs += [confusion_matrix(original_labels[obj_i], np.argmax(avg_probs, axis=1), parts)]\n\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n # Save the best/worst segmentations per class\n # *******************************************\n\n print('Saving test examples')\n t1 = time.time()\n\n # Regroup confusions per object class\n Confs = np.array(Confs)\n obj_mIoUs = []\n for l in dataset.label_values:\n\n # Get confusions for this object\n obj_inds = np.where(dataset.input_labels['test'] == l)[0]\n obj_confs = np.stack(Confs[obj_inds])\n\n # Get IoU\n obj_IoUs = IoU_from_confusions(obj_confs)\n obj_mIoUs += [np.mean(obj_IoUs, axis=-1)]\n\n # Get X best and worst prediction\n order = np.argsort(obj_mIoUs[-1])\n worst_inds = obj_inds[order[:num_saves]]\n best_inds = obj_inds[order[:-num_saves-1:-1]]\n worst_IoUs = obj_IoUs[order[:num_saves]]\n best_IoUs = obj_IoUs[order[:-num_saves-1:-1]]\n\n # Save the names in a file\n if config.save_test:\n obj_path = join(test_path, dataset.label_to_names[l])\n if not exists(obj_path):\n makedirs(obj_path)\n worst_file = join(obj_path, 'worst_inds.txt')\n best_file = join(obj_path, 'best_inds.txt')\n with open(worst_file, \"w\") as text_file:\n for w_i, w_IoUs in zip(worst_inds, worst_IoUs):\n text_file.write('{:d} {:s} :'.format(w_i, test_names[w_i]))\n for IoU in w_IoUs:\n text_file.write(' {:.1f}'.format(100*IoU))\n text_file.write('\\n')\n\n with open(best_file, \"w\") as text_file:\n for b_i, b_IoUs in zip(best_inds, best_IoUs):\n text_file.write('{:d} {:s} :'.format(b_i, test_names[b_i]))\n for IoU in b_IoUs:\n text_file.write(' {:.1f}'.format(100*IoU))\n text_file.write('\\n')\n\n # Save the clouds\n for i, w_i in enumerate(worst_inds):\n filename = join(obj_path, 'worst_{:02d}.ply'.format(i+1))\n preds = np.argmax(average_predictions[w_i], axis=1).astype(np.int32)\n write_ply(filename,\n [original_points[w_i], original_labels[w_i], preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n for i, b_i in enumerate(best_inds):\n filename = join(obj_path, 'best_{:02d}.ply'.format(i+1))\n preds = np.argmax(average_predictions[b_i], axis=1).astype(np.int32)\n write_ply(filename,\n [original_points[b_i], original_labels[b_i], preds],\n ['x', 'y', 'z', 'gt', 'pre'])\n\n t2 = time.time()\n print('Done in {:.1f} s\\n'.format(t2 - t1))\n\n # Display results\n # ***************\n\n objs_average = [np.mean(mIoUs) for mIoUs in obj_mIoUs]\n instance_average = np.mean(np.hstack(obj_mIoUs))\n class_average = np.mean(objs_average)\n\n print('Objs | Inst | Air Bag Cap Car Cha Ear Gui Kni Lam Lap Mot Mug Pis Roc Ska Tab')\n print('-----|------|--------------------------------------------------------------------------------')\n\n s = '{:4.1f} | {:4.1f} | 
'.format(100 * class_average, 100 * instance_average)\n for AmIoU in objs_average:\n s += '{:4.1f} '.format(100 * AmIoU)\n print(s + '\\n')\n\n # Initialise iterator with test data\n self.sess.run(dataset.test_init_op)\n\n return\n\n def test_vote_seg(self, sess, ops, dataset, model, num_votes=20, test_path=None, make_zip=True):\n\n config = self.config\n assert os.path.isdir(config.saving_path), f'not a dir: {config.saving_path}'\n if test_path is None:\n test_path = os.path.join(config.saving_path, 'test')\n os.makedirs(test_path, exist_ok=True)\n\n options = None # tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)\n run_metadata = None # tf.RunMetadata()\n feed_dict = {ops['is_training']: False}\n\n # Smoothing parameter for votes\n test_smooth = 0.98\n\n # Initialise iterator with test data\n sess.run(ops['test_init_op'])\n\n # Initiate global prediction over val clouds\n test_ops, cum_dict, extra_ops = self.initialize(ops, dataset, model, 'test')\n test_probs = cum_dict['prob']\n\n vote_ind = 0\n last_min = -0.5 \n if config.num_votes:\n num_votes = config.num_votes\n while last_min < num_votes:\n try:\n cur_rst = sess.run(test_ops, feed_dict=feed_dict, options=options, run_metadata=run_metadata)\n # Stack all test predictions for each class separately - iterate over each gpu & cloud\n self.cumulate_probs(dataset, model, cur_rst, cum_dict, task='seg', smooth=test_smooth)\n\n except tf.errors.OutOfRangeError:\n # NOTE: need to check\n new_min = np.min(dataset.min_potentials['test'])\n if self.verbose:\n print(f'Step {vote_ind:3d}, end. Min potential = {new_min:.1f}', flush=True)\n\n if last_min + 1 < new_min:\n # Update last_min\n last_min += 1\n\n # if int(last_min) > 0 and int(last_min) // 5 == 0: # periodic test results\n # self.project_test_predictions(dataset, test_path)\n\n sess.run(ops['test_init_op'])\n vote_ind += 1\n\n if self.verbose:\n new_min = np.min(dataset.min_potentials['test'])\n print(f'Step {vote_ind:3d}, end. 
Min potential = {new_min:.1f}', flush=True)\n\n self.project_test_predictions(dataset, test_probs, test_path)\n print('\\nfinished\\n', flush=True)\n\n if make_zip:\n zip_name = test_path.split(os.sep) # cfg name / Log_* / test_*\n zip_name = '_'.join([i for i in ['test', *zip_name[-3:-1], zip_name[-1][len('test'):].strip('_')] if i])\n # include test_* dir (except Semantic3D, ScanNet)\n j = 'j' if config.dataset in ['ScanNet', 'Semantic3D', 'SensatUrban'] else ''\n os.system(f'cd {os.path.dirname(test_path)}; zip -rmTq{j} {zip_name}.zip {test_path.split(os.sep)[-1]}/*') # -m to move, -j junk file, -T test integrity, -q quiet\n os.system(f'rm -r {test_path}')\n return\n\n def project_test_predictions(self, dataset, test_probs, test_path):\n\n # Project predictions\n t1 = time.time()\n files = dataset.test_files\n ignored_inds = None\n if hasattr(dataset, 'ignored_labels_test'):\n ignored_inds = dataset.label_to_idx[[l for l in dataset.ignored_labels_test if l not in dataset.ignored_labels]].astype(int)\n\n config = self.config\n if config.save_test:\n pred_path = os.sep.join([*test_path.split(os.sep)[:-1], test_path.split(os.sep)[-1].replace('test', 'predictions')]) # model pred\n os.makedirs(pred_path, exist_ok=True)\n\n for i_test, file_path in enumerate(files):\n\n # Reproject probs\n probs = test_probs[i_test][dataset.test_proj[i_test], :]\n\n # Remove invalid classes in test\n if ignored_inds is not None:\n probs[:, ignored_inds] = 0\n\n # Get the predicted labels\n preds = dataset.idx_to_label[np.argmax(probs, axis=-1)]\n\n # Save plys - predictions & probs\n cloud_name = file_path.split('/')[-1]\n if config.save_test:\n points = dataset.load_evaluation_points(file_path) # test original points\n pots = dataset.potentials['test'][i_test][dataset.test_proj[i_test]] # project potentials on original points\n test_name = os.path.join(pred_path, cloud_name)\n prob_names = ['_'.join(dataset.label_to_names[label].split()) for label in dataset.label_values if label not in dataset.ignored_labels]\n write_ply(test_name,\n [points, preds, pots, probs],\n ['x', 'y', 'z', 'preds', 'pots'] + prob_names)\n\n # Save ascii preds - submission files\n if config.dataset == 'Semantic3D':\n ascii_name = os.path.join(test_path, dataset.ascii_files[cloud_name])\n np.savetxt(ascii_name, preds, fmt='%d')\n elif config.dataset == 'SensatUrban':\n ascii_name = os.path.join(test_path, f'{cloud_name[:-4]}.label')\n preds.astype(np.uint8).tofile(ascii_name)\n else:\n ascii_name = os.path.join(test_path, cloud_name[:-4] + '.txt')\n np.savetxt(ascii_name, preds, fmt='%d')\n\n t2 = time.time()\n if self.verbose:\n print('\\nReproject Vote in {:.1f}s\\n'.format(t2-t1))\n\n\n # Utilities\n # ------------------------------------------------------------------------------------------------------------------\n\n def cumulate_probs(self, dataset, model, rst, cum_dict, task, smooth):\n # cum_dict - {cum_dict name : {args : rst_dict}}\n\n # iterate over gpu\n for gpu_i, cloud_inds in enumerate(rst['inputs']['cloud_inds']):\n point_inds = rst['inputs']['point_inds'][gpu_i]\n\n b_start = 0\n # iterate over clouds\n for b_i, c_i in enumerate(cloud_inds): # [B]\n if 'batches_len' in rst['inputs']: # [BxN] - stacked\n b_len = rst['inputs']['batches_len'][gpu_i][0][b_i] # npoints in cloud\n b_i = np.arange(b_start, b_start + b_len)\n b_start += b_len\n else: # [B, N] - batched\n pass\n inds = point_inds[b_i] # input point inds\n\n probs = rst[task]['probs'][gpu_i][b_i]\n labels = rst[task]['labels'][gpu_i][b_i]\n if np.all(labels == 
-1):\n # c_pts = np.array(dataset.input_trees['validation'][c_i].data, copy=False)[inds].mean(axis=0)\n # unique_l_cnt = np.unique(dataset.input_labels['validation'][c_i][inds], return_counts=True)\n # raise ValueError(f'all invalid labels found in cumulate_prob: cloud_inds={c_i}, center_pts={c_pts}'\n # f'input_labels & counts - {unique_l_cnt}')\n continue\n if 'conf' in cum_dict:\n cur_conf = confusion_matrix(labels, np.argmax(probs, axis=-1).astype(np.int), labels=np.arange(dataset.num_classes))\n cum_dict['conf'] += cur_conf\n if 'prob' in cum_dict:\n cum_dict['prob'][c_i][inds] = smooth * cum_dict['prob'][c_i][inds] + (1 - smooth) * probs\n if 'feature' in cum_dict:\n cum_dict['feature'][c_i][inds] = smooth * cum_dict['feature'][c_i][inds] + (1 - smooth) * rst[task]['latent'][gpu_i][b_i]\n\n def _search_func(self, k_r, cloud_idx, split, dataset, neighbor_dict, verbose=True): # create tf_ops of generating neighbor_idx & get result\n if cloud_idx in neighbor_dict[k_r]:\n return neighbor_dict[k_r][cloud_idx]\n\n config = self.config\n points = np.array(dataset.input_trees[split][cloud_idx].data, copy=False) # [N, 3]\n\n from ops import get_tf_func\n func = get_tf_func(config.search, verbose=verbose)\n\n if config.search in ['knn']:\n tf_ops = tf.squeeze(func(points[None, ...], points[None, ...], k_r), axis=0)\n elif config.search in ['radius']:\n tf_ops = func(points, points, [len(points)], [len(points)], k_r)\n # if hasattr(dataset, 'neighborhood_limits'):\n # print('neighborhood_limits', dataset.neighborhood_limits[0])\n # tf_ops = tf_ops[..., :dataset.neighborhood_limits[0]]\n else:\n raise\n\n if verbose:\n print_mem(f'k = {k_r} - start', check_time=True, check_sys=True, flush=True)\n with tf.Session(config=tf.ConfigProto(device_count={'GPU': 0}, allow_soft_placement=True)) as s:\n neighbor_idx = s.run(tf_ops)\n if verbose:\n print_mem(f'neighbor_idx {neighbor_idx.shape}', check_time=True, check_sys=True, flush=True)\n\n neighbor_dict[k_r][cloud_idx] = neighbor_idx # neighbor idx - np arr\n return neighbor_idx" }, { "identifier": "average_gradients", "path": "utils/average_gradients.py", "snippet": "def average_gradients(tower_grads, grad_norm, raise_on_none=True, grad_reduce=None, device=None):\n \"\"\"Calculate the average gradient for each shared variable across all towers.\n Note that this function provides a synchronization point across all towers.\n From tensorflow tutorial: cifar10/cifar10_multi_gpu_train.py\n Args:\n tower_grads: List of lists of (gradient, variable) tuples. The outer list\n is over individual gradients. The inner list is over the gradient\n calculation for each tower.\n - [[(g,v), ... at gpu 0], ..., [(g,v), ... at gpu N]]\n Returns:\n List of pairs of (gradient, variable) where the gradient has been averaged\n across all towers.\n \"\"\"\n if device:\n with tf.device(device):\n return average_gradients(tower_grads, grad_norm, raise_on_none, grad_reduce, None)\n\n use_clip = grad_norm and grad_norm > 0\n average_grads = []\n for grad_and_vars in zip(*tower_grads):\n # Note that each grad_and_vars containes (grad, var) calculated at each gpu, looks like the following:\n # ((grad0_gpu0, var0_gpu0), ... 
, (grad0_gpuN, var0_gpuN))\n grads = []\n for g, v in grad_and_vars:\n if g is not None:\n if use_clip:\n g = tf.clip_by_norm(g, grad_norm)\n elif raise_on_none:\n raise ValueError(f'variable {v} got None gradients')\n else:\n continue\n # g = tf.zeros_like(v)\n\n # Append on a 'tower' dimension which we will average over below.\n grads.append(g)\n\n # Average over the 'tower' dimension.\n if len(grads) > 1 and (grad_reduce == 'concat' or not grad_reduce):\n # Add 0 dimension to the gradients to represent the tower.\n # grad = tf.stack(grads)\n grads = [tf.expand_dims(g, 0) for g in grads]\n grad = tf.concat(axis=0, values=grads)\n grad = tf.reduce_mean(grad, 0)\n elif len(grads) > 1 and grad_reduce == 'mean':\n # Direct mean\n grad = tf.accumulate_n(grads) / len(grads)\n elif len(grads) == 1:\n # skip if only 1 gpu\n grad = grads[0]\n elif len(grads) == 0:\n grad = None\n else:\n raise ValueError(f'not support grad_reduce = {grad_reduce}')\n\n # Keep in mind that the Variables are redundant because they are shared\n # across towers. So .. we will just return the first tower's pointer to\n # the Variable.\n v = grad_and_vars[0][1]\n grad_and_var = (grad, v)\n average_grads.append(grad_and_var)\n return average_grads" }, { "identifier": "AdamWeightDecayOptimizer", "path": "utils/AdamWOptimizer.py", "snippet": "class AdamWeightDecayOptimizer(tf.train.Optimizer):\n \"\"\"A basic Adam optimizer that includes \"correct\" L2 weight decay.\"\"\"\n\n def __init__(self,\n learning_rate,\n weight_decay_rate=0.0,\n beta_1=0.9,\n beta_2=0.999,\n epsilon=1e-6,\n exclude_from_weight_decay=None,\n name=\"AdamWeightDecayOptimizer\"):\n \"\"\"Constructs a AdamWeightDecayOptimizer.\"\"\"\n super(AdamWeightDecayOptimizer, self).__init__(False, name)\n\n self.learning_rate = learning_rate\n self.weight_decay_rate = weight_decay_rate\n self.beta_1 = beta_1\n self.beta_2 = beta_2\n self.epsilon = epsilon\n self.exclude_from_weight_decay = exclude_from_weight_decay\n\n def apply_gradients(self, grads_and_vars, global_step=None, name=None):\n \"\"\"See base class.\"\"\"\n assignments = []\n for (grad, param) in grads_and_vars:\n if grad is None or param is None:\n continue\n\n param_name = self._get_variable_name(param.name)\n\n m = tf.get_variable(\n name=param_name + \"/adam_m\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n v = tf.get_variable(\n name=param_name + \"/adam_v\",\n shape=param.shape.as_list(),\n dtype=tf.float32,\n trainable=False,\n initializer=tf.zeros_initializer())\n\n # Standard Adam update.\n next_m = (\n tf.multiply(self.beta_1, m) + tf.multiply(1.0 - self.beta_1, grad))\n next_v = (\n tf.multiply(self.beta_2, v) + tf.multiply(1.0 - self.beta_2,\n tf.square(grad)))\n\n update = next_m / (tf.sqrt(next_v) + self.epsilon)\n\n # Just adding the square of the weights to the loss function is *not*\n # the correct way of using L2 regularization/weight decay with Adam,\n # since that will interact with the m and v parameters in strange ways.\n #\n # Instead we want ot decay the weights in a manner that doesn't interact\n # with the m/v parameters. 
This is equivalent to adding the square\n # of the weights to the loss with plain (non-momentum) SGD.\n if self._do_use_weight_decay(param_name):\n update += self.weight_decay_rate * param\n\n update_with_lr = self.learning_rate * update\n\n next_param = param - update_with_lr\n\n assignments.extend(\n [param.assign(next_param),\n m.assign(next_m),\n v.assign(next_v)])\n return tf.group(*assignments, name=name)\n\n def _do_use_weight_decay(self, param_name):\n \"\"\"Whether to use L2 weight decay for `param_name`.\"\"\"\n if not self.weight_decay_rate:\n return False\n if self.exclude_from_weight_decay:\n for r in self.exclude_from_weight_decay:\n if re.search(r, param_name) is not None:\n return False\n return True\n\n def _get_variable_name(self, param_name):\n \"\"\"Get the variable name from the tensor name.\"\"\"\n m = re.match(\"^(.*):\\\\d+$\", param_name)\n if m is not None:\n param_name = m.group(1)\n return param_name" }, { "identifier": "setup_logger", "path": "utils/logger.py", "snippet": "@functools.lru_cache()\ndef setup_logger(\n output=None, distributed_rank=0, *, color=True, name=\"\", abbrev_name=None\n):\n \"\"\"\n Initialize the detectron2 logger and set its verbosity level to \"INFO\".\n\n Args:\n output (str): a file name or a directory to save log. If None, will not save log file.\n If ends with \".txt\" or \".log\", assumed to be a file name.\n Otherwise, logs will be saved to `output/log.txt`.\n name (str): the root module name of this logger\n\n Returns:\n logging.Logger: a logger\n \"\"\"\n logger = logging.getLogger(name) # a global named logger\n logger.setLevel(logging.DEBUG)\n logger.propagate = False\n\n if abbrev_name is None:\n abbrev_name = name\n\n plain_formatter = logging.Formatter(\n \"[%(asctime)s] %(name)s %(levelname)s: %(message)s\", datefmt=\"%m/%d %H:%M:%S\"\n )\n # stdout logging: master only\n if distributed_rank == 0:\n ch = logging.StreamHandler(stream=sys.stdout)\n ch.setLevel(logging.DEBUG)\n if color:\n formatter = _ColorfulFormatter(\n colored(\"[%(asctime)s %(name)s]: \", \"green\") + \"%(message)s\",\n datefmt=\"%m/%d %H:%M:%S\",\n root_name=name,\n abbrev_name=str(abbrev_name),\n )\n else:\n formatter = plain_formatter\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # file logging: all workers\n if output is not None:\n if output.endswith(\".txt\") or output.endswith(\".log\"):\n filename = output\n else:\n filename = os.path.join(output, \"log.txt\")\n if distributed_rank > 0:\n filename = filename + f\".rank{distributed_rank}\"\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n\n fh = logging.StreamHandler(_cached_log_stream(filename))\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(plain_formatter)\n logger.addHandler(fh)\n\n return logger" }, { "identifier": "StepScheduler", "path": "utils/scheduler.py", "snippet": "class StepScheduler(object):\n def __init__(self, name, base_value, decay_rate, decay_step, max_steps, clip_min=0):\n self.name = name\n self.clip_min = clip_min\n self.cur_step = 0\n self.values = [base_value * decay_rate ** (i // decay_step) for i in range(max_steps)]\n\n def reset(self):\n self.cur_step = 0\n\n def step(self):\n # cur_value = self.base_value * self.decay_rate ** (cur_step // decay_step)\n cur_value = max(self.values[self.cur_step], self.clip_min)\n self.cur_step += 1\n return cur_value" }, { "identifier": "LrScheduler", "path": "utils/scheduler.py", "snippet": "class LrScheduler(object):\n def __init__(self, config):\n self.config = config\n self.start_lr = 
float(config.learning_rate)\n self.clip_min = config.clip_min if config.clip_min else 0\n\n self.decay = config.decay\n if self.decay.startswith('cos'):\n self._get_lr = self._get_lr_cos\n\n self.reset()\n\n # from matplotlib import pyplot as plt\n # plt.plot(self.to_list(config.max_epoch))\n # plt.savefig(config.name)\n\n def reset(self):\n self.cur_ep = 0\n self.cur_step = 0\n self.learning_rate = None # None to denote not initalized\n self.learning_rate = self._get_lr()\n\n def _get_lr_cos(self):\n # simple implementation for cos annealing (epoch based)\n # borrowing from https://github.com/katsura-jp/pytorch-cosine-annealing-with-warmup/blob/master/cosine_annealing_warmup/scheduler.py\n # e.g. cos_w10, cos_w10_c3_m2_g.5\n cfg = self.config\n cur_ep = self.cur_ep\n total_ep = cfg.max_epoch\n max_lr = self.start_lr\n base_lr = self.clip_min if self.clip_min > 0 else 1e-5 # starting lr (min)\n\n warm_ep = re.search('w\\d+', self.decay)\n warm_ep = float(warm_ep.group()[1:]) if warm_ep else 0\n if 0 < warm_ep and warm_ep < 1:\n warm_ep = total_ep * warm_ep\n\n # solve cycle\n cycle_ep = re.search('c\\d+', self.decay)\n cycle_ep = int(cycle_ep.group()[1:]) if cycle_ep else 0 # total num of cycles\n cycle_m = re.search('m\\d+', self.decay)\n cycle_m = float(cycle_m.group()[1:]) if cycle_m else 1 # extending len per cycle\n if cycle_m > 1:\n assert cycle_ep > 0, f'#cycle must > 0'\n cycle_ep_base = total_ep * (cycle_m - 1) / (cycle_m ** cycle_ep - 1) # solving the first cycle len - sum of geometric sequence (等比求和)\n cycle_ep = [cycle_ep_base * cycle_m ** i for i in range(cycle_ep)]\n cycle_n = len([i for i in np.cumsum(cycle_ep) if i < cur_ep]) # num of cycles\n cycle_base = np.sum(cycle_ep[:cycle_n]) # start ep of current cycle\n cycle_ep = cycle_ep[cycle_n] # current cycle length\n elif cycle_ep:\n assert total_ep % cycle_ep == 0, f'#cycle={cycle_ep} does not align with #total={total_ep}'\n cycle_ep = total_ep / cycle_ep # length of each cycle - default to total_ep (1 cycle)\n cycle_n = int(cur_ep / cycle_ep)\n cycle_base = cycle_n * cycle_ep\n else:\n cycle_ep, cycle_n, cycle_base = total_ep, 0, 0\n cur_ep = cur_ep - cycle_base\n\n # modulate max lr\n gamma = [i[1:] for i in self.decay.split('_') if i.startswith('g')]\n gamma = float(gamma[0]) if gamma else 1\n max_lr = max_lr * gamma ** cycle_n\n\n if cur_ep < warm_ep:\n # warmup stage - linear increasing\n return cur_ep / warm_ep * (max_lr - base_lr) + base_lr\n else:\n # cos decay stage\n cur_ep = cur_ep - warm_ep\n cycle_ep = cycle_ep - warm_ep\n decay = (1 + np.cos(np.pi * cur_ep / cycle_ep)) / 2 # rescaled cos weight in [0, 1]\n return base_lr + (max_lr - base_lr) * decay\n\n def _get_lr(self):\n # exponential decay (default)\n cfg = self.config\n cur_ep = self.cur_ep\n base_lr = self.clip_min if self.clip_min > 0 else 1e-5\n\n warm_ep = re.search('w\\d+', self.decay)\n warm_ep = float(warm_ep.group()[1:]) if warm_ep else 0\n\n if cur_ep < warm_ep:\n # warmup stage - linear increasing\n return cur_ep / warm_ep * (self.start_lr - base_lr) + base_lr\n\n # normal decay\n cur_ep = cur_ep - warm_ep\n if cfg.decay_step:\n times = self.cur_step // cfg.decay_step if isinstance(cfg.decay_step, int) else (np.array(cfg.decay_step) <= self.cur_step).sum()\n else:\n decay_epoch = cfg.decay_epoch if cfg.decay_epoch else 1 # decay per epoch by default\n if isinstance(decay_epoch, (list, tuple)):\n assert all(i >= 1 for i in decay_epoch), f'need to specify as real epoch, not {decay_epoch}'\n times = cur_ep // decay_epoch if isinstance(decay_epoch, 
int) else (np.array(decay_epoch) <= cur_ep).sum()\n\n cum_decay = (cfg.decay_rate ** times) if type(cfg.decay_rate) in [int, float] else np.prod(cfg.decay_rate[:times]) # np.prod([]) = 1.0\n cur_lr = self.start_lr * cum_decay\n return cur_lr\n\n def to_list(self, max_epoch=None):\n lrs = []\n max_epoch = max_epoch if max_epoch is not None else self.config.max_epoch\n for i in range(max_epoch):\n self.cur_ep = i\n lrs.append(self._get_lr())\n self.learning_rate = lrs[-1]\n self.reset()\n return lrs\n\n def step(self, epoch, step):\n self.cur_ep += epoch\n self.cur_step += step\n cur_lr = max(self._get_lr(), self.clip_min)\n self.learning_rate = cur_lr\n return cur_lr\n\n def to_plot(self, max_epoch=None):\n lrs = []\n max_epoch = max_epoch if max_epoch is not None else self.config.max_epoch\n for i in range(max_epoch):\n self.cur_ep = i\n lrs.append(self._get_lr())\n self.learning_rate = lrs[-1]\n self.reset()\n import matplotlib.pyplot as plt\n plt.plot(lrs)\n plt.show()\n return " }, { "identifier": "AverageMeter", "path": "utils/metrics.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n \n @property\n def avg(self):\n return self.sum / self.count" }, { "identifier": "GraphBuilder", "path": "utils/tf_graph_builder.py", "snippet": "class GraphBuilder(object):\n\n def __init__(self, config, graph=None, verbose=True):\n \"\"\"\n get the full compute graph including dataset, model inference, loss, optimizer, lr scheduler and required ops\n \"\"\"\n\n if graph is not None: # if graph specified\n with graph.as_default():\n return self.__init__(config, None, verbose)\n\n if isinstance(config.rand_seed, int): # set seed\n tf.set_random_seed(config.rand_seed)\n np.random.seed(config.rand_seed)\n if verbose:\n print(f'==> np random seed = {np.random.get_state()[1][0]}')\n\n # model & dataset fn\n self.get_dataset = getattr(datasets, f'{config.dataset}Dataset') # datasets.[name]Dataset\n self.get_model = models.get_model\n # if config.distribute == 'tf_device': # full compute graph (handle devices & platforms)\n # self.build = self.build_devices\n # else:\n # raise NotImplementedError(f'not supported type of distributing graphs: config.distribute={config.distribute}')\n\n # Get dataset\n if verbose:\n print('==> Preparing datasets...')\n dataset = self.get_dataset(config, verbose)\n dataset.initialize(verbose)\n if verbose:\n print('==> setting dataset info:')\n print_dict(dataset.info, prefix='\\t')\n print_mem('>>> dataset built')\n config.update(dataset.info)\n\n # placeholder\n is_training = tf.placeholder(tf.bool, shape=())\n learning_rate = tf.placeholder(tf.float32, shape=(), name='learning_rate')\n # learning_rate = tf.get_variable('learning_rate', [], initializer=tf.constant_initializer(float('nan')), trainable=False)\n\n # # build model\n # grads, total_loss_dict, total_result_dict, model = self.build(dataset, is_training, config, verbose=verbose)\n\n # -------------------------------------------\n # Get model and loss on multiple GPU devices\n # -------------------------------------------\n # Allocating variables on CPU first will greatly accelerate multi-gpu training.\n # Ref: https://github.com/kuza55/keras-extras/issues/21\n flat_inputs = dataset.flat_inputs\n if config.cpu_variables:\n self.get_model(flat_inputs[0], is_training, 
config=config, verbose=verbose)\n tower_grads = []\n total_losses = []\n total_result = []\n for igpu in range(config.gpu_num):\n with tf.variable_scope(tf.get_variable_scope(), reuse=True if config.cpu_variables else tf.AUTO_REUSE):\n name_scope = f'gpu_{igpu}' if config.cpu_variables or igpu > 0 else ''\n verbose = not bool(name_scope)\n with tf.device(f'/gpu:{igpu}'), tf.name_scope(name_scope) as scope:\n flat_inputs_i = flat_inputs[igpu]\n model = self.get_model(flat_inputs_i, is_training, config=config, scope=scope, verbose=verbose) # inference model\n\n # collect per-gpu info\n result_dict = model.get_result() # inference result\n total_result.append(result_dict)\n\n loss_dict = model.get_loss() # loss\n total_losses.append(loss_dict)\n\n var_list = tf.trainable_variables() # vars & grads\n var_list = self.collect_vars(var_list, include_k=config.vars_train, except_k=config.vars_freeze)\n grads = tf.gradients(loss_dict['loss'], var_list, colocate_gradients_with_ops=config.colocate_gradients_with_ops) # normally, should NOT co-locate\n grads = list(zip(grads, var_list))\n tower_grads.append(grads)\n total_inputs = dict_list(flat_inputs)\n total_result = dict_list(total_result)\n total_losses = dict_list(total_losses)\n\n # average losses from multiple GPUs\n with tf.variable_scope('losses'):\n total_losses = {k: tf.reduce_mean(v, name=k) if len(v) > 1 else v[0] for k, v in total_losses.items()}\n\n # average grad\n with tf.variable_scope('gradients'):\n # [(gradient, variable), ...] - gradient averaged over gpu towers (if >1)\n grads = average_gradients(tower_grads, grad_norm=config.grad_norm, raise_on_none=config.grad_raise_none, grad_reduce=config.grad_reduce)\n\n # setup optimizer\n with tf.variable_scope('optimizer'):\n if config.optimizer == 'sgd':\n optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=config.momentum)\n elif config.optimizer == 'adam':\n optimizer = tf.train.AdamOptimizer(learning_rate)\n elif config.optimizer == 'adamW':\n from utils.AdamWOptimizer import AdamWeightDecayOptimizer\n optimizer = AdamWeightDecayOptimizer(learning_rate=learning_rate, weight_decay_rate=config.weight_decay, exclude_from_weight_decay=[\"bias\"])\n\n # if config.mixed_precision:\n # optimizer = tf.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)\n\n # momentume as update ops\n update_ops = self.get_momentum_update(model, config, total_inputs, total_result)\n for ops in update_ops: # add to collection\n tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, ops)\n\n # train op\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n train_op = optimizer.apply_gradients(grads)\n # train_op = optimizer.apply_gradients(grads)\n # train_op = tf.group([train_op, update_ops])\n\n # saver\n save_vars = None\n if config.save_compact:\n save_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model')\n if isinstance(config.save_compact, bool):\n pass\n elif isinstance(config.save_compact, str) and config.save_compact == 'trained':\n vars_grads = {v: g for g, v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='model')}\n save_vars = [v for v in save_vars if v in vars_grads and vars_grads[v] is not None] # save only trained\n else:\n raise ValueError(f'not support save_compact={config.save_compact}')\n saver = tf.train.Saver(save_vars, max_to_keep=int(config.max_to_keep))\n\n # summary\n with tf.variable_scope('summary'):\n if config.summary and isinstance(config.summary, str):\n inputs = model.inputs\n if 
'summary' not in inputs:\n inputs['summary'] = defaultdict(lambda: [])\n if config.summary == 'loss':\n inputs['summary']['per_step'] += [tf.summary.scalar(k, v) for k, v in total_losses.items()]\n # log grads - debug use\n # inputs = model.inputs\n # inputs['summary'] = defaultdict(lambda: [])\n # from models.utils import tf_Print\n # for i, (g, v) in enumerate(grads):\n # if config.summary:\n # inputs['summary']['per_step'] += [tf.summary.histogram(f'{v.name}/v', v)]\n # inputs['summary']['per_step'] += [tf.summary.histogram(f'{v.name}/g', g)]\n # if v.name in [\n # 'model/resnet_scene_segmentation_head/up_conv3/weights:0',\n # 'model/resnet_scene_segmentation_head/segmentation_head/weights:0',\n # ]:\n # print(f'print grad - {v.name}')\n # g = tf_Print(g, [f'grads - {v.name}', g])\n # grads[i] = (g, v)\n # input('\\nprint above grads')\n # summary - merge\n summary_dict = {} # {level : merged op}\n if config.summary:\n sum_levels = ['per_step', 'per_log', 'per_epoch']\n summary_ops = model.inputs['summary'] if 'summary' in model.inputs else {k: [] for k in sum_levels}\n assert all([k in sum_levels for k in summary_ops]), f'undesired keys in summary ops: {summary_ops.keys()}'\n for i in range(len(sum_levels)):\n lv = sum_levels[-i - 1]\n ops = sum([summary_ops[k] for k in sum_levels[:len(sum_levels)-i]], [])\n summary_dict[lv] = tf.summary.merge(ops) if len(ops) > 0 else tf.no_op()\n\n # Create a session\n cProto = tf.ConfigProto()\n if config.gpu_allow_growth:\n cProto.gpu_options.allow_growth = True\n if config.debug_single:\n cProto.device_count['CPU'] = 1\n # config.intra_op_parallelism_threads = config.inter_op_parallelism_threads = psutil.cpu_count(logical=False) # set to num of physical (default to logical) cpu cores\n cProto.allow_soft_placement = bool(config.allow_soft_placement) or not bool(config.gpu_devices) # if specified or cpu-only\n cProto.log_device_placement = False\n sess = tf.Session(config=cProto)\n\n ops = {\n 'train_init_op': dataset.train_init_op,\n 'val_init_op': dataset.val_init_op,\n 'test_init_op': dataset.test_init_op,\n\n 'train_op': train_op,\n 'is_training': is_training,\n 'learning_rate': learning_rate,\n\n 'inputs': dict(total_inputs),\n 'loss_dict': dict(total_losses),\n 'result_dict': dict(total_result),\n 'summary_dict': dict(summary_dict),\n }\n if verbose:\n print_mem('>>> model built')\n print('\\n -------- inputs {')\n print_dict(model.inputs, prefix='\\t')\n print('} --------- inputs')\n print('\\n -------- loss_dict {')\n print_dict(total_losses, prefix='\\t')\n print('} --------- loss_dict')\n print('\\n -------- result_dict {')\n print_dict(total_result, prefix='\\t')\n print('} --------- result_dict')\n\n self.ops = ops\n self.sess = sess\n self.grads = grads\n self.saver = saver\n\n self.model = model\n self.dataset = dataset\n\n # -------------------------------------------\n # Other utils & interfaces\n # -------------------------------------------\n\n def collect_vars(self, var_list, include_k=[], except_k=[], match='search'):\n # collect specified vars - default to all vars\n var_collect = []\n match_func = getattr(re, match)\n include_k = [include_k] if include_k and isinstance(include_k, str) else include_k\n except_k = [include_k] if except_k and isinstance(except_k, str) else except_k\n for v in var_list:\n if include_k and not any(match_func(k, v.name) for k in include_k):\n continue\n if except_k and any(match_func(k, v.name) for k in except_k):\n continue\n var_collect.append(v)\n return var_collect\n\n def 
get_momentum_update(self, model, config, total_inputs, total_result):\n # collect update ops for momentum update\n update_ops = []\n\n # update ops - momentum dict\n # NOTE - can be done in per-head fashion\n # => check only sepcial 'momentum_update_stage'\n for head_n, head_d in total_result.items():\n if 'momentum_dict' not in head_d or 'momentum_dict' not in total_inputs: continue\n if head_n not in total_inputs['momentum_dict']:\n raise KeyError(f'building momentum cycle for head {head_n}: missing tensor for momentum dict')\n head_cfg = model.head_dict['config'][head_n]\n\n # per-device input/output\n mom_in = total_inputs['momentum_dict'][head_n] # {k : [v = tensor]}, with inputs['momentum_dict'] = {head_n: {k : placeholder/vars}}\n mom_out = head_d['momentum_dict'] # {k: [v = tensor]}\n for k, v_out in mom_out.items():\n v_in = mom_in[k]\n\n # collect for update\n mom_avg = head_cfg.momentum_update\n mom_avg = float(mom_avg) if isinstance(mom_avg, (str, int)) else mom_avg # can be variable\n with tf.variable_scope(f'mom_dict_update/{head_n}/{k}'):\n if head_cfg.momentum_update_stage == 'glb_avg':\n # average over devices\n v_out = tf.reduce_mean(tf.stack(v_out, axis=0), axis=0)\n v_out = [v_in[i] * mom_avg + v_out * (1 - mom_avg) for i in range(config.gpu_num)]\n\n elif head_cfg.momentum_update_stage == 'glb_sum':\n # sum over devices\n v_out = tf.reduce_sum(tf.stack(v_out, axis=0), axis=0)\n v_out = [v_in[i] * mom_avg + v_out * (1 - mom_avg) for i in range(config.gpu_num)]\n\n # create update ops\n for igpu in range(config.gpu_num): # assign to each device input\n with tf.variable_scope(f'gpu_{igpu}/mom_dict_update/{head_n}/{k}', reuse=True):\n update_ops += [tf.assign(v_in[igpu], v_out[igpu])]\n\n return update_ops\n\n\n\n def restore(self, *args, **kwargs):\n argspec = inspect.getfullargspec(restore)\n kwargs.update(zip(argspec.args, args))\n kw_self = {'session': self.sess} # , 'saver': self.saver\n for k, v in kw_self.items():\n if k not in kwargs:\n kwargs[k] = v\n return restore(**kwargs)\n\n def close(self):\n self.sess.close()\n tf.reset_default_graph()" } ]
import os, re, gc, sys, time, pickle, psutil, subprocess import numpy as np import tensorflow as tf from config import log_config from utils.logger import print_dict, print_table from utils.ply import read_ply, write_ply from utils.tester import ModelTester from utils.average_gradients import average_gradients from utils.AdamWOptimizer import AdamWeightDecayOptimizer from utils.logger import setup_logger from utils.scheduler import StepScheduler, LrScheduler from utils.metrics import AverageMeter from utils.tf_graph_builder import GraphBuilder
17,900
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() # PLY reader FILE_DIR = os.path.abspath(__file__) BASE_DIR = os.path.dirname(FILE_DIR) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(ROOT_DIR, 'models')) sys.path.insert(0, os.path.join(ROOT_DIR, 'utils')) DEBUG = False class ModelTrainer: """ get & train the model (potential multi-gpu training) """ def __init__(self, config, verbose=True): self.config = config self.verbose = verbose self.tester = ModelTester(config, verbose=False) def add_summary(self, model): with tf.variable_scope('summary'): summary = model.summary log_content = self.config.log_content if 'var' in log_content: summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs] if 'gard' in log_content: summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs] sum_levels = ['per_step', 'per_log', 'per_epoch'] assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}' for i in range(len(sum_levels)): summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op] self.summary = summary return # Training main method # ------------------------------------------------------------------------------------------------------------------ def train(self): config = self.config with tf.Graph().as_default(): # use one graph # prepare compute graph g = GraphBuilder(config, verbose=self.verbose) ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver model, dataset = g.model, g.dataset self.model = model # printing model parameters if self.verbose: print('\n --------- printing grads {') re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t') print('} --------- printing grads') # all ops in graph print('\n --------- all ops {') re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*'] # '.*/batch_normalization/.*', '.*/bias:.*' # skipping for n in tf.get_default_graph().as_graph_def().node: if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]): continue print('\t', n.name) print('} --------- all ops') # model params all_params_size = sum([np.prod(v.shape) for _, v in grads]) # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads]) # all_params_size = sess.run(all_params_size) print(f'==> Model have {all_params_size} total Params', flush=True) # init sess sess.run(tf.global_variables_initializer()) if self.config.model_path: except_list = [f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*'] if not self.config.continue_training else [] g.restore(sess, self.config.model_path, except_list=except_list) print(f'Model restored -- {self.config.model_path}') # running voting - used throughout the training process (accumulated voting) validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes) # train func if config.debug_nan: self.train_one_epoch = self.train_one_epoch_debug # train metric_best = None # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0]
if tf.__version__.split('.')[0] == '2': tf = tf.compat.v1 tf.disable_v2_behavior() # PLY reader FILE_DIR = os.path.abspath(__file__) BASE_DIR = os.path.dirname(FILE_DIR) ROOT_DIR = os.path.dirname(BASE_DIR) sys.path.insert(0, ROOT_DIR) sys.path.insert(0, BASE_DIR) sys.path.insert(0, os.path.join(ROOT_DIR, 'models')) sys.path.insert(0, os.path.join(ROOT_DIR, 'utils')) DEBUG = False class ModelTrainer: """ get & train the model (potential multi-gpu training) """ def __init__(self, config, verbose=True): self.config = config self.verbose = verbose self.tester = ModelTester(config, verbose=False) def add_summary(self, model): with tf.variable_scope('summary'): summary = model.summary log_content = self.config.log_content if 'var' in log_content: summary['per_log'] += [tf.summary.histogram(v.name, v) for g, v in gvs] if 'gard' in log_content: summary['per_log'] += [tf.summary.histogram(f'{v.name}_grad', g) for g, v in gvs] sum_levels = ['per_step', 'per_log', 'per_epoch'] assert all([k in sum_levels for k in summary.keys()]), f'undesired keys in summary dict: {str(summary.keys())}' for i in range(len(sum_levels)): summary[lv] = tf.summary.merge(summary[lv]) if summary[lv] else [tf.no_op] self.summary = summary return # Training main method # ------------------------------------------------------------------------------------------------------------------ def train(self): config = self.config with tf.Graph().as_default(): # use one graph # prepare compute graph g = GraphBuilder(config, verbose=self.verbose) ops, sess, grads, saver = g.ops, g.sess, g.grads, g.saver model, dataset = g.model, g.dataset self.model = model # printing model parameters if self.verbose: print('\n --------- printing grads {') re_list = ['.*bias:.*', '.*batch_normalization.*'] # skipping print_table([(v.name, g) for g, v in grads if not any([bool(re.fullmatch(expr, v.name)) for expr in re_list])], prefix='\t') print('} --------- printing grads') # all ops in graph print('\n --------- all ops {') re_list = ['optimizer.*', 'gpu_.*', 'gradients.*', 'save.*'] # '.*/batch_normalization/.*', '.*/bias:.*' # skipping for n in tf.get_default_graph().as_graph_def().node: if any([bool(re.fullmatch(expr, n.name)) for expr in re_list]): continue print('\t', n.name) print('} --------- all ops') # model params all_params_size = sum([np.prod(v.shape) for _, v in grads]) # all_params_size = tf.reduce_sum([tf.reduce_prod(v.shape) for _, v in grads]) # all_params_size = sess.run(all_params_size) print(f'==> Model have {all_params_size} total Params', flush=True) # init sess sess.run(tf.global_variables_initializer()) if self.config.model_path: except_list = [f'.*{n}.*' for n in self.config.exclude_vars] + ['optimizer.*'] if not self.config.continue_training else [] g.restore(sess, self.config.model_path, except_list=except_list) print(f'Model restored -- {self.config.model_path}') # running voting - used throughout the training process (accumulated voting) validation_probs = self.tester.init_pointcloud_log(dataset, 'validation', config.num_classes) # train func if config.debug_nan: self.train_one_epoch = self.train_one_epoch_debug # train metric_best = None # save_snap = [i for i in range(1, config.max_epoch + 1) if i % config.save_freq == 0]
lr_scheduler = LrScheduler(config)
10
2023-10-13 08:03:07+00:00
24k
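A minimal standalone sketch, for illustration only, of the single-cycle warmup plus cosine-annealing schedule that the LrScheduler context snippet above describes for decay strings such as cos_w10 (the restart cycles and the per-cycle gamma factor are omitted); it assumes plain Python with NumPy, and the function name cosine_warmup_lr together with the numbers in the usage comment are hypothetical:

import numpy as np

def cosine_warmup_lr(cur_ep, total_ep, max_lr, base_lr=1e-5, warm_ep=10):
    # Hypothetical helper, not part of the record above: single cycle, no gamma.
    if cur_ep < warm_ep:
        # linear warmup from base_lr up to max_lr
        return cur_ep / warm_ep * (max_lr - base_lr) + base_lr
    # cosine decay over the remaining epochs, rescaled cos weight in [0, 1]
    t = cur_ep - warm_ep
    T = total_ep - warm_ep
    decay = (1 + np.cos(np.pi * t / T)) / 2
    return base_lr + (max_lr - base_lr) * decay

# e.g. lrs = [cosine_warmup_lr(e, total_ep=600, max_lr=0.01) for e in range(600)]
# gives a cos_w10-style curve: 10 warmup epochs, then cosine decay down to base_lr.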
bilibini/Lovely_Image_Downloader
py/Python38/site-packages/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "py/Python38/site-packages/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(typing.MutableMapping[str, str]):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n _container: typing.MutableMapping[str, list[str]]\n\n def __init__(self, headers: ValidHTTPHeaderSource | None = None, **kwargs: str):\n super().__init__()\n self._container = {} # 'dict' is insert-ordered in Python 3.7+\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key: str, val: str) -> None:\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n self._container[key.lower()] = [key, val]\n\n def __getitem__(self, key: str) -> str:\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key: str) -> None:\n del self._container[key.lower()]\n\n def __contains__(self, key: object) -> bool:\n if isinstance(key, str):\n return key.lower() in self._container\n return False\n\n def setdefault(self, key: str, default: str = \"\") -> str:\n return super().setdefault(key, default)\n\n def __eq__(self, other: object) -> bool:\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return False\n else:\n other_as_http_header_dict = type(self)(maybe_constructable)\n\n return {k.lower(): v for k, v in self.itermerged()} == {\n k.lower(): v for k, v in other_as_http_header_dict.itermerged()\n }\n\n def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)\n\n def __len__(self) -> int:\n return len(self._container)\n\n def __iter__(self) -> typing.Iterator[str]:\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def discard(self, key: str) -> None:\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key: str, val: str, *, combine: bool = False) -> None:\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n If this is called with combine=True, instead of adding a new header value\n as a distinct item during iteration, this will instead append the value to\n any existing header value with a comma. 
If no existing header value exists\n for the key, then the value will simply be added, ignoring the combine parameter.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n >>> list(headers.items())\n [('foo', 'bar'), ('foo', 'baz')]\n >>> headers.add('foo', 'quz', combine=True)\n >>> list(headers.items())\n [('foo', 'bar, baz, quz')]\n \"\"\"\n # avoid a bytes/str comparison by decoding before httplib\n if isinstance(key, bytes):\n key = key.decode(\"latin-1\")\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n # if there are values here, then there is at least the initial\n # key/value pair\n assert len(vals) >= 2\n if combine:\n vals[-1] = vals[-1] + \", \" + val\n else:\n vals.append(val)\n\n def extend(self, *args: ValidHTTPHeaderSource, **kwargs: str) -> None:\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n f\"extend() takes at most 1 positional arguments ({len(args)} given)\"\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, typing.Mapping):\n for key, val in other.items():\n self.add(key, val)\n elif isinstance(other, typing.Iterable):\n other = typing.cast(typing.Iterable[typing.Tuple[str, str]], other)\n for key, value in other:\n self.add(key, value)\n elif hasattr(other, \"keys\") and hasattr(other, \"__getitem__\"):\n # THIS IS NOT A TYPESAFE BRANCH\n # In this branch, the object has a `keys` attr but is not a Mapping or any of\n # the other types indicated in the method signature. We do some stuff with\n # it as though it partially implements the Mapping interface, but we're not\n # doing that stuff safely AT ALL.\n for key in other.keys():\n self.add(key, other[key])\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n @typing.overload\n def getlist(self, key: str) -> list[str]:\n ...\n\n @typing.overload\n def getlist(self, key: str, default: _DT) -> list[str] | _DT:\n ...\n\n def getlist(\n self, key: str, default: _Sentinel | _DT = _Sentinel.not_passed\n ) -> list[str] | _DT:\n \"\"\"Returns a list of all the values for the named field. 
Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is _Sentinel.not_passed:\n # _DT is unbound; empty list is instance of List[str]\n return []\n # _DT is bound; default is instance of _DT\n return default\n else:\n # _DT may or may not be bound; vals[1:] is instance of List[str], which\n # meets our external interface requirement of `Union[List[str], _DT]`.\n return vals[1:]\n\n def _prepare_for_method_change(self) -> Self:\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}({dict(self.itermerged())})\"\n\n def _copy_from(self, other: HTTPHeaderDict) -> None:\n for key in other:\n val = other.getlist(key)\n self._container[key.lower()] = [key, *val]\n\n def copy(self) -> HTTPHeaderDict:\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self) -> typing.Iterator[tuple[str, str]]:\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self) -> HTTPHeaderDictItemView: # type: ignore[override]\n return HTTPHeaderDictItemView(self)\n\n def _has_value_for_header(self, header_name: str, potential_value: str) -> bool:\n if header_name in self:\n return potential_value in self._container[header_name.lower()][1:]\n return False\n\n def __ior__(self, other: object) -> HTTPHeaderDict:\n # Supports extending a header dict in-place using operator |=\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n self.extend(maybe_constructable)\n return self\n\n def __or__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator |\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = self.copy()\n result.extend(maybe_constructable)\n return result\n\n def __ror__(self, other: object) -> HTTPHeaderDict:\n # Supports merging header dicts using operator | when other is on left side\n # combining items with add instead of __setitem__\n maybe_constructable = ensure_can_construct_http_header_dict(other)\n if maybe_constructable is None:\n return NotImplemented\n result = type(self)(maybe_constructable)\n result.extend(self)\n return result" }, { "identifier": "RecentlyUsedContainer", "path": "py/Python38/site-packages/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(typing.Generic[_KT, _VT], typing.MutableMapping[_KT, _VT]):\n \"\"\"\n Provides a 
thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n _container: typing.OrderedDict[_KT, _VT]\n _maxsize: int\n dispose_func: typing.Callable[[_VT], None] | None\n lock: RLock\n\n def __init__(\n self,\n maxsize: int = 10,\n dispose_func: typing.Callable[[_VT], None] | None = None,\n ) -> None:\n super().__init__()\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n self._container = OrderedDict()\n self.lock = RLock()\n\n def __getitem__(self, key: _KT) -> _VT:\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key: _KT, value: _VT) -> None:\n evicted_item = None\n with self.lock:\n # Possibly evict the existing value of 'key'\n try:\n # If the key exists, we'll overwrite it, which won't change the\n # size of the pool. Because accessing a key should move it to\n # the end of the eviction line, we pop it out first.\n evicted_item = key, self._container.pop(key)\n self._container[key] = value\n except KeyError:\n # When the key does not exist, we insert the value first so that\n # evicting works in all cases, including when self._maxsize is 0\n self._container[key] = value\n if len(self._container) > self._maxsize:\n # If we didn't evict an existing value, and we've hit our maximum\n # size, then we have to evict the least recently used item from\n # the beginning of the container.\n evicted_item = self._container.popitem(last=False)\n\n # After releasing the lock on the pool, dispose of any evicted value.\n if evicted_item is not None and self.dispose_func:\n _, evicted_value = evicted_item\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key: _KT) -> None:\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self) -> int:\n with self.lock:\n return len(self._container)\n\n def __iter__(self) -> typing.NoReturn:\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self) -> None:\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(self._container.values())\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self) -> set[_KT]: # type: ignore[override]\n with self.lock:\n return set(self._container.keys())" }, { "identifier": "RequestMethods", "path": "py/Python38/site-packages/urllib3/_request_methods.py", "snippet": "class RequestMethods:\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, 
it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers: typing.Mapping[str, str] | None = None) -> None:\n self.headers = headers or {}\n\n def urlopen(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **kw: typing.Any,\n ) -> BaseHTTPResponse: # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n json: typing.Any | None = None,\n **urlopen_kw: typing.Any,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n if json is not None and body is not None:\n raise TypeError(\n \"request got values for both 'body' and 'json' parameters which are mutually exclusive\"\n )\n\n if json is not None:\n if headers is None:\n headers = self.headers.copy() # type: ignore\n if not (\"content-type\" in map(str.lower, headers.keys())):\n headers[\"Content-Type\"] = \"application/json\" # type: ignore\n\n body = _json.dumps(json, separators=(\",\", \":\"), ensure_ascii=False).encode(\n \"utf-8\"\n )\n\n if body is not None:\n urlopen_kw[\"body\"] = body\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method,\n url,\n fields=fields, # type: ignore[arg-type]\n headers=headers,\n **urlopen_kw,\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(\n self,\n method: str,\n url: str,\n fields: _TYPE_ENCODE_URL_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method: str,\n url: str,\n fields: _TYPE_FIELDS | None = None,\n headers: typing.Mapping[str, str] | None = None,\n encode_multipart: bool = True,\n multipart_boundary: str | None = None,\n **urlopen_kw: str,\n ) -> BaseHTTPResponse:\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. 
This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw: dict[str, typing.Any] = {\"headers\": HTTPHeaderDict(headers)}\n body: bytes | str\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields), # type: ignore[arg-type]\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"].setdefault(\"Content-Type\", content_type)\n\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "ProxyConfig", "path": "py/Python38/site-packages/urllib3/connection.py", "snippet": " class BaseSSLError(BaseException): # type: ignore[no-redef]\nclass HTTPConnection(_HTTPConnection):\nclass HTTPSConnection(HTTPConnection):\nclass _WrappedAndVerifiedSocket(typing.NamedTuple):\nclass DummyConnection:\nRECENT_DATE = datetime.date(2022, 1, 1)\n_CONTAINS_CONTROL_CHAR_RE = re.compile(r\"[^-!#$%&'*+.^_`|~0-9a-zA-Z]\")\n_HAS_SYS_AUDIT = hasattr(sys, \"audit\")\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n ) -> None:\n def host(self) -> str:\n def host(self, value: str) -> None:\n def _new_conn(self) -> socket.socket:\n def set_tunnel(\n self,\n host: str,\n port: int | None = None,\n headers: typing.Mapping[str, str] | None = None,\n scheme: str = \"http\",\n ) -> None:\n def connect(self) -> None:\n def is_closed(self) -> bool:\n def is_connected(self) -> bool:\n def has_connected_to_proxy(self) -> bool:\n def close(self) -> None:\n def putrequest(\n self,\n method: str,\n url: str,\n skip_host: bool = False,\n skip_accept_encoding: bool = False,\n ) -> None:\n def putheader(self, header: str, 
*values: str) -> None:\n def request( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n *,\n chunked: bool = False,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> None:\n def request_chunked(\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n ) -> None:\n def getresponse( # type: ignore[override]\n self,\n ) -> HTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n *,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n source_address: tuple[str, int] | None = None,\n blocksize: int = 16384,\n socket_options: None\n | (connection._TYPE_SOCKET_OPTIONS) = HTTPConnection.default_socket_options,\n proxy: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n cert_reqs: int | str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n server_hostname: str | None = None,\n ssl_context: ssl.SSLContext | None = None,\n ca_certs: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ssl_minimum_version: int | None = None,\n ssl_maximum_version: int | None = None,\n ssl_version: int | str | None = None, # Deprecated\n cert_file: str | None = None,\n key_file: str | None = None,\n key_password: str | None = None,\n ) -> None:\n def set_cert(\n self,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n assert_hostname: None | str | Literal[False] = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n ca_cert_data: None | str | bytes = None,\n ) -> None:\n def connect(self) -> None:\n def _connect_tls_proxy(self, hostname: str, sock: socket.socket) -> ssl.SSLSocket:\ndef _ssl_wrap_socket_and_match_hostname(\n sock: socket.socket,\n *,\n cert_reqs: None | str | int,\n ssl_version: None | str | int,\n ssl_minimum_version: int | None,\n ssl_maximum_version: int | None,\n cert_file: str | None,\n key_file: str | None,\n key_password: str | None,\n ca_certs: str | None,\n ca_cert_dir: str | None,\n ca_cert_data: None | str | bytes,\n assert_hostname: None | str | Literal[False],\n assert_fingerprint: str | None,\n server_hostname: str | None,\n ssl_context: ssl.SSLContext | None,\n tls_in_tls: bool = False,\n) -> _WrappedAndVerifiedSocket:\ndef _match_hostname(\n cert: _TYPE_PEER_CERT_RET_DICT | None,\n asserted_hostname: str,\n hostname_checks_common_name: bool = False,\n) -> None:\ndef _wrap_proxy_error(err: Exception, proxy_scheme: str | None) -> ProxyError:\ndef _get_default_user_agent() -> str:\ndef _url_from_connection(\n conn: HTTPConnection | HTTPSConnection, path: str | None = None\n) -> str:" }, { "identifier": "HTTPConnectionPool", "path": "py/Python38/site-packages/urllib3/connectionpool.py", "snippet": "_TYPE_TIMEOUT = typing.Union[Timeout, float, _TYPE_DEFAULT, None]\nclass ConnectionPool:\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host: str, port: int | None = None) -> None:\n def __str__(self) -> str:\n def __enter__(self: _SelfT) -> _SelfT:\n def __exit__(\n self,\n exc_type: type[BaseException] | None,\n exc_val: BaseException | None,\n exc_tb: TracebackType | None,\n ) -> Literal[False]:\n def close(self) -> 
None:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n _proxy_config: ProxyConfig | None = None,\n **conn_kw: typing.Any,\n ):\n def _new_conn(self) -> BaseHTTPConnection:\n def _get_conn(self, timeout: float | None = None) -> BaseHTTPConnection:\n def _put_conn(self, conn: BaseHTTPConnection | None) -> None:\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\n def _prepare_proxy(self, conn: BaseHTTPConnection) -> None:\n def _get_timeout(self, timeout: _TYPE_TIMEOUT) -> Timeout:\n def _raise_timeout(\n self,\n err: BaseSSLError | OSError | SocketTimeout,\n url: str,\n timeout_value: _TYPE_TIMEOUT | None,\n ) -> None:\n def _make_request(\n self,\n conn: BaseHTTPConnection,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | None = None,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n chunked: bool = False,\n response_conn: BaseHTTPConnection | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n enforce_content_length: bool = True,\n ) -> BaseHTTPResponse:\n def close(self) -> None:\n def is_same_host(self, url: str) -> bool:\n def urlopen( # type: ignore[override]\n self,\n method: str,\n url: str,\n body: _TYPE_BODY | None = None,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n redirect: bool = True,\n assert_same_host: bool = True,\n timeout: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n pool_timeout: int | None = None,\n release_conn: bool | None = None,\n chunked: bool = False,\n body_pos: _TYPE_BODY_POSITION | None = None,\n preload_content: bool = True,\n decode_content: bool = True,\n **response_kw: typing.Any,\n ) -> BaseHTTPResponse:\n def __init__(\n self,\n host: str,\n port: int | None = None,\n timeout: _TYPE_TIMEOUT | None = _DEFAULT_TIMEOUT,\n maxsize: int = 1,\n block: bool = False,\n headers: typing.Mapping[str, str] | None = None,\n retries: Retry | bool | int | None = None,\n _proxy: Url | None = None,\n _proxy_headers: typing.Mapping[str, str] | None = None,\n key_file: str | None = None,\n cert_file: str | None = None,\n cert_reqs: int | str | None = None,\n key_password: str | None = None,\n ca_certs: str | None = None,\n ssl_version: int | str | None = None,\n ssl_minimum_version: ssl.TLSVersion | None = None,\n ssl_maximum_version: ssl.TLSVersion | None = None,\n assert_hostname: str | Literal[False] | None = None,\n assert_fingerprint: str | None = None,\n ca_cert_dir: str | None = None,\n **conn_kw: typing.Any,\n ) -> None:\n def _prepare_proxy(self, conn: HTTPSConnection) -> None: # type: ignore[override]\n def _new_conn(self) -> BaseHTTPSConnection:\n def _validate_conn(self, conn: BaseHTTPConnection) -> None:\ndef connection_from_url(url: str, **kw: typing.Any) -> HTTPConnectionPool:\ndef _normalize_host(host: None, scheme: str | None) -> None:\ndef _normalize_host(host: str, scheme: str | None) -> str:\ndef _normalize_host(host: str | None, scheme: str | None) -> str | None:\ndef _url_from_pool(\n pool: HTTPConnectionPool | HTTPSConnectionPool, path: str | None = None\n) -> str:\ndef _close_pool_connections(pool: queue.LifoQueue[typing.Any]) -> None:" }, { "identifier": "LocationValueError", "path": 
"py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"" }, { "identifier": "MaxRetryError", "path": "py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param str url: The requested Url\n :param reason: The underlying error\n :type reason: :class:`Exception`\n\n \"\"\"\n\n def __init__(\n self, pool: ConnectionPool, url: str, reason: Exception | None = None\n ) -> None:\n self.reason = reason\n\n message = f\"Max retries exceeded with url: {url} (Caused by {reason!r})\"\n\n super().__init__(pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme: str | None) -> None:\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = f\"Proxy URL had unsupported scheme {scheme}, should use http:// or https://\"\n super().__init__(message)" }, { "identifier": "URLSchemeUnknown", "path": "py/Python38/site-packages/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme: str):\n message = f\"Not supported URL scheme {scheme}\"\n super().__init__(message)\n\n self.scheme = scheme" }, { "identifier": "BaseHTTPResponse", "path": "py/Python38/site-packages/urllib3/response.py", "snippet": "class BaseHTTPResponse(io.IOBase):\n CONTENT_DECODERS = [\"gzip\", \"deflate\"]\n if brotli is not None:\n CONTENT_DECODERS += [\"br\"]\n if zstd is not None:\n CONTENT_DECODERS += [\"zstd\"]\n REDIRECT_STATUSES = [301, 302, 303, 307, 308]\n\n DECODER_ERROR_CLASSES: tuple[type[Exception], ...] 
= (IOError, zlib.error)\n if brotli is not None:\n DECODER_ERROR_CLASSES += (brotli.error,)\n\n if zstd is not None:\n DECODER_ERROR_CLASSES += (zstd.ZstdError,)\n\n def __init__(\n self,\n *,\n headers: typing.Mapping[str, str] | typing.Mapping[bytes, bytes] | None = None,\n status: int,\n version: int,\n reason: str | None,\n decode_content: bool,\n request_url: str | None,\n retries: Retry | None = None,\n ) -> None:\n if isinstance(headers, HTTPHeaderDict):\n self.headers = headers\n else:\n self.headers = HTTPHeaderDict(headers) # type: ignore[arg-type]\n self.status = status\n self.version = version\n self.reason = reason\n self.decode_content = decode_content\n self._has_decoded_content = False\n self._request_url: str | None = request_url\n self.retries = retries\n\n self.chunked = False\n tr_enc = self.headers.get(\"transfer-encoding\", \"\").lower()\n # Don't incur the penalty of creating a list and then discarding it\n encodings = (enc.strip() for enc in tr_enc.split(\",\"))\n if \"chunked\" in encodings:\n self.chunked = True\n\n self._decoder: ContentDecoder | None = None\n\n def get_redirect_location(self) -> str | None | Literal[False]:\n \"\"\"\n Should we redirect and where to?\n\n :returns: Truthy redirect location string if we got a redirect status\n code and valid location. ``None`` if redirect status and no\n location. ``False`` if not a redirect status code.\n \"\"\"\n if self.status in self.REDIRECT_STATUSES:\n return self.headers.get(\"location\")\n return False\n\n @property\n def data(self) -> bytes:\n raise NotImplementedError()\n\n def json(self) -> typing.Any:\n \"\"\"\n Parses the body of the HTTP response as JSON.\n\n To use a custom JSON decoder pass the result of :attr:`HTTPResponse.data` to the decoder.\n\n This method can raise either `UnicodeDecodeError` or `json.JSONDecodeError`.\n\n Read more :ref:`here <json>`.\n \"\"\"\n data = self.data.decode(\"utf-8\")\n return _json.loads(data)\n\n @property\n def url(self) -> str | None:\n raise NotImplementedError()\n\n @url.setter\n def url(self, url: str | None) -> None:\n raise NotImplementedError()\n\n @property\n def connection(self) -> HTTPConnection | None:\n raise NotImplementedError()\n\n @property\n def retries(self) -> Retry | None:\n return self._retries\n\n @retries.setter\n def retries(self, retries: Retry | None) -> None:\n # Override the request_url if retries has a redirect location.\n if retries is not None and retries.history:\n self.url = retries.history[-1].redirect_location\n self._retries = retries\n\n def stream(\n self, amt: int | None = 2**16, decode_content: bool | None = None\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def read(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n cache_content: bool = False,\n ) -> bytes:\n raise NotImplementedError()\n\n def read_chunked(\n self,\n amt: int | None = None,\n decode_content: bool | None = None,\n ) -> typing.Iterator[bytes]:\n raise NotImplementedError()\n\n def release_conn(self) -> None:\n raise NotImplementedError()\n\n def drain_conn(self) -> None:\n raise NotImplementedError()\n\n def close(self) -> None:\n raise NotImplementedError()\n\n def _init_decoder(self) -> None:\n \"\"\"\n Set-up the _decoder attribute if necessary.\n \"\"\"\n # Note: content-encoding value should be case-insensitive, per RFC 7230\n # Section 3.2\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n if self._decoder is None:\n if content_encoding in self.CONTENT_DECODERS:\n self._decoder = 
_get_decoder(content_encoding)\n elif \",\" in content_encoding:\n encodings = [\n e.strip()\n for e in content_encoding.split(\",\")\n if e.strip() in self.CONTENT_DECODERS\n ]\n if encodings:\n self._decoder = _get_decoder(content_encoding)\n\n def _decode(\n self, data: bytes, decode_content: bool | None, flush_decoder: bool\n ) -> bytes:\n \"\"\"\n Decode the data passed in and potentially flush the decoder.\n \"\"\"\n if not decode_content:\n if self._has_decoded_content:\n raise RuntimeError(\n \"Calling read(decode_content=False) is not supported after \"\n \"read(decode_content=True) was called.\"\n )\n return data\n\n try:\n if self._decoder:\n data = self._decoder.decompress(data)\n self._has_decoded_content = True\n except self.DECODER_ERROR_CLASSES as e:\n content_encoding = self.headers.get(\"content-encoding\", \"\").lower()\n raise DecodeError(\n \"Received response with content-encoding: %s, but \"\n \"failed to decode it.\" % content_encoding,\n e,\n ) from e\n if flush_decoder:\n data += self._flush_decoder()\n\n return data\n\n def _flush_decoder(self) -> bytes:\n \"\"\"\n Flushes the decoder. Should only be called if the decoder is actually\n being used.\n \"\"\"\n if self._decoder:\n return self._decoder.decompress(b\"\") + self._decoder.flush()\n return b\"\"\n\n # Compatibility methods for `io` module\n def readinto(self, b: bytearray) -> int:\n temp = self.read(len(b))\n if len(temp) == 0:\n return 0\n else:\n b[: len(temp)] = temp\n return len(temp)\n\n # Compatibility methods for http.client.HTTPResponse\n def getheaders(self) -> HTTPHeaderDict:\n warnings.warn(\n \"HTTPResponse.getheaders() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead access HTTPResponse.headers directly.\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers\n\n def getheader(self, name: str, default: str | None = None) -> str | None:\n warnings.warn(\n \"HTTPResponse.getheader() is deprecated and will be removed \"\n \"in urllib3 v2.1.0. Instead use HTTPResponse.headers.get(name, default).\",\n category=DeprecationWarning,\n stacklevel=2,\n )\n return self.headers.get(name, default)\n\n # Compatibility method for http.cookiejar\n def info(self) -> HTTPHeaderDict:\n return self.headers\n\n def geturl(self) -> str | None:\n return self.url" }, { "identifier": "_TYPE_SOCKET_OPTIONS", "path": "py/Python38/site-packages/urllib3/util/connection.py", "snippet": "_TYPE_SOCKET_OPTIONS = typing.Sequence[typing.Tuple[int, int, typing.Union[int, bytes]]]" }, { "identifier": "connection_requires_http_tunnel", "path": "py/Python38/site-packages/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url: Url | None = None,\n proxy_config: ProxyConfig | None = None,\n destination_scheme: str | None = None,\n) -> bool:\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. 
(i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "py/Python38/site-packages/urllib3/util/retry.py", "snippet": "class Retry:\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool:\n\n .. code-block:: python\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request(\"GET\", \"https://example.com/\")\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=Retry(10))\n\n Retries can be disabled by passing ``False``:\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param Collection allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). 
See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``None`` value to retry on any verb.\n\n :param Collection status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of previous retries}))\n\n seconds. If `backoff_jitter` is non-zero, this sleep is extended by::\n\n random.uniform(0, {backoff jitter})\n\n seconds. For example, if the backoff_factor is 0.1, then :func:`Retry.sleep` will\n sleep for [0.0s, 0.2s, 0.4s, 0.8s, ...] between retries. No backoff will ever\n be longer than `backoff_max`.\n\n By default, backoff is disabled (factor set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param Collection remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Default maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n # Backward compatibility; assigned outside of the class.\n DEFAULT: typing.ClassVar[Retry]\n\n def __init__(\n self,\n total: bool | int | None = 10,\n connect: int | None = None,\n read: int | None = None,\n redirect: bool | int | None = None,\n status: int | None = None,\n other: int | None = None,\n allowed_methods: typing.Collection[str] | None = DEFAULT_ALLOWED_METHODS,\n status_forcelist: typing.Collection[int] | None = None,\n backoff_factor: float = 0,\n backoff_max: float = DEFAULT_BACKOFF_MAX,\n raise_on_redirect: bool = True,\n raise_on_status: bool = True,\n history: tuple[RequestHistory, ...] 
| None = None,\n respect_retry_after_header: bool = True,\n remove_headers_on_redirect: typing.Collection[\n str\n ] = DEFAULT_REMOVE_HEADERS_ON_REDIRECT,\n backoff_jitter: float = 0.0,\n ) -> None:\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.backoff_max = backoff_max\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or ()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n h.lower() for h in remove_headers_on_redirect\n )\n self.backoff_jitter = backoff_jitter\n\n def new(self, **kw: typing.Any) -> Retry:\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n allowed_methods=self.allowed_methods,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n backoff_max=self.backoff_max,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n backoff_jitter=self.backoff_jitter,\n )\n\n params.update(kw)\n return type(self)(**params) # type: ignore[arg-type]\n\n @classmethod\n def from_int(\n cls,\n retries: Retry | bool | int | None,\n redirect: bool | int | None = True,\n default: Retry | bool | int | None = None,\n ) -> Retry:\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self) -> float:\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n if self.backoff_jitter != 0.0:\n backoff_value += random.random() * self.backoff_jitter\n return float(max(0, min(self.backoff_max, backoff_value)))\n\n def parse_retry_after(self, retry_after: str) -> float:\n seconds: float\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(f\"Invalid Retry-After header: {retry_after}\")\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n seconds = max(seconds, 0)\n\n return seconds\n\n def get_retry_after(self, response: BaseHTTPResponse) -> float | None:\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return 
self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response: BaseHTTPResponse) -> bool:\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self) -> None:\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response: BaseHTTPResponse | None = None) -> None:\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err: Exception) -> bool:\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err: Exception) -> bool:\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method: str) -> bool:\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n if self.allowed_methods and method.upper() not in self.allowed_methods:\n return False\n return True\n\n def is_retry(\n self, method: str, status_code: int, has_retry_after: bool = False\n ) -> bool:\n \"\"\"Is this method/status code retryable? 
(Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return bool(\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self) -> bool:\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = [\n x\n for x in (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n if x\n ]\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method: str | None = None,\n url: str | None = None,\n response: BaseHTTPResponse | None = None,\n error: Exception | None = None,\n _pool: ConnectionPool | None = None,\n _stacktrace: TracebackType | None = None,\n ) -> Retry:\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.BaseHTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n elif error and self._is_read_error(error):\n # Read retry?\n if read is False or method is None or not self._is_method_retryable(method):\n raise reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n response_redirect_location = response.get_redirect_location()\n if response_redirect_location:\n redirect_location = response_redirect_location\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n reason = error or ResponseError(cause)\n raise MaxRetryError(_pool, url, reason) from reason # type: ignore[arg-type]\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, 
new_retry)\n\n return new_retry\n\n def __repr__(self) -> str:\n return (\n f\"{type(self).__name__}(total={self.total}, connect={self.connect}, \"\n f\"read={self.read}, redirect={self.redirect}, status={self.status})\"\n )" }, { "identifier": "Timeout", "path": "py/Python38/site-packages/urllib3/util/timeout.py", "snippet": "class Timeout:\n \"\"\"Timeout configuration.\n\n Timeouts can be defined as a default for a pool:\n\n .. code-block:: python\n\n import urllib3\n\n timeout = urllib3.util.Timeout(connect=2.0, read=7.0)\n\n http = urllib3.PoolManager(timeout=timeout)\n\n resp = http.request(\"GET\", \"https://example.com/\")\n\n print(resp.status)\n\n Or per-request (which overrides the default for the pool):\n\n .. code-block:: python\n\n response = http.request(\"GET\", \"https://example.com/\", timeout=Timeout(10))\n\n Timeouts can be disabled by setting all the parameters to ``None``:\n\n .. code-block:: python\n\n no_timeout = Timeout(connect=None, read=None)\n response = http.request(\"GET\", \"https://example.com/\", timeout=no_timeout)\n\n\n :param total:\n This combines the connect and read timeouts into one; the read timeout\n will be set to the time leftover from the connect attempt. In the\n event that both a connect timeout and a total are specified, or a read\n timeout and a total are specified, the shorter timeout will be applied.\n\n Defaults to None.\n\n :type total: int, float, or None\n\n :param connect:\n The maximum amount of time (in seconds) to wait for a connection\n attempt to a server to succeed. Omitting the parameter will default the\n connect timeout to the system default, probably `the global default\n timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout for connection attempts.\n\n :type connect: int, float, or None\n\n :param read:\n The maximum amount of time (in seconds) to wait between consecutive\n read operations for a response from the server. Omitting the parameter\n will default the read timeout to the system default, probably `the\n global default timeout in socket.py\n <http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.\n None will set an infinite timeout.\n\n :type read: int, float, or None\n\n .. note::\n\n Many factors can affect the total amount of time for urllib3 to return\n an HTTP response.\n\n For example, Python's DNS resolver does not obey the timeout specified\n on the socket. Other factors that can affect total request time include\n high CPU load, high swap, the program running at a low priority level,\n or other behaviors.\n\n In addition, the read and total timeouts only measure the time between\n read operations on the socket connecting the client and the server,\n not the total amount of time for the request to return a complete\n response. For most requests, the timeout is raised because the server\n has not sent the first byte in the specified time. 
This is not always\n the case; if a server streams one byte every fifteen seconds, a timeout\n of 20 seconds will not trigger, even though the request will take\n several minutes to complete.\n\n If your goal is to cut off any request after a set amount of wall clock\n time, consider having a second \"watcher\" thread to cut off a slow\n request.\n \"\"\"\n\n #: A sentinel object representing the default timeout value\n DEFAULT_TIMEOUT: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT\n\n def __init__(\n self,\n total: _TYPE_TIMEOUT = None,\n connect: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n read: _TYPE_TIMEOUT = _DEFAULT_TIMEOUT,\n ) -> None:\n self._connect = self._validate_timeout(connect, \"connect\")\n self._read = self._validate_timeout(read, \"read\")\n self.total = self._validate_timeout(total, \"total\")\n self._start_connect: float | None = None\n\n def __repr__(self) -> str:\n return f\"{type(self).__name__}(connect={self._connect!r}, read={self._read!r}, total={self.total!r})\"\n\n # __str__ provided for backwards compatibility\n __str__ = __repr__\n\n @staticmethod\n def resolve_default_timeout(timeout: _TYPE_TIMEOUT) -> float | None:\n return getdefaulttimeout() if timeout is _DEFAULT_TIMEOUT else timeout\n\n @classmethod\n def _validate_timeout(cls, value: _TYPE_TIMEOUT, name: str) -> _TYPE_TIMEOUT:\n \"\"\"Check that a timeout attribute is valid.\n\n :param value: The timeout value to validate\n :param name: The name of the timeout attribute to validate. This is\n used to specify in error messages.\n :return: The validated and casted version of the given value.\n :raises ValueError: If it is a numeric value less than or equal to\n zero, or the type is not an integer, float, or None.\n \"\"\"\n if value is None or value is _DEFAULT_TIMEOUT:\n return value\n\n if isinstance(value, bool):\n raise ValueError(\n \"Timeout cannot be a boolean value. It must \"\n \"be an int, float or None.\"\n )\n try:\n float(value)\n except (TypeError, ValueError):\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n try:\n if value <= 0:\n raise ValueError(\n \"Attempted to set %s timeout to %s, but the \"\n \"timeout cannot be set to a value less \"\n \"than or equal to 0.\" % (name, value)\n )\n except TypeError:\n raise ValueError(\n \"Timeout value %s was %s, but it must be an \"\n \"int, float or None.\" % (name, value)\n ) from None\n\n return value\n\n @classmethod\n def from_float(cls, timeout: _TYPE_TIMEOUT) -> Timeout:\n \"\"\"Create a new Timeout from a legacy timeout value.\n\n The timeout value used by httplib.py sets the same timeout on the\n connect(), and recv() socket requests. 
This creates a :class:`Timeout`\n object that sets the individual timeouts to the ``timeout`` value\n passed to this function.\n\n :param timeout: The legacy timeout value.\n :type timeout: integer, float, :attr:`urllib3.util.Timeout.DEFAULT_TIMEOUT`, or None\n :return: Timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n return Timeout(read=timeout, connect=timeout)\n\n def clone(self) -> Timeout:\n \"\"\"Create a copy of the timeout object\n\n Timeout properties are stored per-pool but each request needs a fresh\n Timeout object to ensure each one has its own start/stop configured.\n\n :return: a copy of the timeout object\n :rtype: :class:`Timeout`\n \"\"\"\n # We can't use copy.deepcopy because that will also create a new object\n # for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to\n # detect the user default.\n return Timeout(connect=self._connect, read=self._read, total=self.total)\n\n def start_connect(self) -> float:\n \"\"\"Start the timeout clock, used during a connect() attempt\n\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to start a timer that has been started already.\n \"\"\"\n if self._start_connect is not None:\n raise TimeoutStateError(\"Timeout timer has already been started.\")\n self._start_connect = time.monotonic()\n return self._start_connect\n\n def get_connect_duration(self) -> float:\n \"\"\"Gets the time elapsed since the call to :meth:`start_connect`.\n\n :return: Elapsed time in seconds.\n :rtype: float\n :raises urllib3.exceptions.TimeoutStateError: if you attempt\n to get duration for a timer that hasn't been started.\n \"\"\"\n if self._start_connect is None:\n raise TimeoutStateError(\n \"Can't get connect duration for timer that has not started.\"\n )\n return time.monotonic() - self._start_connect\n\n @property\n def connect_timeout(self) -> _TYPE_TIMEOUT:\n \"\"\"Get the value to use when setting a connection timeout.\n\n This will be a positive float or integer, the value None\n (never timeout), or the default system timeout.\n\n :return: Connect timeout.\n :rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None\n \"\"\"\n if self.total is None:\n return self._connect\n\n if self._connect is None or self._connect is _DEFAULT_TIMEOUT:\n return self.total\n\n return min(self._connect, self.total) # type: ignore[type-var]\n\n @property\n def read_timeout(self) -> float | None:\n \"\"\"Get the value for the read timeout.\n\n This assumes some time has elapsed in the connection timeout and\n computes the read timeout appropriately.\n\n If self.total is set, the read timeout is dependent on the amount of\n time taken by the connect timeout. 
If the connection time has not been\n established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be\n raised.\n\n :return: Value to use for the read timeout.\n :rtype: int, float or None\n :raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`\n has not yet been called on this object.\n \"\"\"\n if (\n self.total is not None\n and self.total is not _DEFAULT_TIMEOUT\n and self._read is not None\n and self._read is not _DEFAULT_TIMEOUT\n ):\n # In case the connect timeout has not yet been established.\n if self._start_connect is None:\n return self._read\n return max(0, min(self.total - self.get_connect_duration(), self._read))\n elif self.total is not None and self.total is not _DEFAULT_TIMEOUT:\n return max(0, self.total - self.get_connect_duration())\n else:\n return self.resolve_default_timeout(self._read)" }, { "identifier": "Url", "path": "py/Python38/site-packages/urllib3/util/url.py", "snippet": "class Url(\n typing.NamedTuple(\n \"Url\",\n [\n (\"scheme\", typing.Optional[str]),\n (\"auth\", typing.Optional[str]),\n (\"host\", typing.Optional[str]),\n (\"port\", typing.Optional[int]),\n (\"path\", typing.Optional[str]),\n (\"query\", typing.Optional[str]),\n (\"fragment\", typing.Optional[str]),\n ],\n )\n):\n \"\"\"\n Data structure for representing an HTTP URL. Used as a return value for\n :func:`parse_url`. Both the scheme and host are normalized as they are\n both case-insensitive according to RFC 3986.\n \"\"\"\n\n def __new__( # type: ignore[no-untyped-def]\n cls,\n scheme: str | None = None,\n auth: str | None = None,\n host: str | None = None,\n port: int | None = None,\n path: str | None = None,\n query: str | None = None,\n fragment: str | None = None,\n ):\n if path and not path.startswith(\"/\"):\n path = \"/\" + path\n if scheme is not None:\n scheme = scheme.lower()\n return super().__new__(cls, scheme, auth, host, port, path, query, fragment)\n\n @property\n def hostname(self) -> str | None:\n \"\"\"For backwards-compatibility with urlparse. We're nice like that.\"\"\"\n return self.host\n\n @property\n def request_uri(self) -> str:\n \"\"\"Absolute path including the query string.\"\"\"\n uri = self.path or \"/\"\n\n if self.query is not None:\n uri += \"?\" + self.query\n\n return uri\n\n @property\n def authority(self) -> str | None:\n \"\"\"\n Authority component as defined in RFC 3986 3.2.\n This includes userinfo (auth), host and port.\n\n i.e.\n userinfo@host:port\n \"\"\"\n userinfo = self.auth\n netloc = self.netloc\n if netloc is None or userinfo is None:\n return netloc\n else:\n return f\"{userinfo}@{netloc}\"\n\n @property\n def netloc(self) -> str | None:\n \"\"\"\n Network location including host and port.\n\n If you need the equivalent of urllib.parse's ``netloc``,\n use the ``authority`` property instead.\n \"\"\"\n if self.host is None:\n return None\n if self.port:\n return f\"{self.host}:{self.port}\"\n return self.host\n\n @property\n def url(self) -> str:\n \"\"\"\n Convert self into a url\n\n This function should more or less round-trip with :func:`.parse_url`. The\n returned url may not be exactly the same as the url inputted to\n :func:`.parse_url`, but it should be equivalent by the RFC (e.g., urls\n with a blank port will have : removed).\n\n Example:\n\n .. 
code-block:: python\n\n import urllib3\n\n U = urllib3.util.parse_url(\"https://google.com/mail/\")\n\n print(U.url)\n # \"https://google.com/mail/\"\n\n print( urllib3.util.Url(\"https\", \"username:password\",\n \"host.com\", 80, \"/path\", \"query\", \"fragment\"\n ).url\n )\n # \"https://username:[email protected]:80/path?query#fragment\"\n \"\"\"\n scheme, auth, host, port, path, query, fragment = self\n url = \"\"\n\n # We use \"is not None\" we want things to happen with empty strings (or 0 port)\n if scheme is not None:\n url += scheme + \"://\"\n if auth is not None:\n url += auth + \"@\"\n if host is not None:\n url += host\n if port is not None:\n url += \":\" + str(port)\n if path is not None:\n url += path\n if query is not None:\n url += \"?\" + query\n if fragment is not None:\n url += \"#\" + fragment\n\n return url\n\n def __str__(self) -> str:\n return self.url" }, { "identifier": "parse_url", "path": "py/Python38/site-packages/urllib3/util/url.py", "snippet": "def parse_url(url: str) -> Url:\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urllib.parse`.\n\n Example:\n\n .. code-block:: python\n\n import urllib3\n\n print( urllib3.util.parse_url('http://google.com/mail/'))\n # Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n\n print( urllib3.util.parse_url('google.com:80'))\n # Url(scheme=None, host='google.com', port=80, path=None, ...)\n\n print( urllib3.util.parse_url('/foo?bar'))\n # Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not _SCHEME_RE.search(url):\n url = \"//\" + url\n\n scheme: str | None\n authority: str | None\n auth: str | None\n host: str | None\n port: str | None\n port_int: int | None\n path: str | None\n query: str | None\n fragment: str | None\n\n try:\n scheme, authority, path, query, fragment = _URI_RE.match(url).groups() # type: ignore[union-attr]\n normalize_uri = scheme is None or scheme.lower() in _NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups() # type: ignore[union-attr]\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, _USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port_int = int(port)\n if not (0 <= port_int <= 65535):\n raise LocationParseError(url)\n else:\n port_int = None\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, _PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, _QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, _FRAGMENT_CHARS)\n\n except (ValueError, AttributeError) as e:\n raise LocationParseError(source_url) from e\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not 
path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n return Url(\n scheme=scheme,\n auth=auth,\n host=host,\n port=port_int,\n path=path,\n query=query,\n fragment=fragment,\n )" } ]
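The Retry docstring in the context snippets above spells out the backoff rule: the sleep grows as backoff_factor times a power of two per consecutive error, is optionally extended by a random jitter, and is never longer than backoff_max, with no delay before the second try. A minimal, self-contained sketch of that schedule follows; the function name and defaults are illustrative only, not urllib3 API.

# Sketch of the backoff schedule described in the Retry docstring above:
# backoff_factor * 2**(consecutive_errors - 1), plus optional jitter,
# capped at backoff_max; the second try happens without delay.
import random

def backoff_schedule(retries, backoff_factor=0.1, backoff_jitter=0.0, backoff_max=120.0):
    """Return the sleep time in seconds before each of `retries` retries."""
    sleeps = []
    for consecutive_errors in range(1, retries + 1):
        if consecutive_errors <= 1:
            sleeps.append(0.0)
            continue
        value = backoff_factor * (2 ** (consecutive_errors - 1))
        if backoff_jitter:
            value += random.random() * backoff_jitter
        sleeps.append(max(0.0, min(backoff_max, value)))
    return sleeps

print(backoff_schedule(5))  # [0.0, 0.2, 0.4, 0.8, 1.6], matching the docstring example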
import functools
import logging
import typing
import warnings
import ssl
from types import TracebackType
from urllib.parse import urljoin
from ._collections import HTTPHeaderDict, RecentlyUsedContainer
from ._request_methods import RequestMethods
from .connection import ProxyConfig
from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme
from .exceptions import (
    LocationValueError,
    MaxRetryError,
    ProxySchemeUnknown,
    URLSchemeUnknown,
)
from .response import BaseHTTPResponse
from .util.connection import _TYPE_SOCKET_OPTIONS
from .util.proxy import connection_requires_http_tunnel
from .util.retry import Retry
from .util.timeout import Timeout
from .util.url import Url, parse_url
from typing_extensions import Literal
21111
key__socks_options: frozenset[tuple[str, str]] | None key_assert_hostname: bool | str | None key_assert_fingerprint: str | None key_server_hostname: str | None key_blocksize: int | None def _default_key_normalizer( key_class: type[PoolKey], request_context: dict[str, typing.Any] ) -> PoolKey: """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None # Default key_blocksize to _DEFAULT_BLOCKSIZE if missing from the context if context.get("key_blocksize") is None: context["key_blocksize"] = _DEFAULT_BLOCKSIZE return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: .. 
code-block:: python import urllib3 http = urllib3.PoolManager(num_pools=2) resp1 = http.request("GET", "https://google.com/") resp2 = http.request("GET", "https://google.com/mail") resp3 = http.request("GET", "https://yahoo.com/") print(len(http.pools)) # 2 """ proxy: Url | None = None proxy_config: ProxyConfig | None = None def __init__( self, num_pools: int = 10, headers: typing.Mapping[str, str] | None = None, **connection_pool_kw: typing.Any, ) -> None: super().__init__(headers) self.connection_pool_kw = connection_pool_kw
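The _default_key_normalizer in the cropped_code above lower-cases the scheme and host, freezes dict-valued entries so they become hashable, prefixes every remaining key with key_, and fills any PoolKey field missing from the request context with None. Below is a reduced sketch of the same idea; MiniPoolKey and normalize are stand-in names, not urllib3's full PoolKey or its normalizer.

# Reduced illustration of the normalization performed by _default_key_normalizer.
import typing

class MiniPoolKey(typing.NamedTuple):
    key_scheme: str
    key_host: str
    key_port: typing.Optional[int]
    key_headers: typing.Optional[frozenset]

def normalize(key_class, request_context):
    context = dict(request_context)         # copy so the caller's dict is untouched
    context["scheme"] = context["scheme"].lower()
    context["host"] = context["host"].lower()
    if context.get("headers") is not None:  # dicts are unhashable, so freeze them
        context["headers"] = frozenset(context["headers"].items())
    context = {"key_" + k: v for k, v in context.items()}
    for field in key_class._fields:         # default any missing field to None
        context.setdefault(field, None)
    return key_class(**context)

key = normalize(MiniPoolKey, {"scheme": "HTTPS", "host": "Example.COM", "port": 443})
print(key)
# MiniPoolKey(key_scheme='https', key_host='example.com', key_port=443, key_headers=None)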
from __future__ import annotations if typing.TYPE_CHECKING: __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ssl_minimum_version", "ssl_maximum_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # Default value for `blocksize` - a new parameter introduced to # http.client.HTTPConnection & http.client.HTTPSConnection in Python 3.7 _DEFAULT_BLOCKSIZE = 16384 _SelfT = typing.TypeVar("_SelfT") class PoolKey(typing.NamedTuple): """ All known keyword arguments that could be provided to the pool manager, its pools, or the underlying connections. All custom key schemes should include the fields in this key at a minimum. """ key_scheme: str key_host: str key_port: int | None key_timeout: Timeout | float | int | None key_retries: Retry | bool | int | None key_block: bool | None key_source_address: tuple[str, int] | None key_key_file: str | None key_key_password: str | None key_cert_file: str | None key_cert_reqs: str | None key_ca_certs: str | None key_ssl_version: int | str | None key_ssl_minimum_version: ssl.TLSVersion | None key_ssl_maximum_version: ssl.TLSVersion | None key_ca_cert_dir: str | None key_ssl_context: ssl.SSLContext | None key_maxsize: int | None key_headers: frozenset[tuple[str, str]] | None key__proxy: Url | None key__proxy_headers: frozenset[tuple[str, str]] | None key__proxy_config: ProxyConfig | None key_socket_options: _TYPE_SOCKET_OPTIONS | None key__socks_options: frozenset[tuple[str, str]] | None key_assert_hostname: bool | str | None key_assert_fingerprint: str | None key_server_hostname: str | None key_blocksize: int | None def _default_key_normalizer( key_class: type[PoolKey], request_context: dict[str, typing.Any] ) -> PoolKey: """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. 
for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None # Default key_blocksize to _DEFAULT_BLOCKSIZE if missing from the context if context.get("key_blocksize") is None: context["key_blocksize"] = _DEFAULT_BLOCKSIZE return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. #: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example: .. code-block:: python import urllib3 http = urllib3.PoolManager(num_pools=2) resp1 = http.request("GET", "https://google.com/") resp2 = http.request("GET", "https://google.com/mail") resp3 = http.request("GET", "https://yahoo.com/") print(len(http.pools)) # 2 """ proxy: Url | None = None proxy_config: ProxyConfig | None = None def __init__( self, num_pools: int = 10, headers: typing.Mapping[str, str] | None = None, **connection_pool_kw: typing.Any, ) -> None: super().__init__(headers) self.connection_pool_kw = connection_pool_kw
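The module-level comment above notes that each PoolManager copies key_fn_by_scheme, so key construction can be changed globally or on a single instance. A hedged sketch of the per-instance route, assuming a urllib3 v2 install where PoolKey and the private _default_key_normalizer are importable from urllib3.poolmanager as defined in this file; custom_https_key is a placeholder that simply reuses the default normalizer.

# Per-instance pool-key customization, per the comment above
# ("configured globally here, or individually on the instance").
# custom_https_key is hypothetical and reuses the default normalizer;
# a real override could fold extra request_context entries into the key.
import functools

import urllib3
from urllib3.poolmanager import PoolKey, _default_key_normalizer

custom_https_key = functools.partial(_default_key_normalizer, PoolKey)

http = urllib3.PoolManager()
http.key_fn_by_scheme["https"] = custom_https_key  # affects only this manager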
self.pools: RecentlyUsedContainer[PoolKey, HTTPConnectionPool]
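The next_line above introduces self.pools as a RecentlyUsedContainer, the cache whose num_pools limit the PoolManager docstring describes as "discarding the least recently used pool". Here is an OrderedDict-based sketch of that eviction behaviour; LRUCache is purely illustrative and is not urllib3's RecentlyUsedContainer.

# Illustrative least-recently-used cache, mirroring the eviction idea behind
# RecentlyUsedContainer; not the urllib3 implementation.
from collections import OrderedDict

class LRUCache:
    def __init__(self, maxsize):
        self.maxsize = maxsize
        self._data = OrderedDict()

    def __len__(self):
        return len(self._data)

    def __setitem__(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)         # mark as most recently used
        while len(self._data) > self.maxsize:
            self._data.popitem(last=False)  # evict the least recently used entry

    def __getitem__(self, key):
        self._data.move_to_end(key)
        return self._data[key]

pools = LRUCache(maxsize=2)
pools[("https", "google.com", 443)] = "pool-1"
pools[("https", "yahoo.com", 443)] = "pool-2"
pools[("https", "example.com", 443)] = "pool-3"  # evicts the google.com pool
print(len(pools))  # 2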
1
2023-10-11 09:08:57+00:00
24k
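Taken together, the fields of this record (context, import_statement, cropped_code, next_line, gold_snippet_index) read as a next-line completion example with retrieved cross-file snippets. A hedged sketch of how such a record might be assembled into a prompt/target pair; the field semantics are inferred from the field names, and build_example is a hypothetical helper, not part of any published loader.

# Hypothetical assembly of a next-line completion example from one record;
# the interpretation of each field is assumed, not documented here.
def build_example(record: dict) -> tuple[str, str]:
    """Return (prompt, target) for next-line prediction."""
    gold = record["context"][record["gold_snippet_index"]]
    prompt = "\n".join(
        [
            f"# retrieved snippet: {gold['identifier']}",
            gold["snippet"],
            record["import_statement"],
            record["cropped_code"],
        ]
    )
    return prompt, record["next_line"]

# Usage (illustrative): prompt, target = build_example(record)
# For the record above, target would be the next_line value shown earlier.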
MTgeophysics/mtpy-v2
mtpy/modeling/modem/data.py
[ { "identifier": "MTDataFrame", "path": "mtpy/core/mt_dataframe.py", "snippet": "class MTDataFrame:\n \"\"\"\n Dataframe for a single station\n\n Tried subclassing pandas.DataFrame, but that turned out to not be straight\n forward, so when with compilation instead.\n\n Think about having period as an index?\n \"\"\"\n\n def __init__(self, data=None, n_entries=0, **kwargs):\n self._dtype_list = [\n (\"survey\", \"U25\"),\n (\"station\", \"U25\"),\n (\"latitude\", float),\n (\"longitude\", float),\n (\"elevation\", float),\n (\"datum_epsg\", \"U6\"),\n (\"east\", float),\n (\"north\", float),\n (\"utm_epsg\", \"U6\"),\n (\"model_east\", float),\n (\"model_north\", float),\n (\"model_elevation\", float),\n (\"profile_offset\", float),\n (\"period\", float),\n (\"zxx\", complex),\n (\"zxx_error\", float),\n (\"zxx_model_error\", float),\n (\"zxy\", complex),\n (\"zxy_error\", float),\n (\"zxy_model_error\", float),\n (\"zyx\", complex),\n (\"zyx_error\", float),\n (\"zyx_model_error\", float),\n (\"zyy\", complex),\n (\"zyy_error\", float),\n (\"zyy_model_error\", float),\n (\"tzx\", complex),\n (\"tzx_error\", float),\n (\"tzx_model_error\", float),\n (\"tzy\", complex),\n (\"tzy_error\", float),\n (\"tzy_model_error\", float),\n (\"res_xx\", float),\n (\"res_xx_error\", float),\n (\"res_xx_model_error\", float),\n (\"res_xy\", float),\n (\"res_xy_error\", float),\n (\"res_xy_model_error\", float),\n (\"res_yx\", float),\n (\"res_yx_error\", float),\n (\"res_yx_model_error\", float),\n (\"res_yy\", float),\n (\"res_yy_error\", float),\n (\"res_yy_model_error\", float),\n (\"phase_xx\", float),\n (\"phase_xx_error\", float),\n (\"phase_xx_model_error\", float),\n (\"phase_xy\", float),\n (\"phase_xy_error\", float),\n (\"phase_xy_model_error\", float),\n (\"phase_yx\", float),\n (\"phase_yx_error\", float),\n (\"phase_yx_model_error\", float),\n (\"phase_yy\", float),\n (\"phase_yy_error\", float),\n (\"phase_yy_model_error\", float),\n (\"ptxx\", float),\n (\"ptxx_error\", float),\n (\"ptxx_model_error\", float),\n (\"ptxy\", float),\n (\"ptxy_error\", float),\n (\"ptxy_model_error\", float),\n (\"ptyx\", float),\n (\"ptyx_error\", float),\n (\"ptyx_model_error\", float),\n (\"ptyy\", float),\n (\"ptyy_error\", float),\n (\"ptyy_model_error\", float),\n (\"rms_zxx\", float),\n (\"rms_zxy\", float),\n (\"rms_zyx\", float),\n (\"rms_zyy\", float),\n (\"rms_tzx\", float),\n (\"rms_tzy\", float),\n ]\n\n if data is not None:\n self.dataframe = self._validate_data(data)\n\n else:\n self.dataframe = self._get_initial_df(n_entries)\n\n self.working_survey = None\n self.working_station = None\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n def __str__(self):\n if self._has_data():\n return self.dataframe.__str__()\n\n else:\n return \"Empty MTStationDataFrame\"\n\n def __repr__(self):\n if self._has_data():\n return self.dataframe.__repr__()\n else:\n return \"MTStationDataFrame()\"\n\n @property\n def _column_names(self):\n return [col[0] for col in self._dtype_list]\n\n def __eq__(self, other):\n other = self._validata_data(other)\n return self.dataframe == other\n\n @property\n def nonzero_items(self):\n \"\"\"return number of non zero entries\"\"\"\n\n if self._has_data():\n cols = [\n dtype[0]\n for dtype in self._dtype_list[14:]\n if \"error\" not in dtype[0]\n ]\n\n return np.count_nonzero(self.dataframe[cols])\n else:\n return 0\n\n def _validate_data(self, data):\n \"\"\"\n\n :param data: DESCRIPTION\n :type data: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n 
\"\"\"\n\n if data is None:\n return\n\n if isinstance(data, (dict, np.ndarray, pd.DataFrame)):\n df = pd.DataFrame(data)\n\n elif isinstance(data, (MTDataFrame)):\n df = data.dataframe\n\n else:\n raise TypeError(\n f\"Input data must be a pandas.DataFrame not {type(data)}\"\n )\n\n for col in self._dtype_list:\n if col[0] not in df.columns:\n\n df[col[0]] = np.zeros(df.shape[0], dtype=col[1])\n\n # resort to the desired column order\n if df.columns.to_list() != self._column_names:\n df = df[self._column_names]\n\n return df\n\n def _get_initial_df(self, n_entries=0):\n\n return pd.DataFrame(\n np.empty(n_entries, dtype=np.dtype(self._dtype_list))\n )\n\n def _has_data(self):\n if self.dataframe is None:\n return False\n elif self.dataframe.shape[0] > 0:\n return True\n return False\n\n def get_station_df(self, station=None):\n \"\"\"\n get a single station df\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n if station is not None:\n self.working_station = station\n if self._has_data():\n if self.working_station is None:\n self.working_station = self.dataframe.station.unique()[0]\n\n if self.working_station not in self.dataframe.station.values:\n raise ValueError(\n f\"Could not find station {self.working_station} in dataframe.\"\n )\n\n return self.dataframe[\n self.dataframe.station == self.working_station\n ]\n\n @property\n def size(self):\n if self._has_data():\n return self.period.size\n\n @property\n def _index_dict(self):\n return {\n \"xx\": {\"ii\": 0, \"jj\": 0},\n \"xy\": {\"ii\": 0, \"jj\": 1},\n \"yx\": {\"ii\": 1, \"jj\": 0},\n \"yy\": {\"ii\": 1, \"jj\": 1},\n \"zx\": {\"ii\": 0, \"jj\": 0},\n \"zy\": {\"ii\": 0, \"jj\": 1},\n }\n\n def _get_index(self, key):\n \"\"\" \"\"\"\n\n if key.startswith(\"z\") or key.startswith(\"t\"):\n return self._index_dict[key[1:3]]\n\n elif key.startswith(\"res\"):\n return self._index_dict[key[4:6]]\n elif key.startswith(\"phase\"):\n return self._index_dict[key[6:8]]\n elif key.startswith(\"pt\"):\n return self._index_dict[key[2:4]]\n else:\n return None\n\n @property\n def period(self):\n \"\"\"\n Get frequencies\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self._has_data():\n return np.sort(self.dataframe.period.unique())\n\n @property\n def frequency(self):\n \"\"\"\n Get frequencies\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self._has_data():\n return 1.0 / self.period\n\n @property\n def survey(self):\n \"\"\"survey name\"\"\"\n if self._has_data():\n if self.working_survey is None:\n self.working_survey = self.dataframe.survey.unique()[0]\n return self.working_survey\n\n @survey.setter\n def survey(self, value):\n \"\"\"survey name\"\"\"\n if self._has_data():\n if self.working_survey in [None, \"\"]:\n self.dataframe.loc[\n self.dataframe.survey == \"\", \"survey\"\n ] = value\n self.working_survey = value\n\n @property\n def station(self):\n \"\"\"station name\"\"\"\n if self._has_data():\n if self.working_station is None:\n self.working_station = self.dataframe.station.unique()[0]\n return self.working_station\n\n @station.setter\n def station(self, value):\n \"\"\"station name\"\"\"\n if self._has_data():\n if self.working_station in [None, \"\"]:\n self.dataframe.loc[\n self.dataframe.station == \"\", \"station\"\n ] = value\n self.working_station = value\n\n @property\n def latitude(self):\n \"\"\"latitude\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"latitude\"\n ].unique()[0]\n\n @latitude.setter\n def latitude(self, value):\n 
\"\"\"latitude\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"latitude\"\n ] = value\n\n @property\n def longitude(self):\n \"\"\"longitude\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"longitude\"\n ].unique()[0]\n\n @longitude.setter\n def longitude(self, value):\n \"\"\"longitude\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"longitude\"\n ] = value\n\n @property\n def elevation(self):\n \"\"\"elevation\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"elevation\"\n ].unique()[0]\n\n @elevation.setter\n def elevation(self, value):\n \"\"\"elevation\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"elevation\"\n ] = value\n\n @property\n def datum_epsg(self):\n \"\"\"datum_epsg\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"datum_epsg\"\n ].unique()[0]\n\n @datum_epsg.setter\n def datum_epsg(self, value):\n \"\"\"datum_epsg\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"datum_epsg\"\n ] = value\n\n @property\n def east(self):\n \"\"\"station\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"east\"\n ].unique()[0]\n\n @east.setter\n def east(self, value):\n \"\"\"east\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"east\"\n ] = value\n\n @property\n def north(self):\n \"\"\"north\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"north\"\n ].unique()[0]\n\n @north.setter\n def north(self, value):\n \"\"\"north\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"north\"\n ] = value\n\n @property\n def utm_epsg(self):\n \"\"\"utm_epsg\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"utm_epsg\"\n ].unique()[0]\n\n @utm_epsg.setter\n def utm_epsg(self, value):\n \"\"\"utm_epsg\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"utm_epsg\"\n ] = value\n\n @property\n def model_east(self):\n \"\"\"model_east\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"model_east\"\n ].unique()[0]\n\n @model_east.setter\n def model_east(self, value):\n \"\"\"model_east\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"model_east\"\n ] = value\n\n @property\n def model_north(self):\n \"\"\"model_north\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"model_north\"\n ].unique()[0]\n\n @model_north.setter\n def model_north(self, value):\n \"\"\"model_north\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station, \"model_north\"\n ] = value\n\n @property\n def model_elevation(self):\n \"\"\"model_elevation\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"model_elevation\"\n ].unique()[0]\n\n @model_elevation.setter\n def model_elevation(self, value):\n \"\"\"model_elevation\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station,\n \"model_elevation\",\n ] = value\n\n @property\n def profile_offset(self):\n 
\"\"\"profile_offset\"\"\"\n if self._has_data():\n return self.dataframe.loc[\n self.dataframe.station == self.station, \"profile_offset\"\n ].unique()[0]\n\n @profile_offset.setter\n def profile_offset(self, value):\n \"\"\"profile_offset\"\"\"\n if self._has_data():\n self.dataframe.loc[\n self.dataframe.station == self.station,\n \"profile_offset\",\n ] = value\n\n def from_z_object(self, z_object):\n \"\"\"\n Fill impedance\n :param impedance: DESCRIPTION\n :type impedance: TYPE\n :param index: DESCRIPTION\n :type index: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n for key in self.dataframe.dtypes.keys():\n if key in [\"period\"]:\n self.dataframe.loc[\n self.dataframe.station == self.station, \"period\"\n ] = z_object.period\n\n index = self._get_index(key)\n if index is None:\n continue\n\n if key in [\"zxx\", \"zxy\", \"zyx\", \"zyy\"]:\n if z_object._has_tf():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.z[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\"zxx_error\", \"zxy_error\", \"zyx_error\", \"zyy_error\"]:\n if z_object._has_tf_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.z_error[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\n \"zxx_model_error\",\n \"zxy_model_error\",\n \"zyx_model_error\",\n \"zyy_model_error\",\n ]:\n if z_object._has_tf_model_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.z_model_error[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\"res_xx\", \"res_xy\", \"res_yx\", \"res_yy\"]:\n if z_object._has_tf():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.resistivity[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\n \"res_xx_error\",\n \"res_xy_error\",\n \"res_yx_error\",\n \"res_yy_error\",\n ]:\n if z_object._has_tf_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.resistivity_error[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\n \"res_xx_model_error\",\n \"res_xy_model_error\",\n \"res_yx_model_error\",\n \"res_yy_model_error\",\n ]:\n if z_object._has_tf_model_error():\n self.dataframe.loc[\n :, key\n ] = z_object.resistivity_model_error[\n :, index[\"ii\"], index[\"jj\"]\n ]\n\n elif key in [\"phase_xx\", \"phase_xy\", \"phase_yx\", \"phase_yy\"]:\n if z_object._has_tf():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.phase[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\n \"phase_xx_error\",\n \"phase_xy_error\",\n \"phase_yx_error\",\n \"phase_yy_error\",\n ]:\n if z_object._has_tf_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.phase_error[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\n \"phase_xx_model_error\",\n \"phase_xy_model_error\",\n \"phase_yx_model_error\",\n \"phase_yy_model_error\",\n ]:\n if z_object._has_tf_model_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = z_object.phase_model_error[:, index[\"ii\"], index[\"jj\"]]\n\n def from_t_object(self, t_object):\n \"\"\"\n Fill tipper\n :param tipper: DESCRIPTION\n :type tipper: TYPE\n :param tipper_error: DESCRIPTION\n :type tipper_error: TYPE\n :param index: DESCRIPTION\n :type index: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n for key in self.dataframe.dtypes.keys():\n if key in [\"period\"]:\n self.dataframe.loc[\n self.dataframe.station == self.station, \"period\"\n ] = t_object.period\n\n index = self._get_index(key)\n if index is 
None:\n continue\n if key in [\"tzx\", \"tzy\"]:\n if t_object._has_tf():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = t_object.tipper[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\"tzx_error\", \"tzy_error\"]:\n if t_object._has_tf_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = t_object.tipper_error[:, index[\"ii\"], index[\"jj\"]]\n elif key in [\"tzx_model_error\", \"tzy_model_error\"]:\n if t_object._has_tf_model_error():\n self.dataframe.loc[\n self.dataframe.station == self.station, key\n ] = t_object.tipper_model_error[\n :, index[\"ii\"], index[\"jj\"]\n ]\n\n def to_z_object(self):\n \"\"\"\n fill z_object from dataframe\n\n Need to have the components this way for transposing the elements so\n that the shape is (nf, 2, 2)\n \"\"\"\n\n nf = self.period.size\n z = np.zeros((nf, 2, 2), dtype=complex)\n z_err = np.zeros((nf, 2, 2), dtype=float)\n z_model_err = np.zeros((nf, 2, 2), dtype=float)\n\n res = np.zeros((nf, 2, 2), dtype=float)\n res_err = np.zeros((nf, 2, 2), dtype=float)\n res_model_err = np.zeros((nf, 2, 2), dtype=float)\n\n phase = np.zeros((nf, 2, 2), dtype=float)\n phase_err = np.zeros((nf, 2, 2), dtype=float)\n phase_model_err = np.zeros((nf, 2, 2), dtype=float)\n\n for key in self.dataframe.columns:\n index = self._get_index(key)\n if index is None:\n continue\n\n if key in [\"zxx\", \"zxy\", \"zyx\", \"zyy\"]:\n z[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\"zxx_error\", \"zxy_error\", \"zyx_error\", \"zyy_error\"]:\n z_err[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\n \"zxx_model_error\",\n \"zxy_model_error\",\n \"zyx_model_error\",\n \"zyy_model_error\",\n ]:\n z_model_err[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n\n ### resistivity\n elif key in [\"res_xx\", \"res_xy\", \"res_yx\", \"res_yy\"]:\n res[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\n \"res_xx_error\",\n \"res_xy_error\",\n \"res_yx_error\",\n \"res_yy_error\",\n ]:\n res_err[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\n \"res_xx_model_error\",\n \"res_xy_model_error\",\n \"res_yx_model_error\",\n \"res_yy_model_error\",\n ]:\n res_model_err[\n :, index[\"ii\"], index[\"jj\"]\n ] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n\n ### Phase\n elif key in [\"phase_xx\", \"phase_xy\", \"phase_yx\", \"phase_yy\"]:\n phase[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\n \"phase_xx_error\",\n \"phase_xy_error\",\n \"phase_yx_error\",\n \"phase_yy_error\",\n ]:\n phase_err[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\n \"phase_xx_model_error\",\n \"phase_xy_model_error\",\n \"phase_yx_model_error\",\n \"phase_yy_model_error\",\n ]:\n phase_model_err[\n :, index[\"ii\"], index[\"jj\"]\n ] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n\n z_object = Z(z, z_err, self.frequency, z_model_err)\n\n if (z == 0).all():\n # only load in resistivity and phase if impedance is 0, otherwise\n # its recreated from z.\n if not (res == 0).all():\n if not (phase == 0).all():\n 
z_object.set_resistivity_phase(\n res,\n phase,\n self.frequency,\n res_error=res_err,\n phase_error=phase_err,\n res_model_error=res_model_err,\n phase_model_error=phase_model_err,\n )\n else:\n raise ValueError(\n \"cannot estimate Z without phase information\"\n )\n\n return z_object\n\n def to_t_object(self):\n \"\"\"\n To a tipper object\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n nf = self.period.size\n t = np.zeros((nf, 1, 2), dtype=complex)\n t_err = np.zeros((nf, 1, 2), dtype=float)\n t_model_err = np.zeros((nf, 1, 2), dtype=float)\n\n for key in self.dataframe.columns:\n index = self._get_index(key)\n if index is None:\n continue\n\n if key in [\"tzx\", \"tzy\"]:\n t[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\"tzx_error\", \"tzy_error\"]:\n t_err[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n elif key in [\"tzx_model_error\", \"tzy_model_error\"]:\n t_model_err[:, index[\"ii\"], index[\"jj\"]] = self.dataframe.loc[\n self.dataframe.station == self.station, key\n ]\n\n return Tipper(t, t_err, self.frequency, t_model_err)\n\n @property\n def station_locations(self):\n return (\n self.dataframe.groupby(\"station\")\n .first()[\n [\n \"survey\",\n \"latitude\",\n \"longitude\",\n \"latitude\",\n \"elevation\",\n \"datum_epsg\",\n \"east\",\n \"north\",\n \"utm_epsg\",\n \"model_east\",\n \"model_north\",\n \"model_elevation\",\n \"profile_offset\",\n ]\n ]\n .reset_index()\n )" }, { "identifier": "MTLocation", "path": "mtpy/core/mt_location.py", "snippet": "class MTLocation:\n \"\"\"\n Location for a MT site or point measurement\n\n \"\"\"\n\n def __init__(self, survey_metadata=None, **kwargs):\n\n self.logger = logger\n if survey_metadata is None:\n self._survey_metadata = self._initiate_metadata()\n else:\n self._survey_metadata = self._validate_metadata(survey_metadata)\n\n self._east = 0\n self._north = 0\n self._datum_crs = CRS.from_epsg(4326)\n self._utm_crs = None\n self._geoid_crs = None\n self.model_east = 0\n self.model_north = 0\n self.model_elevation = 0\n self.profile_offset = 0\n\n self._key_attrs = [\n \"latitude\",\n \"longitude\",\n \"elevation\",\n \"east\",\n \"north\",\n \"model_east\",\n \"model_north\",\n \"model_elevation\",\n \"datum_crs\",\n \"utm_crs\",\n \"datum_epsg\",\n \"utm_epsg\",\n \"profile_offset\",\n ]\n\n for key, value in kwargs.items():\n if key in self._key_attrs:\n setattr(self, key, value)\n\n if self.east != 0 and self.north != None:\n if self.utm_crs is None:\n raise ValueError(\n \"Need to input UTM CRS if only setting east and north\"\n )\n\n def _initiate_metadata(self):\n survey_metadata = Survey(id=0)\n survey_metadata.add_station(Station(id=0))\n survey_metadata.stations[0].add_run(Run(id=0))\n\n return survey_metadata\n\n def _validate_metadata(self, survey_metadata):\n if not isinstance(survey_metadata, Survey):\n raise TypeError(\n \"Input metadata must be type \"\n \"mt_metadata.transfer_functions.tf.Survey, \"\n f\"not {type(survey_metadata)}.\"\n )\n if len(survey_metadata.stations) < 1:\n survey_metadata.add_station(Station(id=0))\n\n if len(survey_metadata.stations[0].runs) < 1:\n survey_metadata.stations[0].add_run(Run(id=0))\n\n return survey_metadata\n\n def __str__(self):\n lines = [\"MT Location: \", \"-\" * 20]\n lines.append(f\" Latitude (deg): {self.latitude:.6f}\")\n lines.append(f\" Longitude (deg): {self.longitude:.6f}\")\n lines.append(f\" Elevation (m): 
{self.elevation:.4f}\")\n lines.append(f\" Datum crs: {self.datum_crs}\")\n lines.append(\"\")\n lines.append(f\" Easting (m): {self.east:.3f}\")\n lines.append(f\" Northing (m): {self.north:.3f}\")\n lines.append(f\" UTM crs: {self.utm_crs}\")\n lines.append(\"\")\n lines.append(f\" Model Easting (m): {self.model_east:.3f}\")\n lines.append(f\" Model Northing (m): {self.model_north:.3f}\")\n lines.append(f\" Model Elevation (m): {self.model_elevation:.3f}\")\n lines.append(f\" Profile Offset (m): {self.profile_offset:.3f}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n def __eq__(self, other):\n \"\"\"\n equals\n :param other: DESCRIPTION\n :type other: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if not isinstance(other, MTLocation):\n raise TypeError(f\"Can not compare MTLocation with {type(other)}\")\n\n for key in self._key_attrs:\n og_value = getattr(self, key)\n other_value = getattr(other, key)\n\n if isinstance(og_value, float):\n if not np.isclose(og_value, other_value):\n self.logger.info(\n f\"{key} not equal {og_value} != {other_value}\"\n )\n return False\n else:\n if not og_value == other_value:\n self.logger.info(\n f\"{key} not equal {og_value} != {other_value}\"\n )\n return False\n return True\n\n def copy(self):\n copied = type(self)()\n copied._survey_metadata = self._survey_metadata.copy()\n # not sure why this is needed, survey metadata copies fine, but here\n # it does not.\n if len(copied._survey_metadata.stations) == 0:\n copied._survey_metadata.add_station(\n self._survey_metadata.stations[0]\n )\n for key in self._key_attrs:\n setattr(copied, key, deepcopy(getattr(self, key)))\n\n return copied\n\n @property\n def datum_crs(self):\n if self._datum_crs is not None:\n return self._datum_crs\n\n @property\n def datum_name(self):\n if self._datum_crs is not None:\n return self._datum_crs.name\n\n @property\n def datum_epsg(self):\n if self._datum_crs is not None:\n return self._datum_crs.to_epsg()\n\n @datum_epsg.setter\n def datum_epsg(self, value):\n if value not in [\"\", None, \"None\"]:\n self.datum_crs = value\n\n @datum_crs.setter\n def datum_crs(self, value):\n if value in [None, \"None\", \"none\", \"null\", \"\"]:\n return\n\n new_crs = CRS.from_user_input(value)\n\n if new_crs != self._datum_crs:\n if (\n self._datum_crs is not None\n and self.latitude != 0\n and self.longitude != 0\n ):\n (\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n ) = project_point(\n self.longitude, self.latitude, self._datum_crs, new_crs\n )\n\n self._east, self._north = project_point(\n self.longitude, self.latitude, new_crs, self.utm_crs\n )\n\n elif (\n self.datum_crs is not None\n and self.east != 0\n and self.north != 0\n and self.latitude == 0\n and self.longitude == 0\n ):\n (\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n ) = project_point(\n self.east,\n self.north,\n self.utm_crs,\n new_crs,\n )\n self._datum_crs = new_crs\n\n @property\n def utm_crs(self):\n if self._utm_crs is not None:\n return self._utm_crs\n\n @property\n def utm_name(self):\n if self._utm_crs is not None:\n return self._utm_crs.name\n\n @property\n def utm_epsg(self):\n if self._utm_crs is not None:\n return self._utm_crs.to_epsg()\n\n @utm_epsg.setter\n def utm_epsg(self, value):\n if value not in [\"\", None, \"None\"]:\n self.utm_crs = value\n\n @property\n def utm_zone(self):\n if self._utm_crs is not None:\n 
return self._utm_crs.utm_zone\n\n @utm_crs.setter\n def utm_crs(self, value):\n if value in [None, \"None\", \"none\", \"null\", \"\"]:\n return\n\n new_crs = CRS.from_user_input(value)\n if value != self._utm_crs:\n # reproject easting, northing to new zone\n if (\n self._utm_crs is not None\n and self.east != 0\n and self.north != 0\n ):\n self._east, self._north = project_point(\n self.east, self.north, self._utm_crs, new_crs\n )\n\n if (\n self.datum_crs is not None\n and self.east != 0\n and self.north != 0\n ):\n # reproject lat and lon base on new UTM datum\n (\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n ) = project_point(\n self.east,\n self.north,\n new_crs,\n self.datum_crs,\n )\n\n # if east and north == 0 and lat and lon != 0 project to utm\n elif (\n self.datum_crs is not None\n and self.east == 0\n and self.north == 0\n and self.latitude != 0\n and self.longitude != 0\n ):\n self._east, self._north = project_point(\n self.longitude,\n self.latitude,\n self.datum_crs,\n new_crs,\n )\n\n self._utm_crs = new_crs\n\n @property\n def east(self):\n \"\"\"easting\"\"\"\n return self._east\n\n @east.setter\n def east(self, value):\n \"\"\"set east\"\"\"\n self._east = value\n if (\n self.datum_crs is not None\n and self.utm_crs is not None\n and self._north != 0\n ):\n (\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n ) = project_point(\n self._east, self._north, self.utm_crs, self.datum_crs\n )\n\n @property\n def north(self):\n \"\"\"northing\"\"\"\n return self._north\n\n @north.setter\n def north(self, value):\n \"\"\"set north\"\"\"\n self._north = value\n if (\n self.datum_crs is not None\n and self.utm_crs is not None\n and self._east != 0\n ):\n (\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n ) = project_point(\n self._east, self._north, self.utm_crs, self.datum_crs\n )\n\n @property\n def latitude(self):\n return self._survey_metadata.stations[0].location.latitude\n\n @latitude.setter\n def latitude(self, lat):\n self._survey_metadata.stations[0].location.latitude = lat\n if (\n self.utm_crs is not None\n and self.datum_crs is not None\n and self._survey_metadata.stations[0].location.longitude != 0\n ):\n self._east, self._north = project_point(\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n self.datum_crs,\n self.utm_crs,\n )\n\n @property\n def longitude(self):\n return self._survey_metadata.stations[0].location.longitude\n\n @longitude.setter\n def longitude(self, lon):\n self._survey_metadata.stations[0].location.longitude = lon\n if (\n self.utm_crs is not None\n and self.datum_crs is not None\n and self._survey_metadata.stations[0].location.latitude != 0\n ):\n self._east, self._north = project_point(\n self._survey_metadata.stations[0].location.longitude,\n self._survey_metadata.stations[0].location.latitude,\n self.datum_crs,\n self.utm_crs,\n )\n\n @property\n def elevation(self):\n return self._survey_metadata.stations[0].location.elevation\n\n @elevation.setter\n def elevation(self, elev):\n self._survey_metadata.stations[0].location.elevation = elev\n\n @property\n def model_east(self):\n return self._model_east\n\n @model_east.setter\n def model_east(self, value):\n try:\n self._model_east = float(value)\n except (TypeError, ValueError):\n raise ValueError(f\"Input should be a float not type 
{type(value)}\")\n\n @property\n def model_north(self):\n return self._model_north\n\n @model_north.setter\n def model_north(self, value):\n try:\n self._model_north = float(value)\n except (TypeError, ValueError):\n raise ValueError(f\"Input should be a float not type {type(value)}\")\n\n @property\n def model_elevation(self):\n return self._model_elevation\n\n @model_elevation.setter\n def model_elevation(self, value):\n try:\n self._model_elevation = float(value)\n except (TypeError, ValueError):\n raise ValueError(f\"Input should be a float not type {type(value)}\")\n\n def compute_model_location(self, center_location):\n \"\"\"\n compute model location based on model center and model epsg\n\n :param model_center: DESCRIPTION\n :type model_center: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n self.model_east = self.east - center_location.model_east\n self.model_north = self.north - center_location.model_north\n self.model_elevation = self.elevation - center_location.model_elevation\n\n def project_onto_profile_line(self, profile_slope, profile_intersection):\n \"\"\"\n\n :param profile_slope: DESCRIPTION\n :type profile_slope: TYPE\n :param profile_intersection: DESCRIPTION\n :type profile_intersection: TYPE\n :param units: DESCRIPTION, defaults to \"deg\"\n :type units: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self.utm_crs is None:\n raise ValueError(\n \"utm_crs is None, cannot project onto profile line.\"\n )\n\n profile_vector = np.array([1, profile_slope], dtype=float)\n profile_vector /= np.linalg.norm(profile_vector)\n\n station_vector = np.array(\n [self.east, self.north - profile_intersection]\n )\n\n self.profile_offset = np.linalg.norm(\n np.dot(profile_vector, station_vector) * profile_vector\n )\n\n def get_elevation_from_national_map(self):\n \"\"\"\n Get elevation from DEM data of the US National Map. 
Plan to extend\n this to the globe.\n\n Pulls data from the USGS national map DEM\n\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n elev = get_nm_elev(self.latitude, self.longitude)\n if elev != 0:\n self.elevation = elev\n else:\n self.logger.warning(\n \"Could not get elevation data, not setting elevation\"\n )" }, { "identifier": "ModelErrors", "path": "mtpy/modeling/errors.py", "snippet": "class ModelErrors:\n def __init__(self, data=None, measurement_error=None, **kwargs):\n\n self._functions = {\n \"egbert\": self.compute_geometric_mean_error,\n \"geometric_mean\": self.compute_geometric_mean_error,\n \"arithmetic_mean\": self.compute_arithmetic_mean_error,\n \"row\": self.compute_row_error,\n \"mean_od\": self.compute_arithmetic_mean_error,\n \"median\": self.compute_median_error,\n \"eigen\": self.compute_eigen_value_error,\n \"percent\": self.compute_percent_error,\n \"absolute\": self.compute_absolute_error,\n \"abs\": self.compute_absolute_error,\n \"data\": self.use_measurement_error,\n }\n\n self._array_shapes = {\n \"impedance\": (2, 2),\n \"z\": (2, 2),\n \"transfer_function\": (3, 2),\n \"tipper\": (1, 2),\n \"t\": (1, 2),\n }\n\n self.error_value = 5\n self.error_type = \"percent\"\n self.floor = True\n self.mode = \"impedance\"\n\n for key, value in kwargs.items():\n setattr(self, key, value)\n\n self.data = data\n self.measurement_error = measurement_error\n\n def __str__(self):\n lines = [\"Model Errors:\", \"-\" * 20]\n lines += [f\"\\terror_type: {self.error_type}\"]\n lines += [f\"\\terror_value: {self.error_value}\"]\n lines += [f\"\\tfloor: {self.floor}\"]\n lines += [f\"\\tmode: {self.mode}\"]\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()\n\n def __eq__(self, other):\n if not isinstance(other, ModelErrors):\n raise TypeError(\n f\"Cannot compare ModelErrors to type {type(other)}\"\n )\n\n for key in [\"error_value\", \"error_type\", \"floor\", \"mode\"]:\n value_og = getattr(self, key)\n value_other = getattr(other, key)\n\n if value_og != value_other:\n return False\n return True\n\n def validate_percent(self, value):\n \"\"\"\n Make sure the percent is a decimal\n\n :param value: DESCRIPTION\n :type value: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if value >= 1:\n value /= 100.0\n\n return value\n\n @property\n def error_parameters(self):\n return {\n \"error_value\": self.error_value,\n \"error_type\": self.error_type,\n \"floor\": self.floor,\n }\n\n @property\n def error_type(self):\n return self._error_type\n\n @error_type.setter\n def error_type(self, value):\n if value not in self._functions.keys():\n raise NotImplementedError(f\"Error Type {value} not supported.\")\n self._error_type = value\n\n @property\n def floor(self):\n return self._floor\n\n @floor.setter\n def floor(self, value):\n if value not in [False, True]:\n raise ValueError(\"Floor must be True or False\")\n self._floor = value\n\n @property\n def error_value(self):\n return self._error_value\n\n @error_value.setter\n def error_value(self, value):\n self._error_value = self.validate_percent(value)\n\n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, value):\n if value not in self._array_shapes.keys():\n raise NotImplementedError(f\"Mode {value} not supported.\")\n self._mode = value\n\n def _get_shape(self):\n try:\n return self._array_shapes[self.mode]\n\n except KeyError:\n raise NotImplementedError(f\"Mode {self.mode} not supported.\")\n\n def validate_array_shape(self, data):\n \"\"\"\n\n :param data: 
DESCRIPTION\n :type data: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if not isinstance(data, np.ndarray):\n data = np.array(data)\n\n expected_shape = self._get_shape()\n if data.shape == expected_shape:\n data = data.reshape((1, expected_shape[0], expected_shape[1]))\n\n if (\n data.shape[1] != expected_shape[0]\n or data.shape[2] != expected_shape[1]\n ):\n raise ValueError(\n f\"Shape {data.shape} is not expected shape of (n, \"\n f\"{expected_shape[0]}, {expected_shape[1]})\"\n )\n\n return data\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, value):\n if value is not None:\n self._data = self.validate_array_shape(value)\n else:\n self._data = None\n\n @property\n def measurement_error(self):\n return self._measurement_error\n\n @measurement_error.setter\n def measurement_error(self, value):\n if value is not None:\n self._measurement_error = self.validate_array_shape(value)\n else:\n self._measurement_error = None\n\n def mask_zeros(self, data):\n \"\"\"\n mask zeros\n\n :param data: DESCRIPTION\n :type data: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n dshape = data.shape\n data = np.nan_to_num(data).reshape(dshape)\n return np.ma.masked_equal(data, 0)\n\n def resize_output(self, error_array):\n \"\"\"\n resize the error estimtion to the same size as the input data\n\n :param error_array: DESCRIPTION\n :type error_array: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if error_array.shape != self.data.shape:\n if error_array.shape[0] == self.data.shape[0]:\n err = np.zeros_like(self.data, dtype=float)\n for index in range(self.data.shape[0]):\n err[index] = error_array[index]\n return err\n\n return error_array\n\n def set_floor(self, error_array):\n \"\"\"\n Set error floor\n\n :param array: DESCRIPTION\n :type data: TYPE\n :param floor: DESCRIPTION\n :type floor: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self.measurement_error is not None:\n\n index = np.where(error_array < self.measurement_error)\n error_array[index] = self.measurement_error[index]\n\n return error_array\n\n def use_measurement_error(self):\n return self.measurement_error\n\n def compute_percent_error(self):\n \"\"\"\n Percent error\n\n :param data: DESCRIPTION\n :type data: TYPE\n :param percent: DESCRIPTION\n :type percent: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n err = self.error_value * np.abs(self.data)\n if self.floor:\n err = self.set_floor(err)\n\n return err\n\n def compute_arithmetic_mean_error(self):\n \"\"\"\n error_value * (Zxy + Zyx) / 2\n\n\n :param data: DESCRIPTION\n :type data: TYPE\n :param error_value: DESCRIPTION\n :type error_value: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self.data.shape[1] < 2:\n od = self.mask_zeros(\n np.array([self.data[:, 0, 0], self.data[:, 0, 1]])\n )\n\n else:\n od = self.mask_zeros(\n np.array([self.data[:, 0, 1], self.data[:, 1, 0]])\n )\n err = self.resize_output(\n self.error_value * np.ma.mean(np.ma.abs(od), axis=0)\n )\n\n if self.floor:\n err = self.set_floor(err)\n\n if isinstance(err, np.ma.core.MaskedArray):\n return err.data\n\n return err\n\n def compute_median_error(self):\n \"\"\"\n median(array) * error_value\n\n :param array: DESCRIPTION\n :type array: TYPE\n :param error_value: DESCRIPTION\n :type error_value: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n data = self.mask_zeros(self.data)\n err = self.resize_output(\n np.abs(np.ma.median(data, axis=(1, 2))) * self.error_value\n )\n\n if self.floor:\n err = 
self.set_floor(err)\n\n if isinstance(err, np.ma.core.MaskedArray):\n return err.data\n\n return err\n\n def compute_eigen_value_error(self):\n \"\"\"\n error_value * eigen(data).mean()\n\n :param data: DESCRIPTION\n :type data: TYPE\n :param error_value: DESCRIPTION\n :type error_value: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self.data.shape[1] < 2:\n raise IndexError(\n \"Cannot compute eigen value error with an array of shape \"\n f\"{self.data.shape}\"\n )\n\n data = self.mask_zeros(self.data)\n\n try:\n err = self.error_value * np.abs(np.linalg.eigvals(data)).mean(\n axis=1\n )\n except Exception:\n data_shape = data.shape\n err = (\n self.error_value\n * np.abs(\n np.linalg.eigvals(np.nan_to_num(data).reshape(data_shape))\n ).mean()\n )\n\n if np.atleast_1d(err).sum(axis=0) == 0:\n err = self.error_value * data[np.nonzero(data)].mean()\n\n err = self.resize_output(err)\n\n if self.floor:\n err = self.set_floor(err)\n return err\n\n def compute_geometric_mean_error(self):\n \"\"\"\n error_value * sqrt(Zxy * Zyx)\n\n :param data: DESCRIPTION\n :type data: TYPE\n :param error_value: DESCRIPTION\n :type error_value: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n data = self.data.copy()\n\n if self.data.shape[1] < 2:\n zero_xy = np.where(data[:, 0, 0] == 0)\n data[zero_xy, 0, 0] = data[zero_xy, 0, 0]\n\n zero_yx = np.where(data[:, 0, 1] == 0)\n data[zero_yx, 0, 1] = data[zero_yx, 0, 1]\n\n data = self.mask_zeros(data)\n\n err = self.resize_output(\n self.error_value\n * np.ma.sqrt(np.ma.abs(data[:, 0, 0] * data[:, 0, 1]))\n )\n\n else:\n zero_xy = np.where(data[:, 0, 1] == 0)\n data[zero_xy, 0, 1] = data[zero_xy, 1, 0]\n\n zero_yx = np.where(data[:, 1, 0] == 0)\n data[zero_yx, 1, 0] = data[zero_yx, 0, 1]\n\n data = self.mask_zeros(data)\n\n err = self.resize_output(\n self.error_value\n * np.ma.sqrt(np.ma.abs(data[:, 0, 1] * data[:, 1, 0]))\n )\n\n if self.floor:\n err = self.set_floor(err)\n\n if isinstance(err, np.ma.core.MaskedArray):\n return err.data\n\n return err\n\n def compute_row_error(self):\n \"\"\"\n set zxx and zxy the same error and zyy and zyx the same error\n\n :param data: DESCRIPTION\n :type data: TYPE\n :param error_value: DESCRIPTION\n :type error_value: TYPE\n :param floor: DESCRIPTION, defaults to True\n :type floor: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if self.data.shape[1] < 2:\n err_xy = np.abs(self.data[:, 0, 0]) * self.error_value\n err_yx = np.abs(self.data[:, 0, 1]) * self.error_value\n\n err = np.zeros_like(self.data, dtype=float)\n err[:, 0, 0] = err_xy\n err[:, 0, 1] = err_yx\n\n else:\n err_xy = np.abs(self.data[:, 0, 1]) * self.error_value\n err_yx = np.abs(self.data[:, 1, 0]) * self.error_value\n\n err = np.zeros_like(self.data, dtype=float)\n err[:, 0, 0] = err_xy\n err[:, 0, 1] = err_xy\n err[:, 1, 0] = err_yx\n err[:, 1, 1] = err_yx\n\n err = self.resize_output(err)\n if self.floor:\n err = self.set_floor(err)\n\n return err\n\n def compute_absolute_error(self):\n \"\"\"\n\n :param data: DESCRIPTION\n :type data: TYPE\n :param error_value: DESCRIPTION\n :type error_value: TYPE\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n err = np.ones_like(self.data, dtype=float) * self.error_value\n\n if self.floor:\n err = self.set_floor(err)\n return err\n\n def compute_error(\n self, data=None, error_type=None, error_value=None, floor=None\n ):\n \"\"\"\n\n :param data: DESCRIPTION, defaults to None\n :type data: TYPE, optional\n :param error_type: DESCRIPTION, defaults to None\n :type 
error_type: TYPE, optional\n :param error_value: DESCRIPTION, defaults to None\n :type error_value: TYPE, optional\n :param floor: DESCRIPTION, defaults to None\n :type floor: TYPE, optional\n :return: DESCRIPTION\n :rtype: TYPE\n\n \"\"\"\n\n if data is not None:\n self.data = data\n if error_type is not None:\n self.error_type = error_type\n if error_value is not None:\n self.error_value = error_value\n if floor is not None:\n self.floor = floor\n\n return self._functions[self.error_type]()" } ]
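The ModelErrors snippet quoted above maps error_type strings such as 'percent', 'geometric_mean' (alias 'egbert'), 'median', 'eigen', 'row' and 'absolute' to per-period error floors. Below is a minimal Python sketch of the geometric-mean rule it describes, err = error_value * sqrt(|Zxy * Zyx|); the impedance values and the 5 % setting are invented for illustration, and the commented-out class call assumes mtpy is installed and behaves exactly as quoted.

import numpy as np

# Toy impedance array, shape (n_periods, 2, 2); the numbers are made up for illustration.
z = np.array(
    [
        [[1.0 + 1.0j, 10.0 + 5.0j], [-8.0 - 4.0j, -1.0 - 1.0j]],
        [[0.5 + 0.5j, 6.0 + 3.0j], [-5.0 - 2.0j, -0.5 - 0.5j]],
    ]
)

# Hand-rolled 'geometric_mean' ("egbert") rule from the snippet:
# one error floor per period, err = error_value * sqrt(|Zxy * Zyx|).
error_value = 0.05  # the class default of 5 is converted to 0.05 by validate_percent()
err = error_value * np.sqrt(np.abs(z[:, 0, 1] * z[:, 1, 0]))
print(err)

# Driving the quoted class directly would look roughly like this (sketch, not verified):
# from mtpy.modeling.errors import ModelErrors
# me = ModelErrors(data=z, error_type="geometric_mean", error_value=5, floor=True)
# err = me.compute_error()  # broadcast to the full (n_periods, 2, 2) shape by resize_output()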
import numpy as np
import pandas as pd
from pathlib import Path
from loguru import logger
from mtpy.core.mt_dataframe import MTDataFrame
from mtpy.core.mt_location import MTLocation
from mtpy.modeling.errors import ModelErrors
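The import statement above pulls in MTDataFrame, MTLocation and ModelErrors. As a small illustration of the MTLocation behaviour quoted in the context list (assigning utm_epsg projects latitude/longitude into easting/northing), here is a hedged usage sketch; the coordinates are invented, and only the keyword names are taken from the class's _key_attrs.

from mtpy.core.mt_location import MTLocation  # import path taken from the row's import_statement

# Illustrative coordinates near Adelaide; EPSG:32754 is WGS 84 / UTM zone 54S.
site = MTLocation(latitude=-34.5, longitude=138.9, elevation=120.0, utm_epsg=32754)

# Per the quoted setters, assigning utm_epsg projects (longitude, latitude) to UTM,
# so east and north should now be populated.
print(site.east, site.north)
print(site)  # __str__ prints latitude, longitude, elevation, datum/UTM CRS and model coordinates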
15,848
error_value_z percentage to multiply Z by to set error *default* is 5 for 5% of Z as error A 2x2 numpy array of values can be specified to explicitly set the error_value_z for each component. error_value_tipper absolute error between 0 and 1. fn_basename basename of data file. *default* is 'ModEM_Data.dat' formatting ['1' | '2'], format of the output data file, *default* is '1' header_strings strings for header of data file following the format outlined in the ModEM documentation inv_comp_dict dictionary of inversion components inv_mode inversion mode, options are: *default* is '1' * '1' --> for 'Full_Impedance' and 'Full_Vertical_Components' * '2' --> 'Full_Impedance' * '3' --> 'Off_Diagonal_Impedance' and 'Full_Vertical_Components' * '4' --> 'Off_Diagonal_Impedance' * '5' --> 'Full_Vertical_Components' * '6' --> 'Full_Interstation_TF' * '7' --> 'Off_Diagonal_Rho_Phase' inv_mode_dict dictionary for inversion modes max_num_periods maximum number of periods model_epsg epsg code for model projection, provide this to project model to non-utm coordinates. Find the epsg code for your projection on http://spatialreference.org/ref/ or google search epsg "your projection" model_utm_zone alternative to model_epsg, choose a utm zone to project all sites to (e.g. '55S') mt_dict dictionary of mtpy.core.mt.MT objects with keys being station names period_buffer float or int if specified, apply a buffer so that interpolation doesn't stretch too far over periods period_dict dictionary of period index for period_list period_list list of periods to invert for period_max maximum value of period to invert for period_min minimum value of period to invert for period_buffer buffer so that interpolation doesn't stretch too far over periods. Provide a float or integer factor, greater than which interpolation will not stretch. e.g. 1.5 means only interpolate to a maximum of 1.5 times each side of each frequency value rotate_angle Angle to rotate data to assuming 0 is N and E is 90 save_path path to save data file to units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z *default* is [mV/km]/[nT] wave_sign_impedance [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. wave_sign_tipper [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. ====================== ==================================================== :Example 1 --> create inversion period list: :: >>> from pathlib import Path >>> import mtpy.modeling.modem as modem >>> edi_path = Path(r"/home/mt/edi_files") >>> edi_list = list(edi_path.glob("*.edi")) >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\ >>> ... 
max_num_periods=12) >>> md.write_data_file(save_path=r"/home/modem/inv1") >>> md :Example 2 --> set inverions period list from data: :: >>> md = modem.Data(edi_list) >>> #get period list from an .edi file >>> inv_period_list = 1./md.mt_dict["mt01"].Z.freq >>> #invert for every third period in inv_period_list >>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))] >>> md.period_list = inv_period_list >>> md.write_data_file(save_path=r"/home/modem/inv1") :Example 3 --> change error values: :: >>> mdr.error_type = 'floor' >>> mdr.error_floor = 10 >>> mdr.error_tipper = .03 >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 4 --> change inversion type: :: >>> mdr.inv_mode = '3' >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 5 --> rotate data: :: >>> md.rotation_angle = 60 >>> md.write_data_file(save_path=r"/home/modem/Inv1") >>> # or >>> md.write_data_file(save_path=r"/home/modem/Inv1", \ rotation_angle=60) """ def __init__(self, dataframe=None, center_point=None, **kwargs): self.logger = logger self.dataframe = dataframe if center_point is None: self.center_point = MTLocation() else: self.center_point = center_point self.wave_sign_impedance = "+" self.wave_sign_tipper = "+" self.z_units = "[mV/km]/[nT]" self.t_units = "" self.inv_mode = "1" self.formatting = "1" self.rotation_angle = 0
""" ================== ModEM ================== # Generate files for ModEM # revised by JP 2017 # revised by AK 2017 to bring across functionality from ak branch # revised by JP 2021 adding functionality and updating. # revised by JP 2022 to work with new structure of a central object """ # ============================================================================= # Imports # ============================================================================= # ============================================================================= class Data: """ Data will read and write .dat files for ModEM and convert a WS data file to ModEM format. ..note: :: the data is interpolated onto the given periods such that all stations invert for the same periods. The interpolation is a linear interpolation of each of the real and imaginary parts of the impedance tensor and induction tensor. See mtpy.core.mt.MT.interpolate for more details :param edi_list: list of edi files to read ====================== ==================================================== Attributes Description ====================== ==================================================== _dtype internal variable defining the data type of data_array _logger python logging object that put messages in logging format defined in logging configure file, see MtPyLog for more information _t_shape internal variable defining shape of tipper array in _dtype _z_shape internal variable defining shape of Z array in _dtype center_position (east, north, evel) for center point of station array. All stations are relative to this location for plotting purposes. comp_index_dict dictionary for index values of component of Z and T station_locations Stations object data_array numpy.ndarray (num_stations) structured to store data. keys are: * station --> station name * lat --> latitude in decimal degrees * lon --> longitude in decimal degrees * elev --> elevation (m) * rel_east -- > relative east location to center_position (m) * rel_north --> relative north location to center_position (m) * east --> UTM east (m) * north --> UTM north (m) * zone --> UTM zone * z --> impedance tensor array with shape (num_freq, 2, 2) * z_err --> impedance tensor error array with shape (num_freq, 2, 2) * tip --> Tipper array with shape (num_freq, 1, 2) * tipperr --> Tipper array with shape (num_freq, 1, 2) data_fn full path to data file data_period_list period list from all the data edi_list list of full paths to edi files error_type_tipper [ 'abs' | 'floor' ] *default* is 'abs' error_type_z [ 'egbert' | 'mean_od' | 'eigen' | 'median'] *default* is 'egbert_floor' * add '_floor' to any of the above to set the error as an error floor, otherwise all components are give weighted the same * 'egbert' sets error to error_value_z * sqrt(abs(zxy*zyx)) * 'mean_od' sets error to error_value_z * mean([Zxy, Zyx]) (non zeros) * 'eigen' sets error to error_value_z * eigenvalues(Z[ii]) * 'median' sets error to error_value_z * median([Zxx, Zxy, Zyx, Zyy]) (non zeros) A 2x2 numpy array of error_type_z can be specified to explicitly set the error_type_z for each component. error_value_z percentage to multiply Z by to set error *default* is 5 for 5% of Z as error A 2x2 numpy array of values can be specified to explicitly set the error_value_z for each component. error_value_tipper absolute error between 0 and 1. fn_basename basename of data file. 
*default* is 'ModEM_Data.dat' formatting ['1' | '2'], format of the output data file, *default* is '1' header_strings strings for header of data file following the format outlined in the ModEM documentation inv_comp_dict dictionary of inversion components inv_mode inversion mode, options are: *default* is '1' * '1' --> for 'Full_Impedance' and 'Full_Vertical_Components' * '2' --> 'Full_Impedance' * '3' --> 'Off_Diagonal_Impedance' and 'Full_Vertical_Components' * '4' --> 'Off_Diagonal_Impedance' * '5' --> 'Full_Vertical_Components' * '6' --> 'Full_Interstation_TF' * '7' --> 'Off_Diagonal_Rho_Phase' inv_mode_dict dictionary for inversion modes max_num_periods maximum number of periods model_epsg epsg code for model projection, provide this to project model to non-utm coordinates. Find the epsg code for your projection on http://spatialreference.org/ref/ or google search epsg "your projection" model_utm_zone alternative to model_epsg, choose a utm zone to project all sites to (e.g. '55S') mt_dict dictionary of mtpy.core.mt.MT objects with keys being station names period_buffer float or int if specified, apply a buffer so that interpolation doesn't stretch too far over periods period_dict dictionary of period index for period_list period_list list of periods to invert for period_max maximum value of period to invert for period_min minimum value of period to invert for period_buffer buffer so that interpolation doesn't stretch too far over periods. Provide a float or integer factor, greater than which interpolation will not stretch. e.g. 1.5 means only interpolate to a maximum of 1.5 times each side of each frequency value rotate_angle Angle to rotate data to assuming 0 is N and E is 90 save_path path to save data file to units [ [V/m]/[T] | [mV/km]/[nT] | Ohm ] units of Z *default* is [mV/km]/[nT] wave_sign_impedance [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. wave_sign_tipper [ + | - ] sign of time dependent wave. *default* is '+' as positive downwards. ====================== ==================================================== :Example 1 --> create inversion period list: :: >>> from pathlib import Path >>> import mtpy.modeling.modem as modem >>> edi_path = Path(r"/home/mt/edi_files") >>> edi_list = list(edi_path.glob("*.edi")) >>> md = modem.Data(edi_list, period_min=.1, period_max=300,\ >>> ... 
max_num_periods=12) >>> md.write_data_file(save_path=r"/home/modem/inv1") >>> md :Example 2 --> set inverions period list from data: :: >>> md = modem.Data(edi_list) >>> #get period list from an .edi file >>> inv_period_list = 1./md.mt_dict["mt01"].Z.freq >>> #invert for every third period in inv_period_list >>> inv_period_list = inv_period_list[np.arange(0, len(inv_period_list, 3))] >>> md.period_list = inv_period_list >>> md.write_data_file(save_path=r"/home/modem/inv1") :Example 3 --> change error values: :: >>> mdr.error_type = 'floor' >>> mdr.error_floor = 10 >>> mdr.error_tipper = .03 >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 4 --> change inversion type: :: >>> mdr.inv_mode = '3' >>> mdr.write_data_file(save_path=r"/home/modem/inv2") :Example 5 --> rotate data: :: >>> md.rotation_angle = 60 >>> md.write_data_file(save_path=r"/home/modem/Inv1") >>> # or >>> md.write_data_file(save_path=r"/home/modem/Inv1", \ rotation_angle=60) """ def __init__(self, dataframe=None, center_point=None, **kwargs): self.logger = logger self.dataframe = dataframe if center_point is None: self.center_point = MTLocation() else: self.center_point = center_point self.wave_sign_impedance = "+" self.wave_sign_tipper = "+" self.z_units = "[mV/km]/[nT]" self.t_units = "" self.inv_mode = "1" self.formatting = "1" self.rotation_angle = 0
self.z_model_error = ModelErrors(
2
2023-10-11 22:24:50+00:00
24k
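For this row the next_line field is self.z_model_error = ModelErrors( and gold_snippet_index is 2, which appears to point at the ModelErrors entry in the context list, i.e. the snippet needed to complete the call. The sketch below shows one hypothetical way such a call could continue, using only keyword names the quoted class accepts; the actual argument values in the mtpy source are not shown in this row and may well differ.

from mtpy.modeling.errors import ModelErrors  # path from the row's import_statement

# Hypothetical continuation of the truncated call; only the keyword names
# (error_value, error_type, floor, mode) are grounded in the quoted class.
z_model_error = ModelErrors(
    error_value=5,                # validate_percent() stores this as 0.05
    error_type="geometric_mean",  # must be a key of ModelErrors._functions
    floor=True,
    mode="impedance",             # selects the (2, 2) expected array shape
)
print(z_model_error)  # __str__ lists error_type, error_value, floor and mode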
weavel-ai/promptmodel-python
promptmodel/llms/llm_proxy.py
[ { "identifier": "LLM", "path": "promptmodel/llms/llm.py", "snippet": "class LLM:\n def __init__(self):\n pass\n\n @classmethod\n def __parse_output_pattern__(\n cls,\n raw_output: Optional[str] = None,\n parsing_type: Optional[ParsingType] = None,\n ) -> ParseResult:\n if parsing_type is None:\n return ParseResult(parsed_outputs={}, error=False, error_log=None)\n if raw_output is None:\n return ParseResult(parsed_outputs={}, error=True, error_log=\"No content\")\n parsing_pattern = get_pattern_by_type(parsing_type)\n whole_pattern = parsing_pattern[\"whole\"]\n parsed_results = re.findall(whole_pattern, raw_output, flags=re.DOTALL)\n parsed_outputs = {}\n error: bool = False\n error_log: str = None\n\n try:\n for parsed_result in parsed_results:\n key = parsed_result[0]\n type_str = parsed_result[1]\n value = convert_str_to_type(parsed_result[2], type_str)\n parsed_outputs[key] = value\n except Exception as e:\n error = True\n error_log = str(e)\n\n return ParseResult(\n parsed_outputs=parsed_outputs,\n error=error,\n error_log=error_log,\n )\n\n def __validate_openai_messages(\n self, messages: List[Dict[str, str]]\n ) -> List[OpenAIMessage]:\n \"\"\"Validate and convert list of dictionaries to list of OpenAIMessage.\"\"\"\n res = []\n for message in messages:\n res.append(OpenAIMessage(**message))\n return res\n\n def run(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> LLMResponse:\n \"\"\"Return the response from openai chat completion.\"\"\"\n response = None\n if functions == []:\n functions = None\n try:\n response: ModelResponse = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n content: Optional[str] = getattr(\n response.choices[0].message, \"content\", None\n )\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=content,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n async def arun(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> LLMResponse:\n \"\"\"Return the response from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n response: ModelResponse = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n content: Optional[str] = getattr(\n response.choices[0].message, \"content\", None\n )\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[ChatCompletionMessageToolCall] = getattr(\n 
response.choices[0].message, \"tool_calls\", None\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=content,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n )\n\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n def stream(\n self,\n messages: List[Dict[str, str]], # input\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> Generator[LLMStreamResponse, None, None]:\n \"\"\"Stream openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n # load_prompt()\n start_time = datetime.datetime.now()\n response = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def astream(\n self,\n messages: List[Dict[str, str]],\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n *args,\n **kwargs,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n start_time = datetime.datetime.now()\n response = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def run_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n \"\"\"Parse and return output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n parsed_success = True\n parse_result = None\n error_log = None\n try:\n response: ModelResponse = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n raw_output = getattr(response.choices[0].message, \"content\", None)\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n if not call_func and not call_tools:\n # function call does not appear in output\n\n parse_result: ParseResult = self.__parse_output_pattern__(\n raw_output, parsing_type\n )\n\n # if output_keys exist & parsed_outputs does not match with 
output_keys -> error\n # if parse_result.error -> error\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or parse_result.error:\n parsed_success = False\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not parse_result.error_log\n else parse_result.error_log\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=raw_output,\n parsed_outputs=parse_result.parsed_outputs if parse_result else None,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n error=not parsed_success,\n error_log=error_log,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n async def arun_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> LLMResponse:\n \"\"\"Generate openai chat completion asynchronously, and parse the output.\n Example prompt is as follows:\n -----\n Given a topic, you are required to generate a story.\n You must follow the provided output format.\n\n Topic:\n {topic}\n\n Output format:\n [Story]\n ...\n [/Story]\n\n Now generate the output:\n \"\"\"\n if functions == []:\n functions = None\n response = None\n parsed_success = True\n parse_result = None\n error_log = None\n try:\n response: ModelResponse = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n raw_output = getattr(response.choices[0].message, \"content\", None)\n\n call_func: Optional[FunctionCall] = getattr(\n response.choices[0].message, \"function_call\", None\n )\n\n call_tools: Optional[List[ChatCompletionMessageToolCall]] = getattr(\n response.choices[0].message, \"tool_calls\", None\n )\n\n if not call_func and not call_tools:\n # function call does not appear in output\n parse_result: ParseResult = self.__parse_output_pattern__(\n raw_output, parsing_type\n )\n\n # if output_keys exist & parsed_outputs does not match with output_keys -> error\n # if parse_result.error -> error\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or parse_result.error:\n parsed_success = False\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not parse_result.error_log\n else parse_result.error_log\n )\n\n return LLMResponse(\n api_response=response,\n raw_output=raw_output,\n parsed_outputs=parse_result.parsed_outputs if parse_result else None,\n function_call=call_func if call_func else None,\n tool_calls=call_tools if call_tools else None,\n error=not parsed_success,\n error_log=error_log,\n )\n except Exception as e:\n if response is not None:\n return LLMResponse(api_response=response, error=True, error_log=str(e))\n else:\n return LLMResponse(api_response=None, error=True, error_log=str(e))\n\n def stream_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n 
api_key: Optional[str] = None,\n **kwargs,\n ) -> Generator[LLMStreamResponse, None, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n if parsing_type == ParsingType.COLON.value:\n # cannot stream colon type\n yield LLMStreamResponse(\n error=True, error_log=\"Cannot stream colon type\"\n )\n return\n start_time = datetime.datetime.now()\n response = completion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n parsed_outputs = {}\n error_occurs = False\n error_log = None\n\n if (functions and len(functions) > 0) or (tools and len(tools) > 0):\n # if function exists, cannot parsing in stream time\n # just stream raw output and parse after stream\n streamed_outputs = {\n \"content\": \"\",\n \"function_call\": None,\n \"api_response\": None,\n }\n response_with_api_res = None\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n if chunk.raw_output:\n streamed_outputs[\"content\"] += chunk.raw_output\n if chunk.function_call:\n streamed_outputs[\"function_call\"] = chunk.function_call\n if (\n chunk.api_response\n and getattr(chunk.api_response.choices[0], \"delta\", None)\n is None\n ): # only get the last api_response, not delta response\n streamed_outputs[\"api_response\"] = chunk.api_response\n response_with_api_res = chunk\n else:\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if not streamed_outputs[\"function_call\"]:\n # if function call does not exist in output\n # able to parse\n parse_result: ParseResult = self.__parse_output_pattern__(\n streamed_outputs[\"content\"], parsing_type\n )\n\n error_occurs = parse_result.error or error_occurs\n error_log = parse_result.error_log if not error_log else error_log\n\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or error_occurs:\n error_occurs = True\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not error_log\n else error_log\n )\n yield LLMStreamResponse(\n api_response=streamed_outputs[\"api_response\"],\n error=True,\n error_log=error_log,\n )\n else:\n response_with_api_res.parsed_outputs = (\n parse_result.parsed_outputs\n )\n yield response_with_api_res\n else:\n yield response_with_api_res\n else:\n if parsing_type is None:\n for chunk in self.__llm_stream_response_generator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n elif parsing_type == ParsingType.DOUBLE_SQUARE_BRACKET.value:\n for chunk in self.__double_type_sp_generator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n else:\n for chunk in self.__single_type_sp_generator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if (\n output_keys is 
not None\n and set(parsed_outputs.keys()) != set(output_keys)\n ) and not error_occurs:\n error_occurs = True\n error_log = \"Output keys do not match with parsed output keys\"\n yield LLMStreamResponse(error=True, error_log=error_log)\n\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def astream_and_parse(\n self,\n messages: List[Dict[str, str]],\n parsing_type: Optional[ParsingType] = None,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n output_keys: Optional[List[str]] = None,\n model: Optional[str] = DEFAULT_MODEL,\n api_key: Optional[str] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n \"\"\"Parse & stream output from openai chat completion.\"\"\"\n if functions == []:\n functions = None\n response = None\n try:\n if parsing_type == ParsingType.COLON.value:\n # cannot stream colon type\n yield LLMStreamResponse(\n error=True, error_log=\"Cannot stream colon type\"\n )\n return\n start_time = datetime.datetime.now()\n response = await acompletion(\n model=model,\n messages=[\n message.model_dump(exclude_none=True)\n for message in self.__validate_openai_messages(messages)\n ],\n stream=True,\n functions=functions,\n tools=tools,\n api_key=api_key,\n )\n\n parsed_outputs = {}\n error_occurs = False # error in stream time\n error_log = None\n if (functions and len(functions) > 0) or (tools and len(tools) > 0):\n # if function exists, cannot parsing in stream time\n # just stream raw output and parse after stream\n streamed_outputs = {\n \"content\": \"\",\n \"function_call\": None,\n \"api_response\": None,\n }\n response_with_api_res = None\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n if chunk.raw_output:\n streamed_outputs[\"content\"] += chunk.raw_output\n if chunk.function_call:\n streamed_outputs[\"function_call\"] = chunk.function_call\n if (\n chunk.api_response\n and getattr(chunk.api_response.choices[0], \"delta\", None)\n is None\n ):\n streamed_outputs[\"api_response\"] = chunk.api_response\n response_with_api_res = chunk\n else:\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n if not streamed_outputs[\"function_call\"]:\n # if function call does not exist in output\n # able to parse\n parse_result: ParseResult = self.__parse_output_pattern__(\n streamed_outputs[\"content\"], parsing_type\n )\n\n error_occurs = parse_result.error or error_occurs\n error_log = parse_result.error_log if not error_log else error_log\n if (\n output_keys is not None\n and set(parse_result.parsed_outputs.keys()) != set(output_keys)\n ) or error_occurs:\n error_occurs = True\n error_log = (\n \"Output keys do not match with parsed output keys\"\n if not error_log\n else error_log\n )\n yield LLMStreamResponse(\n api_response=streamed_outputs[\"api_response\"],\n error=True,\n error_log=error_log,\n )\n else:\n response_with_api_res.parsed_outputs = (\n parse_result.parsed_outputs\n )\n yield response_with_api_res\n else:\n yield response_with_api_res\n else:\n if parsing_type is None:\n async for chunk in self.__llm_stream_response_agenerator__(\n messages, response, start_time, functions, tools\n ):\n yield chunk\n\n if chunk.error and not error_occurs:\n error_occurs = True\n error_log = chunk.error_log\n\n elif parsing_type == ParsingType.DOUBLE_SQUARE_BRACKET.value:\n async for chunk in self.__double_type_sp_agenerator__(\n messages, response, parsing_type, start_time, 
functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n else:\n async for chunk in self.__single_type_sp_agenerator__(\n messages, response, parsing_type, start_time, functions, tools\n ):\n yield chunk\n if chunk.parsed_outputs:\n parsed_outputs = update_dict(\n parsed_outputs, chunk.parsed_outputs\n )\n if chunk.error and not error_occurs:\n error_occurs = True\n\n if (\n output_keys is not None\n and set(parsed_outputs.keys()) != set(output_keys)\n ) and not error_occurs:\n error_occurs = True\n error_log = \"Output keys do not match with parsed output keys\"\n yield LLMStreamResponse(error=True, error_log=error_log)\n\n except Exception as e:\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def make_model_response(\n self,\n chunk: ModelResponse,\n response_ms,\n messages: List[Dict[str, str]],\n raw_output: str,\n functions: Optional[List[Any]] = None,\n function_call: Optional[Dict[str, Any]] = None,\n tools: Optional[List[Any]] = None,\n tool_calls: Optional[List[Dict[str, Any]]] = None,\n ) -> ModelResponse:\n count_start_time = datetime.datetime.now()\n prompt_token: int = num_tokens_for_messages(\n messages=messages, model=chunk[\"model\"]\n )\n completion_token: int = num_tokens_for_messages(\n model=chunk[\"model\"],\n messages=[{\"role\": \"assistant\", \"content\": raw_output}],\n )\n\n if functions and len(functions) > 0:\n functions_token = num_tokens_from_functions_input(\n functions=functions, model=chunk[\"model\"]\n )\n prompt_token += functions_token\n\n if tools and len(tools) > 0:\n tools_token = num_tokens_from_functions_input(\n functions=[tool[\"function\"] for tool in tools], model=chunk[\"model\"]\n )\n prompt_token += tools_token\n # if function_call:\n # function_call_token = num_tokens_from_function_call_output(\n # function_call_output=function_call, model=chunk[\"model\"]\n # )\n # completion_token += function_call_token\n\n count_end_time = datetime.datetime.now()\n logger.debug(\n f\"counting token time : {(count_end_time - count_start_time).total_seconds() * 1000} ms\"\n )\n\n usage = Usage(\n **{\n \"prompt_tokens\": prompt_token,\n \"completion_tokens\": completion_token,\n \"total_tokens\": prompt_token + completion_token,\n }\n )\n\n last_message = Message(\n role=chunk.choices[0].delta.role\n if getattr(chunk.choices[0].delta, \"role\", None)\n else \"assistant\",\n content=raw_output if raw_output != \"\" else None,\n function_call=function_call if function_call else None,\n tool_calls=tool_calls if tool_calls else None,\n )\n choices = [\n Choices(finish_reason=chunk.choices[0].finish_reason, message=last_message)\n ]\n\n res = ModelResponse(\n id=chunk[\"id\"],\n created=chunk[\"created\"],\n model=chunk[\"model\"],\n stream=True,\n )\n res.choices = choices\n res.usage = usage\n res._response_ms = response_ms\n\n return res\n\n def __llm_stream_response_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n raw_output = \"\"\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n try:\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n 
chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n raw_output += chunk.choices[0].delta.content\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=chunk.choices[0].delta.content,\n )\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def __single_type_sp_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield 
LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[\n 0\n ] # Updated to unpack both key and type\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[\n 0\n ].replace(end_tag, \"\")\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if pattern ends = (\"[blah]\" != end_pattern) appeared in buffer\n if (\n active_type == \"List\"\n or active_type == \"Dict\"\n and end_token.find(\"]\") != -1\n ):\n try:\n buffer_dict = json.loads(buffer)\n stream_pause = False\n continue\n except Exception as exception:\n logger.error(exception)\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(\n start_token\n )[0]\n },\n )\n stream_pause = False\n buffer = \"\"\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={active_key: buffer},\n )\n stream_pause = False\n buffer = \"\"\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n def __double_type_sp_generator__(\n self,\n messages: List[Dict[str, str]],\n response: Generator[ModelResponse, None, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> Generator[LLMStreamResponse, None, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", 
\"arguments\": \"\"}\n tool_calls = []\n\n for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[0]\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[0]\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if (\"[blah]\" != end_pattern) appeared in buffer\n if (\n buffer.find(end_token + end_token) != -1\n ): # if ]] in buffer -> error\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(start_token)[0]\n },\n )\n buffer = buffer.split(end_token + end_token)[-1]\n stream_pause = False\n break\n else:\n if (\n buffer.find(start_token + start_token) != -1\n ): # if [[ in buffer -> pause\n break\n else:\n # if [ in buffer (== [blah]) -> stream\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n stream_pause = False\n break\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason 
== \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def __llm_stream_response_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n raw_output = \"\"\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n try:\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def __single_type_sp_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value 
is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n if len(keys) == 0:\n break # no key\n\n active_key, active_type = keys[\n 0\n ] # Updated to unpack both key and type\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[\n 0\n ].replace(end_tag, \"\")\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n elif (\n stream_value.find(end_token) != -1\n ): # if pattern ends = (\"[blah]\" != end_pattern) appeared in buffer\n if (\n active_type == \"List\"\n or active_type == \"Dict\"\n and end_token.find(\"]\") != -1\n ):\n try:\n buffer_dict = json.loads(buffer)\n stream_pause = False\n continue\n except Exception as exception:\n logger.error(exception)\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(\n start_token\n )[0]\n },\n )\n stream_pause = False\n buffer = \"\"\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={active_key: buffer},\n )\n stream_pause = False\n buffer = \"\"\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))\n\n async def 
__double_type_sp_agenerator__(\n self,\n messages: List[Dict[str, str]],\n response: AsyncGenerator[ModelResponse, None],\n parsing_type: ParsingType,\n start_time: datetime.datetime,\n functions: Optional[List[Any]] = None,\n tools: Optional[List[Any]] = None,\n ) -> AsyncGenerator[LLMStreamResponse, None]:\n try:\n parsing_pattern = get_pattern_by_type(parsing_type)\n start_tag = parsing_pattern[\"start\"]\n start_fstring = parsing_pattern[\"start_fstring\"]\n end_fstring = parsing_pattern[\"end_fstring\"]\n start_token = parsing_pattern[\"start_token\"]\n end_token = parsing_pattern[\"end_token\"]\n\n buffer = \"\"\n raw_output = \"\"\n active_key = None\n stream_pause = False\n end_tag = None\n function_call = {\"name\": \"\", \"arguments\": \"\"}\n tool_calls = []\n\n async for chunk in response:\n yield_api_response_with_fc = False\n if getattr(chunk.choices[0].delta, \"function_call\", None) is not None:\n for key, value in (\n chunk.choices[0].delta.function_call.model_dump().items()\n ):\n if value is not None:\n function_call[key] += value\n\n yield LLMStreamResponse(\n api_response=chunk,\n function_call=chunk.choices[0].delta.function_call,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"tool_calls\", None) is not None:\n # tool_calls: list\n tool_calls_delta: List[Any] = chunk.choices[0].delta.tool_calls\n index = tool_calls_delta[0].index\n if index == len(tool_calls):\n tool_calls.append(\n {\n \"id\": tool_calls_delta[0].id,\n \"function\": {},\n \"type\": \"function\",\n }\n )\n tool_delta: ChoiceDeltaToolCallFunction = tool_calls_delta[\n 0\n ].function\n tool_calls[index][\"function\"] = update_dict(\n tool_calls[index][\"function\"], tool_delta.model_dump()\n )\n\n yield LLMStreamResponse(\n api_response=chunk,\n tool_calls=chunk.choices[0].delta.tool_calls,\n )\n yield_api_response_with_fc = True\n\n if getattr(chunk.choices[0].delta, \"content\", None) is not None:\n stream_value: str = chunk.choices[0].delta.content\n raw_output += stream_value\n yield LLMStreamResponse(\n api_response=chunk if not yield_api_response_with_fc else None,\n raw_output=stream_value,\n )\n\n buffer += stream_value\n\n while True:\n if active_key is None:\n keys = re.findall(start_tag, buffer, flags=re.DOTALL)\n # if len(keys) > 1:\n # yield LLMStreamResponse(\n # error=True,\n # error_log=\"Parsing error : Nested key detected\",\n # )\n # break\n if len(keys) == 0:\n break # no key\n active_key, active_type = keys[0]\n end_tag = end_fstring.format(key=active_key)\n # delete start tag from buffer\n start_pattern = start_fstring.format(\n key=active_key, type=active_type\n )\n buffer = buffer.split(start_pattern)[-1]\n\n else:\n if (\n stream_value.find(start_token) != -1\n ): # start token appers in chunk -> pause\n stream_pause = True\n break\n elif stream_pause:\n if (\n buffer.find(end_tag) != -1\n ): # if end tag appears in buffer\n yield LLMStreamResponse(\n parsed_outputs={\n active_key: buffer.split(end_tag)[0]\n }\n )\n buffer = buffer.split(end_tag)[-1]\n active_key = None\n stream_pause = False\n # break\n elif (\n stream_value.find(end_token) != -1\n ): # if (\"[blah]\" != end_pattern) appeared in buffer\n if (\n buffer.find(end_token + end_token) != -1\n ): # if ]] in buffer -> error\n yield LLMStreamResponse(\n error=True,\n error_log=\"Parsing error : Invalid end tag detected\",\n parsed_outputs={\n active_key: buffer.split(start_token)[0]\n },\n )\n buffer = buffer.split(end_token + end_token)[-1]\n stream_pause = False\n break\n else:\n if 
(\n buffer.find(start_token + start_token) != -1\n ): # if [[ in buffer -> pause\n break\n else:\n # if [ in buffer (== [blah]) -> stream\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n stream_pause = False\n break\n break\n else:\n # no start token, no stream_pause (not inside of tag)\n if buffer:\n yield LLMStreamResponse(\n parsed_outputs={active_key: buffer}\n )\n buffer = \"\"\n break\n\n if chunk.choices[0].finish_reason != None:\n end_time = datetime.datetime.now()\n response_ms = (end_time - start_time).total_seconds() * 1000\n yield LLMStreamResponse(\n api_response=self.make_model_response(\n chunk,\n response_ms,\n messages,\n raw_output,\n functions=functions,\n function_call=function_call\n if chunk.choices[0].finish_reason == \"function_call\"\n else None,\n tools=tools,\n tool_calls=tool_calls\n if chunk.choices[0].finish_reason == \"tool_calls\"\n else None,\n )\n )\n except Exception as e:\n logger.error(e)\n yield LLMStreamResponse(error=True, error_log=str(e))" }, { "identifier": "DeployedPrompt", "path": "promptmodel/database/models.py", "snippet": "class DeployedPrompt(BaseModel):\n id = AutoField()\n version_uuid = ForeignKeyField(\n DeployedFunctionModelVersion,\n field=DeployedFunctionModelVersion.uuid,\n backref=\"prompts\",\n on_delete=\"CASCADE\",\n )\n role = CharField()\n step = IntegerField()\n content = TextField()" }, { "identifier": "DeployedFunctionModel", "path": "promptmodel/database/models.py", "snippet": "class DeployedFunctionModel(BaseModel):\n uuid = UUIDField(unique=True, default=uuid4)\n name = CharField()" }, { "identifier": "DeployedFunctionModelVersion", "path": "promptmodel/database/models.py", "snippet": "class DeployedFunctionModelVersion(BaseModel):\n uuid = UUIDField(unique=True, default=uuid4)\n version = IntegerField(null=False)\n from_version = IntegerField(null=True)\n function_model_uuid = ForeignKeyField(\n DeployedFunctionModel,\n field=DeployedFunctionModel.uuid,\n backref=\"versions\",\n on_delete=\"CASCADE\",\n )\n model = CharField()\n is_published = BooleanField(default=False)\n is_ab_test = BooleanField(default=False)\n ratio = FloatField(null=True)\n parsing_type = CharField(\n null=True,\n default=None,\n constraints=[\n Check(\n f\"parsing_type IN ('{ParsingType.COLON.value}', '{ParsingType.SQUARE_BRACKET.value}', '{ParsingType.DOUBLE_SQUARE_BRACKET.value}')\"\n )\n ],\n )\n output_keys = JSONField(null=True, default=None)\n functions = JSONField(default=[])" }, { "identifier": "get_deployed_prompts", "path": "promptmodel/database/crud.py", "snippet": "def get_deployed_prompts(function_model_name: str) -> Tuple[List[DeployedPrompt], str]:\n try:\n with db.atomic():\n versions: List[DeployedFunctionModelVersion] = list(\n DeployedFunctionModelVersion.select()\n .join(DeployedFunctionModel)\n .where(\n DeployedFunctionModelVersion.function_model_uuid\n == DeployedFunctionModel.get(\n DeployedFunctionModel.name == function_model_name\n ).uuid\n )\n )\n prompts: List[DeployedPrompt] = list(\n DeployedPrompt.select()\n .where(\n DeployedPrompt.version_uuid.in_(\n [version.uuid for version in versions]\n )\n )\n .order_by(DeployedPrompt.step.asc())\n )\n # select version by ratio\n selected_version = select_version_by_ratio(\n [version.__data__ for version in versions]\n )\n selected_prompts = list(\n filter(\n lambda prompt: str(prompt.version_uuid.uuid)\n == str(selected_version[\"uuid\"]),\n prompts,\n )\n )\n\n version_details = {\n \"model\": selected_version[\"model\"],\n \"version\" : 
selected_version[\"version\"],\n \"uuid\": selected_version[\"uuid\"],\n \"parsing_type\": selected_version[\"parsing_type\"],\n \"output_keys\": selected_version[\"output_keys\"],\n }\n\n return selected_prompts, version_details\n except Exception as e:\n logger.error(e)\n return None, None" }, { "identifier": "CacheManager", "path": "promptmodel/promptmodel_init.py", "snippet": "class CacheManager:\n _instance = None\n _lock = threading.Lock()\n\n def __new__(cls):\n with cls._lock:\n if cls._instance is None:\n instance = super(CacheManager, cls).__new__(cls)\n instance.last_update_time = 0 # to manage update frequency\n instance.update_interval = 60 * 60 * 6 # seconds, 6 hours\n instance.program_alive = True\n instance.background_tasks = []\n initialize_db()\n atexit.register(instance._terminate)\n asyncio.run(instance.update_cache()) # updae cache first synchronously\n instance.cache_thread = threading.Thread(\n target=instance._run_cache_loop\n )\n instance.cache_thread.daemon = True\n instance.cache_thread.start()\n cls._instance = instance\n return cls._instance\n\n def cache_update_background_task(self, config):\n asyncio.run(update_deployed_db(config))\n\n def _run_cache_loop(self):\n asyncio.run(self._update_cache_periodically())\n\n async def _update_cache_periodically(self):\n while True:\n await asyncio.sleep(self.update_interval) # Non-blocking sleep\n await self.update_cache()\n\n async def update_cache(self):\n # Current time\n current_time = time.time()\n config = read_config()\n\n if not config:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n if \"project\" not in config:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n\n if \"version\" not in config[\"project\"]:\n upsert_config({\"version\": 0}, section=\"project\")\n config = {\"project\": {\"version\": 0}}\n\n # Check if we need to update the cache\n if current_time - self.last_update_time > self.update_interval:\n # Update cache logic\n try:\n await update_deployed_db(config)\n except:\n # try once more\n await update_deployed_db(config)\n # Update the last update time\n self.last_update_time = current_time\n\n def _terminate(self):\n self.program_alive = False\n\n # async def cleanup_background_tasks(self):\n # for task in self.background_tasks:\n # if not task.done():\n # task.cancel()\n # try:\n # await task\n # except asyncio.CancelledError:\n # pass # 작업이 취소됨" }, { "identifier": "read_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def read_config():\n \"\"\"\n Reads the configuration from the given filename.\n\n :return: A dictionary containing the configuration.\n \"\"\"\n if not os.path.exists(CONFIG_FILE):\n return {}\n\n with open(CONFIG_FILE, \"r\") as file:\n config = yaml.safe_load(file) or {}\n return config" }, { "identifier": "upsert_config", "path": "promptmodel/utils/config_utils.py", "snippet": "def upsert_config(new_config: Dict[str, Any], section: str = None):\n \"\"\"\n Upserts the given configuration file with the given configuration.\n\n :param new_config: A dictionary containing the new configuration.\n :param section: The section of the configuration to update.\n \"\"\"\n config = read_config()\n if section:\n config_section = config.get(section, {})\n new_config = {section: merge_dict(config_section, new_config)}\n config = merge_dict(config, new_config)\n # If . 
directory does not exist, create it\n if not os.path.exists(\"./.promptmodel\"):\n os.mkdir(\"./.promptmodel\")\n\n with open(CONFIG_FILE, \"w\") as file:\n yaml.safe_dump(config, file, default_flow_style=False)" }, { "identifier": "select_version_by_ratio", "path": "promptmodel/utils/random_utils.py", "snippet": "def select_version_by_ratio(versions):\n epsilon = 1e-10\n ratios = [version[\"ratio\"] for version in versions]\n\n if not abs(sum(ratios) - 1.0) <= epsilon:\n raise ValueError(f\"Sum of ratios must be 1.0, now {sum(ratios)}\")\n\n cumulative_ratios = []\n cumulative_sum = 0\n for ratio in ratios:\n cumulative_sum += ratio\n cumulative_ratios.append(cumulative_sum)\n\n random_value = random.random()\n for idx, cumulative_ratio in enumerate(cumulative_ratios):\n if random_value <= cumulative_ratio:\n return versions[idx]" }, { "identifier": "logger", "path": "promptmodel/utils/logger.py", "snippet": "def debug(msg: Any, *args):\ndef success(msg: Any, *args):\ndef info(msg: Any, *args):\ndef warning(msg: Any, *args):\ndef error(msg: Any, *args):" }, { "identifier": "run_async_in_sync", "path": "promptmodel/utils/async_utils.py", "snippet": "def run_async_in_sync(coro: Coroutine):\n try:\n loop = asyncio.get_running_loop()\n except RuntimeError: # No running loop\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n result = loop.run_until_complete(coro)\n # loop.close()\n return result\n\n return loop.run_until_complete(coro)" }, { "identifier": "num_tokens_for_messages_for_each", "path": "promptmodel/utils/token_counting.py", "snippet": "def num_tokens_for_messages_for_each(\n messages: List[Dict[str, str]], model: str = \"gpt-3.5-turbo-0613\"\n) -> List[int]:\n processed_messages = [\n {**message, \"function_call\": str(message[\"function_call\"])}\n if \"function_call\" in message\n else message\n for message in messages\n ]\n processed_messages = [\n {**message, \"tool_calls\": str(message[\"tool_calls\"])}\n if \"tool_calls\" in message\n else message\n for message in processed_messages\n ]\n return [\n token_counter(model=model, messages=[message]) for message in processed_messages\n ]" }, { "identifier": "num_tokens_from_functions_input", "path": "promptmodel/utils/token_counting.py", "snippet": "def num_tokens_from_functions_input(\n functions: Optional[List[Any]] = None, model=\"gpt-3.5-turbo-0613\"\n) -> int:\n \"\"\"Return the number of tokens used by a list of functions.\"\"\"\n if functions is None:\n return 0\n num_tokens = 0\n for function in functions:\n function_tokens = token_counter(model=model, text=function[\"name\"])\n function_tokens += token_counter(model=model, text=function[\"description\"])\n\n if \"parameters\" in function:\n parameters = function[\"parameters\"]\n if \"properties\" in parameters:\n for properties_key in parameters[\"properties\"]:\n function_tokens += token_counter(model=model, text=properties_key)\n v = parameters[\"properties\"][properties_key]\n for field in v:\n if field == \"type\":\n function_tokens += 2\n function_tokens += token_counter(\n model=model, text=v[\"type\"]\n )\n elif field == \"description\":\n function_tokens += 2\n function_tokens += token_counter(\n model=model, text=v[\"description\"]\n )\n elif field == \"enum\":\n function_tokens -= 3\n for o in v[\"enum\"]:\n function_tokens += 3\n function_tokens += token_counter(model=model, text=o)\n else:\n print(f\"Warning: not supported field {field}\")\n function_tokens += 11\n\n num_tokens += function_tokens\n\n num_tokens += 12\n return num_tokens" }, { 
"identifier": "update_dict", "path": "promptmodel/utils/output_utils.py", "snippet": "def update_dict(\n target: Dict[str, str],\n source: Dict[str, str],\n):\n for key, value in source.items():\n if value is not None:\n if key not in target:\n target[key] = value\n else:\n target[key] += value\n return target" }, { "identifier": "AsyncAPIClient", "path": "promptmodel/apis/base.py", "snippet": "class AsyncAPIClient:\n \"\"\"\n A class to represent an Async API request client.\n Used in Deployment stage.\n\n ...\n\n Methods\n -------\n get_headers():\n Generates headers for the API request.\n execute(method=\"GET\", params=None, data=None, json=None, **kwargs):\n Executes the API request.\n \"\"\"\n\n @classmethod\n async def _get_headers(cls, use_cli_key: bool = True) -> Dict:\n \"\"\"\n Reads, decrypts the api_key, and returns headers for API request.\n\n Returns\n -------\n dict\n a dictionary containing the Authorization header\n \"\"\"\n config = read_config()\n if use_cli_key:\n if \"connection\" not in config:\n print(\n \"User not logged in. Please run [violet]prompt login[/violet] first.\"\n )\n exit()\n\n encrypted_key = config[\"connection\"][\"encrypted_api_key\"]\n if encrypted_key is None:\n raise Exception(\"No API key found. Please run 'prompt login' first.\")\n decrypted_key = decrypt_message(encrypted_key)\n else:\n decrypted_key = os.environ.get(\"PROMPTMODEL_API_KEY\")\n if decrypted_key is None:\n raise Exception(\n \"PROMPTMODEL_API_KEY was not found in the current environment.\"\n )\n headers = {\"Authorization\": f\"Bearer {decrypted_key}\"}\n return headers\n\n @classmethod\n async def execute(\n cls,\n path: str,\n method=\"GET\",\n params: Dict = None,\n data: Dict = None,\n json: Dict = None,\n ignore_auth_error: bool = False,\n use_cli_key: bool = True,\n **kwargs,\n ) -> requests.Response:\n \"\"\"\n Executes the API request with the decrypted API key in the headers.\n\n Parameters\n ----------\n method : str, optional\n The HTTP method of the request (default is \"GET\")\n params : dict, optional\n The URL parameters to be sent with the request\n data : dict, optional\n The request body to be sent with the request\n json : dict, optional\n The JSON-encoded request body to be sent with the request\n ignore_auth_error: bool, optional\n Whether to ignore authentication errors (default is False)\n **kwargs : dict\n Additional arguments to pass to the requests.request function\n\n Returns\n -------\n requests.Response\n The response object returned by the requests library\n \"\"\"\n url = f\"{ENDPOINT_URL}{path}\"\n headers = await cls._get_headers(use_cli_key)\n try:\n async with httpx.AsyncClient(http2=True) as _client:\n response = await _client.request(\n method,\n url,\n headers=headers,\n params=params,\n data=data,\n json=json,\n **kwargs,\n )\n if not response:\n print(f\"[red]Error: {response}[/red]\")\n if response.status_code == 200:\n return response\n elif response.status_code == 403:\n if not ignore_auth_error:\n print(\"[red]Authentication failed.[/red]\")\n else:\n print(f\"[red]Error: {response}[/red]\")\n\n return response\n except requests.exceptions.ConnectionError:\n print(\"[red]Could not connect to the Promptmodel API.[/red]\")\n except requests.exceptions.Timeout:\n print(\"[red]The request timed out.[/red]\")\n except Exception as exception:\n print(f\"[red]Error: {exception}[/red]\")" }, { "identifier": "LLMResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMResponse(OpenAIObject):\n api_response: 
Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[FunctionCall] = None\n tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "LLMStreamResponse", "path": "promptmodel/types/response.py", "snippet": "class LLMStreamResponse(OpenAIObject):\n api_response: Optional[ModelResponse] = None\n raw_output: Optional[str] = None\n parsed_outputs: Optional[Dict[str, Any]] = None\n error: Optional[bool] = None\n error_log: Optional[str] = None\n function_call: Optional[ChoiceDeltaFunctionCall] = None\n tool_calls: Optional[List[ChoiceDeltaToolCall]] = None\n pm_detail: Optional[PMDetail] = None" }, { "identifier": "FunctionModelConfig", "path": "promptmodel/types/response.py", "snippet": "class FunctionModelConfig(BaseModel):\n \"\"\"Response Class for FunctionModel.get_config()\n prompts: List[Dict[str, Any]] = []\n each prompt can have role, content, name, function_call, and tool_calls\n version_detail: Dict[str, Any] = {}\n version_detail has \"model\", \"uuid\", \"parsing_type\" and \"output_keys\".\n model: str\n model name (e.g. \"gpt-3.5-turbo\")\n name: str\n name of the FunctionModel.\n version_uuid: str\n version uuid of the FunctionModel.\n version: int\n version id of the FunctionModel.\n parsing_type: Optional[str] = None\n parsing type of the FunctionModel.\n output_keys: Optional[List[str]] = None\n output keys of the FunctionModel.\n \"\"\"\n\n prompts: List[Dict[str, Any]]\n model: str\n name: str\n version_uuid: str\n version: int\n parsing_type: Optional[str] = None\n output_keys: Optional[List[str]] = None" }, { "identifier": "ChatModelConfig", "path": "promptmodel/types/response.py", "snippet": "class ChatModelConfig(BaseModel):\n system_prompt: str\n model: str\n name: str\n version_uuid: str\n version: int\n message_logs: Optional[List[Dict]] = []" }, { "identifier": "UnitConfig", "path": "promptmodel/types/response.py", "snippet": "class UnitConfig(BaseModel):\n \"\"\"Response Class for UnitLogger.get_config().\n Created after calling UnitLogger.log_start()\n name: str\n name of the UnitLogger.\n version_uuid: str\n version uuid of the UnitLogger.\n version: int\n version id of the UnitLogger.\n log_uuid: str\n log_uuid for current trace.\n \"\"\"\n\n name: str\n version_uuid: str\n log_uuid: str\n version: int" }, { "identifier": "PMDetail", "path": "promptmodel/types/response.py", "snippet": "class PMDetail(BaseModel):\n model: str\n name: str\n version_uuid: str\n version: int\n log_uuid: str" }, { "identifier": "ChatLogRequest", "path": "promptmodel/types/request.py", "snippet": "class ChatLogRequest(BaseModel):\n uuid: Optional[str] = None\n message: Dict[str, Any]\n metadata: Optional[Dict] = None\n api_response: Optional[ModelResponse] = None\n\n def __post_init__(\n self,\n ):\n if self.api_response is not None and self.message is None:\n self.message = self.api_response.choices[0].message.model_dump()" } ]
from typing import ( Any, AsyncGenerator, Callable, Dict, Generator, List, Optional, Tuple, Union, ) from uuid import UUID from threading import Thread from rich import print from uuid import uuid4 from litellm.utils import ModelResponse, get_max_tokens from promptmodel.llms.llm import LLM from promptmodel.database.models import ( DeployedPrompt, DeployedFunctionModel, DeployedFunctionModelVersion, ) from promptmodel.database.crud import ( get_deployed_prompts, ) from promptmodel.promptmodel_init import CacheManager from promptmodel.utils.config_utils import read_config, upsert_config from promptmodel.utils.random_utils import select_version_by_ratio from promptmodel.utils import logger from promptmodel.utils.async_utils import run_async_in_sync from promptmodel.utils.token_counting import ( num_tokens_for_messages_for_each, num_tokens_from_functions_input, ) from promptmodel.utils.output_utils import update_dict from promptmodel.apis.base import AsyncAPIClient from promptmodel.types.response import ( LLMResponse, LLMStreamResponse, FunctionModelConfig, ChatModelConfig, UnitConfig, PMDetail, ) from promptmodel.types.request import ChatLogRequest
18,010
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None
class LLMProxy(LLM): def __init__( self, name: str, version: Optional[Union[str, int]] = "deploy", unit_config: Optional[UnitConfig] = None ): super().__init__() self._name = name self.version = version self.unit_config = unit_config def _wrap_gen(self, gen: Callable[..., Any]) -> Callable[..., Any]: def wrapper(inputs: Dict[str, Any], **kwargs): prompts, version_details = run_async_in_sync( LLMProxy.fetch_prompts(self._name, self.version) ) call_args = self._prepare_call_args( prompts, version_details, inputs, kwargs ) log_uuid = str(uuid4()) # Call the generator with the arguments stream_response: Generator[LLMStreamResponse, None, None] = gen(**call_args) api_response = None dict_cache = {} # to store aggregated dictionary values string_cache = "" # to store aggregated string values error_occurs = False error_log = None for item in stream_response: if ( item.api_response and "delta" not in item.api_response.choices[0] ): # only get the last api_response, not delta response api_response = item.api_response if item.parsed_outputs: dict_cache = update_dict(dict_cache, item.parsed_outputs) if item.raw_output: string_cache += item.raw_output if item.error and not error_occurs: error_occurs = True error_log = item.error_log if error_occurs: # delete all promptmodel data in item item.raw_output = None item.parsed_outputs = None item.function_call = None
item.pm_detail = PMDetail(
20
2023-10-09 03:35:44+00:00
24k
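The record above pairs retrieved cross-file snippets (the "context" entries, each a dict with "identifier", "path", and "snippet" keys) with the in-file code to be completed and a single reference line. As an illustration only, the sketch below shows one way such a record could be folded into a prompt prefix for retrieval-augmented next-line completion; the file name "sample_record.json", the snippet limit, and the comment-based prompt format are assumptions for the sketch, not part of the dataset specification.

# Minimal usage sketch (assumptions noted above): build a prompt prefix from the
# "context" entries of one record, labelling each retrieved snippet with its source.
import json

def build_context_prefix(record: dict, max_snippets: int = 5) -> str:
    # Concatenate up to max_snippets retrieved cross-file snippets,
    # each prefixed with the path and identifier it was taken from.
    parts = []
    for item in record.get("context", [])[:max_snippets]:
        parts.append(
            f"# Retrieved from {item['path']} ({item['identifier']}):\n{item['snippet']}"
        )
    return "\n\n".join(parts)

if __name__ == "__main__":
    with open("sample_record.json") as f:  # hypothetical path to a single record
        record = json.load(f)
    print(build_context_prefix(record)[:500])

In typical use, such a prefix would be concatenated with the record's partial in-file code before querying a code model, and the model's output would be scored against the single reference line stored with the record.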
cambridgeltl/ClaPS
run_prune_search.py
[ { "identifier": "PromptedClassificationReward", "path": "rewards/text_classification_reward.py", "snippet": "class PromptedClassificationReward:\n def __init__(\n self,\n args,\n task_lm: str,\n is_mask_lm: Optional[bool],\n num_classes: int,\n verbalizers: List[str],\n reward_type: str = \"entropy\",\n compute_zscore: bool = True,\n incorrect_coeff: float = 180.0, # lambda_1 in paper\n correct_coeff: float = 200.0, # lambda_2 in paper\n use_bn_calibration: bool = False,\n bn_calibrator: Optional[BatchNormCalibrate] = None,\n template: Optional[str] = None,\n gpu_id: Optional[int] = None,\n ):\n \"\"\"\n Few shot text classification reward (adapted from RLPrompt repository)\n Args:\n task_lm: the string specifying the language model type of the task LM\n is_mask_lm: bool. Whether the LM is masked, or left-to-right.\n compute_zscore: bool. Whether do reward normalization by normalizing the\n mean and standard deviation across the batch.\n incorrect_coeff, correct_coeff:\n num_classes: number of classes in the labels\n verbalizers: a list of verbalizers (for e.g., for sentiment classification)\n reward_type: the type of the reward.\n \"gap\" -- use the one proposed in RLPrompt\n \"ll\" -- use the usual cross entropy loss\n template: the template to organize the queries and prompts.\n default one is [Input][Prompt][MASK].\n default template is adopted when it is not specified.\n bn_calibrator: an optional batch norm calibrator. When provided,\n in inference mode the logits will be first normalised by it first. The\n calibrator must be initialized when passed to this class.\n This class essentially provides the objective function for BO/RL/any other\n prompt optimizer.\n \"\"\"\n super().__init__()\n if torch.cuda.is_available():\n if gpu_id:\n self.device = torch.device(f\"cuda:{gpu_id}\")\n else:\n self.device = torch.device(\"cuda\")\n else:\n self.device = torch.device(\"cpu\")\n # self.device = torch.device(\"cpu\")\n self.args = args\n self.task_lm = task_lm\n if is_mask_lm is None:\n # If False, then treat as left-to-right LM\n self.is_mask_lm = True if \"bert\" in self.task_lm else False\n else:\n self.is_mask_lm = is_mask_lm\n assert reward_type in [\"gap\", \"cross_entropy\", \"entropy\"]\n self.reward_type = reward_type\n print(\"Task LM:\", self.task_lm)\n if self.is_mask_lm:\n assert self.task_lm in SUPPORTED_MASK_LMS\n self._tokenizer = AutoTokenizer.from_pretrained(self.task_lm)\n self._generator = AutoModelForMaskedLM.from_pretrained(self.task_lm).to(\n self.device\n )\n else:\n self._generator = T5ForConditionalGeneration.from_pretrained(\n self.task_lm\n ).to(self.device)\n self._tokenizer = AutoTokenizer.from_pretrained(\n self.task_lm, use_fast=False\n )\n\n self.compute_zscore = compute_zscore\n self.incorrect_coeff = incorrect_coeff\n self.correct_coeff = correct_coeff\n self.num_classes = num_classes\n print(\"Num classes:\", self.num_classes)\n self.verbalizers = verbalizers\n print(\"Verbalizers:\", self.verbalizers)\n self.verbalizer_ids = [\n self._tokenizer.convert_tokens_to_ids(v) for v in self.verbalizers\n ]\n print(\"Verbalizer ids:\", self.verbalizer_ids)\n if template is None:\n self.template = self.load_default_template() # prompt templates\n else:\n self.template = template\n self.use_bn_calibration = use_bn_calibration\n self.bn_calibrator = bn_calibrator\n self._counter = 0\n\n def to(self, device):\n self._generator.to(device)\n\n def load_default_template(self) -> List[str]:\n template_dict = {\n \"xnli\": [\n \" {prompt} {sentence_1} {sentence_2} 
Entailment: \", \n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"mnli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \", \n ],\n \"snli\": [\n \" {prompt} {sentence_1} {sentence_2} Entailment: \",\n \" {prompt}. In this task, the goal is to predict textual entailment with 'yes' 'maybe' 'no'. sentence A implies sentence B entailment: yes; sentence A is neutral to sentence B entailment: maybe; sentence A contradicts sentence B entailment: no. Sentence A: {sentence_1}, Sentence B: {sentence_2}, Entailment: \",\n ],\n \"rte\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Textual Entailment: \",\n ],\n \"sst2\": [\n \" {prompt}. Sentence: {sentence_1}, Sentiment: \",\n ],\n \"mrpc\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"qnli\": [\n \" {prompt}. Question: {sentence_1}, Sentence: {sentence_2}, Entailment: \",\n ],\n \"qqp\": [\n \" {prompt}. Sentence 1: {sentence_1}, Sentence 2: {sentence_2}, Semantically Equivalent: \",\n ],\n \"ag_news\": [\n \" {prompt}. Classify the news articles into the categories of World, Sports, Business, and Technology. {sentence_1}: \",\n \"{prompt}\\n\\n{sentence_1}\\n\\nWhich topic is this article about?\\nWorld, Sports, Business, Technology, \",\n ],\n }\n if \"anli\" in self.args[\"dataset_name\"]:\n template = template_dict[\"anli\"][self.args[\"template_id\"]]\n elif (\n \"xnli\" in self.args[\"dataset_name\"]\n or \"americas_nli\" in self.args[\"dataset_name\"]\n ):\n template = template_dict[\"xnli\"][self.args[\"template_id\"]]\n else:\n if self.args[\"dataset_name\"] in template_dict:\n template = template_dict[self.args[\"dataset_name\"]][\n self.args[\"template_id\"]\n ]\n if self.is_mask_lm:\n mask_token = self._tokenizer.mask_token\n print(mask_token)\n simple_list = [\"SetFit/sst2\", \"SetFit/CR\", \"rotten_tomatoes\", \"SetFit/sst5\"]\n long_list = [\"yelp_polarity\", \"yelp_review_full\"]\n hard_list = [\"ag_news\"]\n rl_list = [\n \"rl-agnews\",\n \"rl-cr\",\n \"rl-mr\",\n \"rl-sst-2\",\n \"rl-sst-5\",\n \"rl-yelp-2\",\n \"rl-yelp-5\",\n ]\n if self.args[\"dataset_name\"] in simple_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n elif self.args[\"dataset_name\"] in long_list:\n template = f\" {{prompt}} It was {mask_token}. 
{{sentence_1}}\"\n elif self.args[\"dataset_name\"] in hard_list:\n template = f\" {{prompt}} {mask_token} News: {{sentence_1}}\"\n elif self.args[\"dataset_name\"] in rl_list:\n template = f\" {{prompt}} {{sentence_1}} It was {mask_token}.\"\n return template\n\n def __call__(self, *args: Any, **kwds: Any) -> Any:\n return self.forward(*args, **kwds)\n\n def forward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n # output_token: Union[List[str], str],\n to_tensor: bool,\n mode: str = \"train\",\n verbose: bool = True,\n accumulate_class: bool = False,\n ) -> Tuple[Union[List[float], torch.Tensor], Dict[str, Any]]:\n \"\"\"\n This computes the reward of the current prompt.\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n assert mode in [\"train\", \"infer\"]\n if mode == \"train\":\n self._counter += 1\n\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n accs: List[float] = []\n confs: List[float] = []\n entropies: List[float] = []\n class_logits: List[torch.Tensor] = []\n\n counter_list = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n quantities_to_log = {}\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n (\n reward,\n acc,\n correct_predictions,\n conf,\n entropy,\n class_logit,\n ) = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n bn_calibrator=self.bn_calibrator if self.use_bn_calibration else None,\n )\n\n rewards.append(reward)\n accs.append(acc.item())\n confs.append(conf.item())\n entropies.append(entropy.item())\n counter_list.append(correct_predictions)\n class_logits.append(class_logit)\n\n # keep track of rewards for z-score normalization\n input_rewards[\"z\"] += [reward.item()]\n\n # Print examples\n if verbose:\n print_strs = [\n \"Accuracy:\",\n acc.item(),\n \"|\",\n \"Reward:\",\n round(reward.item(), 2),\n ]\n print(*print_strs)\n rewards_tensor = torch.stack(rewards)\n accs_tensor = torch.tensor(accs)\n confs_tensor = torch.tensor(confs)\n entropies_tensor = torch.tensor(entropies)\n # compute the expected calibration error (ECE) by accs_tensor and confs_tensor\n ece = torch.abs(accs_tensor - confs_tensor).mean()\n\n # z-score normalization (2nd stage)\n if mode == \"train\" and self.compute_zscore:\n input_reward_means = {k: np.mean(v) for k, v in input_rewards.items()}\n input_reward_stds = {k: np.std(v) for k, v in input_rewards.items()}\n # not source strings\n idx_means = 
torch.tensor(input_reward_means[\"z\"]).float()\n idx_stds = torch.tensor(input_reward_stds[\"z\"]).float()\n rewards_tensor = (rewards_tensor - idx_means) / (idx_stds + 1e-4)\n quantities_to_log[prompt_strings[i]][\"resized_reward\"] = []\n for i in range(rewards_tensor.size(0)):\n quantities_to_log[prompt_strings[i]][\"resized_reward\"].append(\n rewards_tensor[i].item()\n )\n elif mode == \"infer\": # Optional: Predict Val Prompts\n score = rewards_tensor.mean().item()\n if verbose:\n print(f\"Our prompt: {prompt_strings}. Score={score}. Acc={acc}\")\n for pt in prompt_strings:\n print(self._tokenizer.tokenize(pt))\n print(accumulate_class)\n print(\"counter_list\", counter_list)\n print(\"ece\", ece)\n if accumulate_class:\n return (\n prompt_strings,\n rewards_tensor,\n accs_tensor,\n counter_list,\n ece,\n entropies_tensor,\n class_logits, # <- list of tensors. n elements = n prompts\n )\n else:\n return prompt_strings, rewards_tensor, accs_tensor\n\n if to_tensor is True:\n return rewards_tensor, accs_tensor, quantities_to_log\n else:\n return rewards_tensor.tolist(), accs, quantities_to_log\n\n def kl_divergence_row_by_row(self, p, q):\n kl_div = torch.sum(p * torch.log(p / q), dim=1)\n return kl_div\n\n def compute_default_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the probs of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_probs = _compute_probs(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_probs\n\n def compute_default_reward(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n ) -> torch.Tensor:\n \"\"\"\n This computes the rewards of the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n default_templates = self._format_prompts(\n source_texts, source_2_texts, [\"\" for _ in source_texts]\n )\n default_logits = self._get_logits(default_templates)\n default_reward, _, _, _, _, _ = _compute_reward(\n default_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n return default_reward\n\n def compute_kl(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_probs: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n input_rewards: Dict[str, List[float]] = defaultdict(list)\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_probs = _compute_probs(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n kl = self.kl_divergence_row_by_row(prompt_probs, default_probs)\n kl = torch.sum(kl)\n rewards.append(kl)\n kl_tensor = torch.stack(rewards)\n return kl_tensor\n\n def compute_reward_diff(\n self,\n source_texts: List[str],\n source_2_texts: List[str],\n class_labels: List[int],\n output_tokens: Union[List[List[str]], List[str], str],\n to_tensor: bool,\n default_rewards: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n This computes the kl-divergence of the current prompt to the naive prompt (instruction).\n source_texts: a list of string. Usually samples from the validation set\n class_labels: a list of integers. Usually the labels of the validation set\n prompts:\n Either List[List[str]]: List of tokens. The length of the list should be the same as the number of source_texts.\n OR List[str]: List of (decoded) prompts.\n OR: str. 
A single prompt\n \"\"\"\n # Process prompts and verbalizer indices\n if isinstance(output_tokens, list):\n if isinstance(output_tokens[0], list):\n prompt_tokens = output_tokens\n prompt_strings = self._convert_tokens_to_string(prompt_tokens)\n elif isinstance(output_tokens[0], str):\n prompt_strings = output_tokens\n elif isinstance(output_tokens, str):\n prompt_strings = [output_tokens] # Single prompt string\n\n rewards: List[torch.Tensor] = []\n for i, prompt in enumerate(prompt_strings):\n # Compute LM logits\n current_prompts = [prompt for _ in source_texts]\n formatted_templates = self._format_prompts(\n source_texts, source_2_texts, current_prompts\n )\n all_logits = self._get_logits(formatted_templates)\n prompt_rewards, _, _, _, _, _ = _compute_reward(\n all_logits,\n target=class_labels,\n reward_type=self.reward_type,\n verbalizer_ids=self.verbalizer_ids,\n correct_coeff=self.correct_coeff,\n incorrect_coeff=self.incorrect_coeff,\n )\n reward_diff = prompt_rewards - default_rewards\n reward_diff = torch.sum(reward_diff)\n rewards.append(reward_diff)\n reward_diff_tensor = torch.stack(rewards)\n return reward_diff_tensor\n\n # Adapted from\n # https://huggingface.co/docs/transformers/v4.21.1/en/task_summary#masked-language-modeling\n def _get_mask_token_index(self, input_ids: torch.Tensor) -> np.ndarray:\n mask_token_index = torch.where(input_ids == self._tokenizer.mask_token_id)[1]\n return mask_token_index\n\n def ensure_exactly_one_mask_token(\n self, model_inputs: Dict[str, torch.Tensor]\n ) -> None:\n for input_ids in model_inputs[\"input_ids\"]:\n masked_index = self._get_mask_token_index(input_ids)\n numel = np.prod(masked_index.shape)\n assert numel == 1\n\n @torch.no_grad()\n def _get_logits(self, texts: List[str]) -> torch.Tensor:\n # for MLM, add mask token\n batch_size = len(texts)\n encoded_inputs = self._tokenizer(\n texts,\n padding=\"longest\",\n truncation=True,\n return_tensors=\"pt\",\n add_special_tokens=True,\n )\n decoder_input_ids = (\n torch.ones((batch_size, 1)) * torch.tensor(self._tokenizer.pad_token_id)\n ).int()\n if self.is_mask_lm:\n # self.ensure_exactly_one_mask_token(encoded_inputs) TODO\n token_logits = self._generator(**encoded_inputs.to(self.device)).logits\n mask_token_indices = self._get_mask_token_index(encoded_inputs[\"input_ids\"])\n out_logits = token_logits[range(batch_size), mask_token_indices, :]\n return out_logits\n else:\n token_logits = self._generator(\n input_ids=encoded_inputs[\"input_ids\"].to(self.device),\n decoder_input_ids=decoder_input_ids.to(self.device),\n ).logits\n token_logits = token_logits[:, 0, :]\n return token_logits\n\n def _convert_tokens_to_string(self, tokens: List[List[str]]) -> List[str]:\n return [self._tokenizer.convert_tokens_to_string(s) for s in tokens]\n\n def _format_prompts(\n self,\n source_strs: List[str],\n source_2_strs: List[str],\n prompt_strs: List[str],\n ) -> List[str]:\n return [\n self.template.format(sentence_1=s_1, sentence_2=s_2, prompt=p)\n for s_1, s_2, p in zip(source_strs, source_2_strs, prompt_strs)\n ]" }, { "identifier": "PromptedClassificationDataset", "path": "utils/fsc_datasets.py", "snippet": "class PromptedClassificationDataset:\n def __init__(self, args):\n self.args = args\n self.glue_list = ['sst2', 'rte', 'mrpc', 'qqp', 'mnli', 'qnli']\n self.superglue_list = ['cb', 'copa', 'boolq', 'wic', 'wsc']\n self.nli_3_list = ['mnli', 'xnli', 'anli', 'cb', 'snli']\n if 'xnli' in args['dataset_name']:\n split = self.args['dataset_name'].split('_')[1]\n self.dataset = 
datasets.load_dataset('xnli', split)\n elif args['dataset_name'] in self.glue_list:\n self.dataset = datasets.load_dataset('glue', args['dataset_name'])\n elif 'anli' in args['dataset_name']:\n self.dataset = datasets.load_dataset('anli')\n elif args['dataset_name'] in self.superglue_list:\n self.dataset = datasets.load_dataset('super_glue', args['dataset_name'])\n elif 'rl' in args['dataset_name']:\n pass\n else:\n self.dataset = datasets.load_dataset(args['dataset_name'])\n def get_few_shot_dataset(self, shots: int) -> tuple:\n \"\"\"\n Retrieves a few-shot dataset by selecting a specified number of instances per class from the given dataset.\n \n Args:\n dataset (dict): A dictionary containing the dataset split into \"train\", \"validation\", and \"test\" subsets.\n shots (int): The number of instances to select per class for the few-shot dataset.\n \n Returns:\n tuple: The few-shot training dataset, the original validation dataset, and the original test dataset.\n \"\"\"\n \n if self.args['dataset_name'] == 'mnli':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation_matched']\n test_dataset = self.dataset['test_matched']\n elif self.args['dataset_name'] == 'yelp_polarity' or self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'SetFit/CR' or self.args['dataset_name'] == 'yelp_review_full':\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['train']\n test_dataset = self.dataset['test']\n elif 'rl' in self.args['dataset_name']:\n train_dataset = get_rl_data('train', self.args['dataset_name'], self.args['seed'])\n val_dataset = get_rl_data('dev', self.args['dataset_name'], self.args['seed'])\n test_dataset = get_rl_data('test', self.args['dataset_name'], self.args['seed'])\n train_dataset = [x for x in train_dataset]\n val_dataset = [x for x in val_dataset]\n return train_dataset, val_dataset, test_dataset\n elif self.args['dataset_name'] == 'snli':\n train_dataset = [x for x in self.dataset['train'] if x['label'] != -1]\n val_dataset = [x for x in self.dataset['validation'] if x['label'] != -1]\n test_dataset = [x for x in self.dataset['test'] if x['label'] != -1]\n else:\n train_dataset = self.dataset['train']\n val_dataset = self.dataset['validation']\n test_dataset = self.dataset['test']\n\n train_0 = [x for x in train_dataset if x['label'] == 0][:shots]\n train_1 = [x for x in train_dataset if x['label'] == 1][:shots]\n train_2 = [x for x in train_dataset if x['label'] == 2][:shots]\n train_3 = [x for x in train_dataset if x['label'] == 3][:shots]\n train_4 = [x for x in train_dataset if x['label'] == 4][:shots]\n train_dataset = train_0 + train_1 + train_2 + train_3 + train_4\n if self.args['dataset_name'] in self.glue_list or self.args['dataset_name'] in self.superglue_list:\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n new_val_dataset = val_0 + val_1 + val_2\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n elif self.args['dataset_name'] == 'ag_news' or self.args['dataset_name'] == 'yele_review_full':\n val_0 = [x for x in train_dataset if x['label'] == 0][-shots:]\n val_1 = [x for x in train_dataset if x['label'] == 1][-shots:]\n val_2 = [x for x in train_dataset if x['label'] == 2][-shots:]\n val_3 = [x for x in train_dataset if x['label'] == 3][-shots:]\n val_4 = [x for x in train_dataset if 
x['label'] == 4][-shots:]\n new_val_dataset = val_0 + val_1 + val_2 + val_3 + val_4\n test_dataset = val_dataset\n print('train_dataset', train_dataset)\n return train_dataset, new_val_dataset, test_dataset\n \n val_0 = [x for x in val_dataset if x['label'] == 0][:shots]\n val_1 = [x for x in val_dataset if x['label'] == 1][:shots]\n val_2 = [x for x in val_dataset if x['label'] == 2][:shots]\n val_dataset = val_0 + val_1 + val_2\n print('train_dataset', train_dataset)\n return train_dataset, val_dataset, test_dataset\n\n def get_verbalizer(self) -> list:\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n verbalizer_predefined = ['yes', 'maybe', 'no']\n elif self.args['dataset_name'] == 'sst2' or self.args['dataset_name'] == 'yelp_polarity':\n verbalizer_predefined = ['negative', 'positive']\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'qnli':\n verbalizer_predefined = ['yes', 'no']\n elif self.args['dataset_name'] == 'mrpc' or self.args['dataset_name'] == 'qqp':\n verbalizer_predefined = ['no', 'yes']\n elif self.args['dataset_name'] == 'boolq':\n verbalizer_predefined = ['no', 'yes']\n elif 'indonlp/NusaX-senti' in self.args['dataset_name']:\n verbalizer_predefined = ['negative', 'neutral', 'positive']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Technology']\n\n special_space = '▁'\n binary_list = ['SetFit/sst2', 'yelp_polarity', 'SetFit/CR', 'rotten_tomatoes']\n rl_binary_list = ['rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-yelp-2']\n if 'bert' in self.args['model_name']:\n special_space = 'Ġ'\n if self.args['dataset_name'] in binary_list:\n verbalizer_predefined = ['terrible', 'great']\n elif self.args['dataset_name'] == 'ag_news':\n verbalizer_predefined = ['World', 'Sports', 'Business', 'Tech']\n elif self.args['dataset_name'] == 'SetFit/sst5' or self.args['dataset_name'] == 'yelp_review_full':\n verbalizer_predefined = ['terrible', 'bad', 'okay', 'good', 'great']\n elif self.args['dataset_name'] in rl_binary_list:\n verbalizer_predefined = ['terrible', 'great']\n\n verbalizer_predefined = [special_space + v for v in verbalizer_predefined]\n return verbalizer_predefined\n \n def get_data(self, data) -> tuple:\n text_label_list = ['yelp_polarity', 'ag_news', 'SetFit/sst5', 'SetFit/CR', 'rotten_tomatoes', \"SetFit/sst2\", 'yelp_review_full']\n rl_list = ['rl-agnews', 'rl-cr', 'rl-mr', 'rl-sst-2', \n 'rl-sst-5', 'rl-yelp-2', 'rl-yelp-5']\n if 'xnli' in self.args['dataset_name'] or self.args['dataset_name'] == 'mnli' or 'anli' in self.args['dataset_name'] or 'americas_nli' in self.args['dataset_name'] or self.args['dataset_name'] == 'snli':\n return [d[\"premise\"] for d in data], [d[\"hypothesis\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'sst2':\n return [d[\"sentence\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'rte' or self.args['dataset_name'] == 'mrpc':\n return [d[\"sentence1\"] for d in data], [d[\"sentence2\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qnli':\n return [d[\"question\"] for d in data], [d[\"sentence\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] == 'qqp':\n return [d[\"question1\"] for d in data], [d[\"question2\"] for d in data], [d[\"label\"] 
for d in data]\n elif self.args['dataset_name'] == 'boolq':\n return [d[\"question\"] for d in data], [d[\"passage\"] for d in data], [d[\"label\"] for d in data]\n elif 'indonlp/NusaX-senti' in self.args['dataset_name'] or self.args['dataset_name'] in text_label_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]\n elif self.args['dataset_name'] in rl_list:\n return [d[\"text\"] for d in data], [d[\"text\"] for d in data], [d[\"label\"] for d in data]" }, { "identifier": "GeneticAlgorithmTrainer", "path": "algs/genetics.py", "snippet": "class GeneticAlgorithmTrainer(BaseTrainer):\n def __init__(\n self,\n pop_size: int,\n mutate_size: int,\n crossover_size: int,\n epochs: int,\n mutate_frac: float,\n str_len: int,\n stages: int,\n n_classes: int,\n eval_batch_size: int,\n genetics: Genetics,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Any,\n use_bn_calibrator: bool,\n ):\n super().__init__(\n obj_func=obj_func,\n prompt_dataset=prompt_dataset,\n logger=logger,\n use_bn_calibrator=use_bn_calibrator,\n )\n self.pop_size = pop_size\n self.mutate_size = mutate_size\n self.crossover_size = crossover_size\n self.epochs = epochs\n self.mutate_frac = mutate_frac\n self.str_len = str_len\n self.stages = stages\n self.n_classes = n_classes\n self.genetics = genetics\n self.epoch_per_extend = 3\n self.extend_size = 128\n self.eval_batch_size = eval_batch_size\n\n def train(self, train_data):\n premise_texts, hypothesis_texts, class_labels = self.prompt_dataset.get_data(\n train_data\n )\n epoch_per_stage = self.epochs // self.stages\n start_str = \"\"\n best_str_list = []\n\n for _ in range(self.stages):\n pop = [\n self.genetics.random_string(self.str_len) for _ in range(self.pop_size)\n ]\n if self.logger is not None:\n self.logger.info(pop)\n old_reward = 0\n epoch_counter = 0\n for evo_epoch in range(epoch_per_stage):\n if self.str_len == 1:\n pop_ = [start_str + \" \" + p for p in pop]\n else:\n pop_ = [start_str + p for p in pop]\n reward = self.obj_func.forward(\n premise_texts,\n hypothesis_texts,\n class_labels,\n pop_,\n True,\n \"infer\",\n verbose=False,\n )[0]\n if self.logger is not None:\n self.logger.info(\n f\"Epoch = {evo_epoch}. Max reward = {reward.max()}. Best prompt = {pop_[reward.argmax()]}\"\n )\n max_reward = reward.max()\n if max_reward > old_reward:\n old_reward = max_reward\n epoch_counter = 0\n else:\n epoch_counter += 1\n\n sorted_idx = reward.argsort(descending=True)[\n : max(1, int(reward.shape[0] * self.mutate_frac))\n ]\n pop = [pop[i] for i in sorted_idx]\n mutate_cfgs, crossover_cfgs = [], []\n extend_cfgs = []\n for _ in range(self.mutate_size):\n old_cfg = np.random.choice(pop)\n cfg = self.genetics.mutate(old_cfg)\n mutate_cfgs.append(cfg)\n\n for _ in range(self.crossover_size):\n cfg1 = np.random.choice(pop)\n cfg2 = np.random.choice(pop)\n cfg = self.genetics.crossover(cfg1, cfg2)\n crossover_cfgs.append(cfg)\n\n pop += mutate_cfgs\n pop += crossover_cfgs\n\n if self.logger is not None:\n self.logger.info(\n f\"Epoch = {evo_epoch}. 
Population length = {len(pop)}\"\n )\n\n if self.str_len > 1:\n if pop[reward.argmax()] not in best_str_list:\n best_str_list.append(pop[reward.argmax()])\n else:\n if pop_[reward.argmax()] not in best_str_list:\n best_str_list.append(pop_[reward.argmax()])\n # if we do step by steo do the pop_\n if self.str_len == 1:\n pop_ = [start_str + \" \" + p for p in pop]\n else:\n pop_ = [start_str + p for p in pop]\n start_str = pop_[reward.argmax()]\n\n return best_str_list\n\n def random_train(self, train_data):\n premise_texts, hypothesis_texts, class_labels = self.prompt_dataset.get_data(\n train_data\n )\n start_str = \"\"\n best_str_list = []\n pop = [\n self.genetics.random_string(self.str_len)\n for _ in range(self.pop_size * self.epochs)\n ]\n # logger.info(pop)\n pop_ = [start_str + p for p in pop]\n reward = self.obj_func.forward(\n premise_texts,\n hypothesis_texts,\n class_labels,\n pop_,\n True,\n \"infer\",\n verbose=False,\n )[0]\n\n if self.logger is not None:\n self.logger.info(\n f\"Max reward = {reward.max()}. Best prompt = {pop_[reward.argmax()]}\"\n )\n if pop[reward.argmax()] not in best_str_list:\n best_str_list.append(pop[reward.argmax()])\n return best_str_list" }, { "identifier": "Genetics", "path": "algs/genetics.py", "snippet": "class Genetics:\n def __init__(self, crossover_tokenizer, vocab_id):\n self.crossover_tokenizer = crossover_tokenizer\n self.vocab_id = vocab_id\n\n def mutate(self, x, prob=0.1):\n \"\"\"\n Mutates the input string by replacing tokens with a certain probability.\n\n Args:\n x (str): The input string.\n prob (float, optional): The probability of replacing each token. Defaults to 0.1.\n\n Returns:\n str: The mutated string.\n \"\"\"\n x_list = self.crossover_tokenizer.encode(x)\n\n def pick_another(x_, candidates):\n return (\n x_\n if len(candidates) == 1\n else random.choice([v for v in candidates if v != x_])\n )\n\n for i, element in enumerate(x_list):\n if i == 0 or i == len(x_list) - 1:\n continue\n if random.random() < prob:\n x_list[i] = pick_another(element, self.vocab_id)\n\n out = self.crossover_tokenizer.decode(x_list, skip_special_tokens=True)\n return out\n\n def crossover(self, x1, x2):\n \"\"\"\n Performs crossover between two input strings.\n\n Args:\n x1 (str): The first input string.\n x2 (str): The second input string.\n\n Returns:\n str: The crossover result.\n \"\"\"\n\n def _crossover_helper(v1, v2):\n return v1 if random.random() < 0.5 else v2\n\n def _inbalance_helper(v1, v2):\n n_tokens = min(len(v1), len(v2))\n max_n = max(len(v1), len(v2))\n out_token = []\n for i in range(n_tokens):\n out_token.append(v1[i] if random.random() < 0.5 else v2[i])\n for i in range(n_tokens, max_n):\n out_token.append(v1[i] if len(v1) > n_tokens else v2[i])\n return out_token\n\n x1_tokens = self.crossover_tokenizer.encode(x1)\n x2_tokens = self.crossover_tokenizer.encode(x2)\n x = _crossover_helper(x1_tokens, x2_tokens)\n ret = self.crossover_tokenizer.decode(x, skip_special_tokens=True)\n return ret\n\n def random_string(self, length=5):\n \"\"\"\n Generates a random string of a specified length.\n\n Args:\n length (int, optional): The length of the random string. 
Defaults to 5.\n\n Returns:\n str: The random string.\n \"\"\"\n choices = self.vocab_id\n out = random.choices(choices, k=length)\n out = self.crossover_tokenizer.decode(out, skip_special_tokens=True)\n return out\n\n def random_extend_pop(self, pop: list, n: int) -> list:\n \"\"\"\n Extends the population with random strings.\n\n Args:\n pop (list): The population.\n n (int): The number of random strings to generate.\n\n Returns:\n list: The extended population.\n \"\"\"\n pop = [p + self.random_string(n) for p in pop]\n return pop" }, { "identifier": "ParticleSwarmOptimizer", "path": "algs/particle_swarm.py", "snippet": "class ParticleSwarmOptimizer(BaseTrainer):\n def __init__(\n self,\n pop_size: int,\n epochs: int,\n mutate_frac: float,\n str_len: int,\n n_classes: int,\n eval_batch_size: int,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n logger: Any,\n use_bn_calibrator: bool,\n vocab_id,\n crossover_tokenizer,\n ):\n super().__init__(\n obj_func=obj_func,\n prompt_dataset=prompt_dataset,\n logger=logger,\n use_bn_calibrator=use_bn_calibrator,\n )\n self.crossover_tokenizer = crossover_tokenizer\n self.vocab_id = vocab_id\n self.pop_size = pop_size\n self.epochs = epochs\n self.mutate_frac = mutate_frac\n self.str_len = str_len\n self.n_classes = n_classes\n self.eval_batch_size = eval_batch_size\n\n def do_replace(self, x_cur, pos, new_word):\n x_new = x_cur.copy()\n x_new[pos] = new_word\n return x_new\n\n def predict_batch(\n self,\n sentences,\n ):\n return np.array(\n [\n self.predict(\n s,\n )\n for s in sentences\n ]\n )\n\n def predict(\n self,\n sentence,\n ):\n # Alia for reward computation -- note that we expect\n # a list of int in terms of vocab_id for sentence argument here.\n sentence_str = self.crossover_tokenizer.decode(\n sentence, skip_special_tokens=True\n )\n tem = (\n self.obj_func.forward(\n self.premise_texts,\n self.hypothesis_texts,\n self.class_labels,\n [sentence_str],\n True,\n \"infer\",\n verbose=False,\n )[0]\n .detach()\n .cpu()\n .item()\n )\n\n return tem\n\n def select_best_replacement(self, pos, x_cur, replace_list):\n \"\"\"Select the most effective replacement to word at pos (pos)\n in (x_cur) between the words in replace_list\"\"\"\n new_x_list = [\n self.do_replace(x_cur, pos, w) if w != 0 else x_cur for w in replace_list\n ]\n # Randomly select some rather than enumerate, which is very slow\n new_x_list_str = [\n self.crossover_tokenizer.decode(s, skip_special_tokens=True)\n for s in new_x_list\n ]\n x_scores = (\n self.obj_func.forward(\n self.premise_texts,\n self.hypothesis_texts,\n self.class_labels,\n new_x_list_str,\n True,\n \"infer\",\n verbose=False,\n )[0]\n .detach()\n .cpu()\n .numpy()\n )\n # new_x_preds = self.predict_batch(new_x_list)\n # x_scores = new_x_preds # [:, target]\n orig_score = self.predict(x_cur) # [target]\n\n new_x_scores = x_scores - orig_score\n # Eliminate not that clsoe words\n\n if np.max(new_x_scores) > 0:\n best_id = np.argsort(new_x_scores)[-1]\n return [x_scores[best_id], new_x_list[best_id]]\n return [orig_score, x_cur]\n\n def perturb(self, x_cur, neigbhours, w_select_probs):\n # Pick a word that is not modified and is not UNK\n x_len = w_select_probs.shape[0]\n rand_idx = np.random.choice(x_len, 1, p=w_select_probs)[0]\n # while x_cur[rand_idx] != x_orig[rand_idx] and np.sum(x_orig != x_cur) < np.sum(\n # np.sign(w_select_probs)\n # ):\n # rand_idx = np.random.choice(x_len, 1, p=w_select_probs)[0]\n replace_list = neigbhours[rand_idx]\n x_cur[rand_idx] = 
np.random.choice(replace_list)\n score = self.predict(x_cur)\n return [score, x_cur]\n # return self.select_best_replacement(rand_idx, x_cur, replace_list)\n\n def turn(self, x1, x2, prob, x_len):\n x_new = copy.deepcopy(x2)\n for i in range(x_len):\n if np.random.uniform() < prob[i]:\n x_new[i] = x1[i]\n return x_new\n\n def equal(self, a, b):\n return -3 if a == b else 3\n\n def sigmod(self, n):\n return 1 / (1 + np.exp(-n))\n\n def train(self, train_data):\n (\n self.premise_texts,\n self.hypothesis_texts,\n self.class_labels,\n ) = self.prompt_dataset.get_data(train_data)\n\n neigbhours_list = [self.vocab_id for _ in range(self.str_len)]\n neighbours_len = [len(x) for x in neigbhours_list]\n x_len = self.str_len\n #\n w_select_probs = []\n for pos in range(x_len):\n if neighbours_len[pos] == 0:\n w_select_probs.append(0)\n else:\n w_select_probs.append(min(neighbours_len[pos], 10))\n w_select_probs = w_select_probs / np.sum(w_select_probs)\n\n if np.sum(neighbours_len) == 0:\n return None\n\n # Generate random population\n pop = [\n np.random.choice(self.vocab_id, self.str_len) for _ in range(self.pop_size)\n ]\n pop_scores = self.predict_batch(\n pop,\n )\n\n part_elites = copy.deepcopy(pop)\n part_elites_scores = pop_scores\n all_elite_score = np.max(pop_scores)\n pop_ranks = np.argsort(pop_scores)\n top_attack = pop_ranks[-1]\n all_elite = pop[top_attack]\n\n Omega_1 = 0.8\n Omega_2 = 0.2\n C1_origin = 0.8\n C2_origin = 0.2\n V = [np.random.uniform(-3, 3) for rrr in range(self.pop_size)]\n V_P = [[V[t] for rrr in range(x_len)] for t in range(self.pop_size)]\n\n for i in range(self.epochs):\n Omega = (Omega_1 - Omega_2) * (self.epochs - i) / self.epochs + Omega_2\n C1 = C1_origin - i / self.epochs * (C1_origin - C2_origin)\n C2 = C2_origin + i / self.epochs * (C1_origin - C2_origin)\n\n for id in range(self.pop_size):\n for dim in range(x_len):\n V_P[id][dim] = Omega * V_P[id][dim] + (1 - Omega) * (\n self.equal(pop[id][dim], part_elites[id][dim])\n + self.equal(pop[id][dim], all_elite[dim])\n )\n turn_prob = [self.sigmod(V_P[id][d]) for d in range(x_len)]\n P1 = C1\n P2 = C2\n\n if np.random.uniform() < P1:\n pop[id] = self.turn(part_elites[id], pop[id], turn_prob, x_len)\n if np.random.uniform() < P2:\n pop[id] = self.turn(all_elite, pop[id], turn_prob, x_len)\n\n pop_scores = []\n pop_scores_all = []\n for a in pop:\n pt = self.predict(a)\n\n pop_scores.append(pt)\n pop_scores_all.append(pt)\n pop_ranks = np.argsort(pop_scores)\n top_attack = pop_ranks[-1]\n\n if self.logger is not None:\n self.logger.info(\n f\"{i} -- {pop_scores[top_attack]}。 Best = {self.crossover_tokenizer.decode(all_elite, add_special_tokens=False)}\"\n )\n\n new_pop = []\n new_pop_scores = []\n for id in range(len(pop)):\n x = pop[id]\n if np.random.uniform() < self.mutate_frac:\n tem = self.perturb(x, neigbhours_list, w_select_probs)\n # if tem is None:\n # return None\n # # if tem[0] == 1:\n # # return tem[1]\n # else:\n new_pop_scores.append(tem[0])\n new_pop.append(tem[1])\n else:\n new_pop_scores.append(pop_scores[id])\n new_pop.append(x)\n pop = new_pop\n\n pop_scores = new_pop_scores\n pop_ranks = np.argsort(pop_scores)\n top_attack = pop_ranks[-1]\n for k in range(self.pop_size):\n if pop_scores[k] > part_elites_scores[k]:\n part_elites[k] = pop[k]\n part_elites_scores[k] = pop_scores[k]\n elite = pop[top_attack]\n if np.max(pop_scores) > all_elite_score:\n all_elite = elite\n all_elite_score = np.max(pop_scores)\n\n all_elite_str = self.crossover_tokenizer.decode(\n all_elite, 
add_special_tokens=False\n )\n\n return [all_elite_str]" }, { "identifier": "GreedyTrainer", "path": "algs/greedy.py", "snippet": "class GreedyTrainer(BaseTrainer):\n def __init__(\n self,\n obj_func: PromptedClassificationReward,\n prompt_dataset: PromptedClassificationDataset,\n vocab_id,\n crossover_tokenizer,\n str_len: int,\n n_classes: int,\n eval_batch_size: int,\n logger,\n use_bn_calibrator: bool = False,\n n_samples_bn_calibrator: int = 128,\n ):\n super().__init__(\n obj_func, prompt_dataset, logger, use_bn_calibrator, n_samples_bn_calibrator\n )\n self.vocab_id = vocab_id\n self.crossover_tokenizer = crossover_tokenizer\n self.str_len = str_len\n self.n_classes = n_classes\n self.eval_batch_size = eval_batch_size\n\n def train(self, train_data):\n premise_texts, hypothesis_texts, class_labels = self.prompt_dataset.get_data(\n train_data\n )\n prompt = \"\"\n candidate_strs = [\n self.crossover_tokenizer.decode([d], skip_special_tokens=True)\n for d in self.vocab_id\n ]\n for _ in range(self.str_len):\n pop = [prompt + candidate_str for candidate_str in candidate_strs]\n # Evaluate the reward of all pop\n reward = (\n self.obj_func.forward(\n premise_texts,\n hypothesis_texts,\n class_labels,\n pop,\n True,\n \"infer\",\n verbose=False,\n )[0]\n .detach()\n .cpu()\n .numpy()\n )\n best_reward_idx = np.argmax(reward)\n if not prompt:\n prompt = candidate_strs[best_reward_idx]\n else:\n prompt += candidate_strs[best_reward_idx]\n print(f\"Current reward = {reward[best_reward_idx]}. Best prompt = {prompt}\")\n return [prompt]" } ]
import random
import numpy as np
import json
import argparse
import os
import torch
import logging
from tqdm import tqdm
from transformers import AutoTokenizer, set_seed
from rewards.text_classification_reward import PromptedClassificationReward
from utils.fsc_datasets import PromptedClassificationDataset
from algs.genetics import GeneticAlgorithmTrainer, Genetics
from algs.particle_swarm import ParticleSwarmOptimizer
from algs.greedy import GreedyTrainer
14,471
            vocab_id.append(v)
        logger.info(len(vocab_key))
    else:
        # random select 10% of the vocab
        vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"])
        logger.info(len(vocab_key))
    return vocab, vocab_key, vocab_id


def random_pruning(args, vocab: dict, percent: int = 99):
    vocab_key = []
    vocab_id = []
    for k, v in vocab.items():
        vocab_key.append(k)
        vocab_id.append(v)
    length = int(len(vocab_key) * (100 - percent) / 100)
    pruned_index = random.sample(list(np.arange(len(vocab_key))), length)
    vocab_key = [vocab_key[i] for i in pruned_index]
    vocab_id = [vocab_id[i] for i in pruned_index]
    vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))}
    logger.info(len(vocab_key))
    return vocab, vocab_key, vocab_id


def main(args):
    print(args)
    set_seed(args["seed"])
    revocab_flag = args["reprune_vocab"]
    shots = args["num_shots"]
    batch_size = args["train_batch_size"]
    args["is_mask_lm"] = False
    special_space = "▁"
    if "bert" in args["model_name"]:
        args["is_mask_lm"] = True
        special_space = "Ġ"
    logging.info("......Loading dataset......")
    prompt_dataset = PromptedClassificationDataset(args)
    verbalizer_predefined = prompt_dataset.get_verbalizer()
    args["verbalizers"] = verbalizer_predefined
    logging.info("verbalizers: %s", verbalizer_predefined)
    args["num_labels"] = len(verbalizer_predefined)
    train_dataset, val_dataset, test_dataset = prompt_dataset.get_few_shot_dataset(
        shots
    )
    logging.info("......truncating vocab......")
    crossover_tokenizer = AutoTokenizer.from_pretrained(args["model_name"])
    vocab = crossover_tokenizer.get_vocab()
    # preprocess the vocab
    special_tokens = [
        crossover_tokenizer.unk_token,
        crossover_tokenizer.pad_token,
        crossover_tokenizer.sep_token,
        crossover_tokenizer.cls_token,
    ]
    vocab = {
        word: index
        for word, index in vocab.items()
        if word not in special_tokens and special_space in word
    }
    for v in verbalizer_predefined:
        if v not in vocab:
            print("verbalizer not in vocab: ", v)
        assert v in vocab
    logging.info("the vocab length before action set pruning: %s", len(vocab))
    dataset = train_dataset
    print(dataset)
    batch_size = min(batch_size, len(dataset))
    idx = np.random.choice(len(dataset), batch_size, replace=False)
    data = [dataset[i] for i in idx]
    logging.info(f"Length of dataset = {len(data)}")
    obj_func = PromptedClassificationReward(
        args=args,
        reward_type=args["reward_type"],
        task_lm=args["model_name"],
        is_mask_lm=args["is_mask_lm"],
        num_classes=args["num_labels"],
        verbalizers=args["verbalizers"],
        use_bn_calibration=args["bn_calibrate"],
    )
    if revocab_flag:
        # pruning efficiency section
        # random select 10% of the vocab
        if args["vocab_path"] != "none":
            # this is to do kmeans clustering and pruning
            vocab, _, vocab_id = load_vocab(args)
            kl_dict, collect_kl_np = find_kl_dict(
                args, data, vocab, obj_func, prompt_dataset
            )
        else:
            if not args["run_manual"]:
                kl_dict, collect_kl_np = load_kl_dict(args)
            else:
                kl_dict = {}
                collect_kl_np = []
    if not args["run_manual"]:
        vocab, _, vocab_id = action_set_pruning(args, kl_dict, collect_kl_np, vocab)
    else:
        vocab_id = [v for k, v in vocab.items()]
    if args["method"] == "genetic":
        genetics = Genetics(crossover_tokenizer, vocab_id)
        trainer = GeneticAlgorithmTrainer(
            pop_size=128,
            mutate_size=64,
            crossover_size=64,
            mutate_frac=0.1,
            str_len=5,
            epochs=30,
            stages=1,
            n_classes=args["num_labels"],
            genetics=genetics,
            eval_batch_size=args["eval_batch_size"],
            obj_func=obj_func,
            prompt_dataset=prompt_dataset,
            use_bn_calibrator=args["bn_calibrate"],
            logger=logger,
        )
    elif args["method"] == "particle_swarm":
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)


def remove_special_token(text: str, special_token: str) -> str:
    return text.replace(special_token, "")


def find_kl_dict(args, data, vocab, obj_func, prompted_dataset):
    premise_texts, hypothesis_texts, class_labels = prompted_dataset.get_data(data)
    if args["prune_type"] == "kl":
        default_probs = obj_func.compute_default_kl(
            premise_texts, hypothesis_texts, class_labels, "", True
        )
    else:
        default_probs = obj_func.compute_default_reward(
            premise_texts, hypothesis_texts, class_labels, "", True
        )
    collect_kl = []
    kl_dict = {}
    for v, k in tqdm(vocab.items()):
        if args["prune_type"] == "kl":
            kl = obj_func.compute_kl(
                premise_texts, hypothesis_texts, class_labels, v, True, default_probs
            )
        else:
            kl = obj_func.compute_reward_diff(
                premise_texts, hypothesis_texts, class_labels, v, True, default_probs
            )
        collect_kl.append(kl)
        kl_dict[v] = kl
    for k, v in kl_dict.items():
        kl_dict[k] = float(v)
    with open(args["dict_path"], "w") as fp:
        json.dump(kl_dict, fp, indent=4, ensure_ascii=False)
    collect_kl_np = []
    for tensor in collect_kl:
        collect_kl_np.append(tensor.cpu().numpy())
    return kl_dict, collect_kl_np


def load_kl_dict(args):
    # load the KL dict from json file
    with open(args["dict_path"], "r") as fp:
        kl_dict = json.load(fp)
    collect_kl_np = []
    for k, v in kl_dict.items():
        collect_kl_np.append(v)
    return kl_dict, collect_kl_np


def load_vocab(args):
    with open(args["vocab_path"], "r") as fp:
        vocab = json.load(fp)
    vocab_key = []
    vocab_id = []
    for k, v in vocab.items():
        vocab_key.append(k)
        vocab_id.append(v)
    return vocab, vocab_key, vocab_id


def action_set_pruning(args, kl_dict, collect_kl_np, vocab):
    if not args["random_prune"]:
        collect_kl_np = np.array(collect_kl_np)
        top_10_percent = np.percentile(collect_kl_np, args["percentile"])
        # filter the vocab based on the top_10_percent_idx
        new_vocab = {
            word: vocab[word]
            for word, value in kl_dict.items()
            if value > top_10_percent
        }
        vocab = new_vocab
        vocab_key = []
        vocab_id = []
        for k, v in vocab.items():
            vocab_key.append(k)
            vocab_id.append(v)
        logger.info(len(vocab_key))
    else:
        # random select 10% of the vocab
        vocab, vocab_key, vocab_id = random_pruning(args, vocab, args["percentile"])
        logger.info(len(vocab_key))
    return vocab, vocab_key, vocab_id


def random_pruning(args, vocab: dict, percent: int = 99):
    vocab_key = []
    vocab_id = []
    for k, v in vocab.items():
        vocab_key.append(k)
        vocab_id.append(v)
    length = int(len(vocab_key) * (100 - percent) / 100)
    pruned_index = random.sample(list(np.arange(len(vocab_key))), length)
    vocab_key = [vocab_key[i] for i in pruned_index]
    vocab_id = [vocab_id[i] for i in pruned_index]
    vocab = {vocab_key[i]: vocab_id[i] for i in range(len(vocab_key))}
    logger.info(len(vocab_key))
    return vocab, vocab_key, vocab_id


def main(args):
    print(args)
    set_seed(args["seed"])
    revocab_flag = args["reprune_vocab"]
    shots = args["num_shots"]
    batch_size = args["train_batch_size"]
    args["is_mask_lm"] = False
    special_space = "▁"
    if "bert" in args["model_name"]:
        args["is_mask_lm"] = True
        special_space = "Ġ"
    logging.info("......Loading dataset......")
    prompt_dataset = PromptedClassificationDataset(args)
    verbalizer_predefined = prompt_dataset.get_verbalizer()
    args["verbalizers"] = verbalizer_predefined
    logging.info("verbalizers: %s", verbalizer_predefined)
    args["num_labels"] = len(verbalizer_predefined)
    train_dataset, val_dataset, test_dataset = prompt_dataset.get_few_shot_dataset(
        shots
    )
    logging.info("......truncating vocab......")
    crossover_tokenizer = AutoTokenizer.from_pretrained(args["model_name"])
    vocab = crossover_tokenizer.get_vocab()
    # preprocess the vocab
    special_tokens = [
        crossover_tokenizer.unk_token,
        crossover_tokenizer.pad_token,
        crossover_tokenizer.sep_token,
        crossover_tokenizer.cls_token,
    ]
    vocab = {
        word: index
        for word, index in vocab.items()
        if word not in special_tokens and special_space in word
    }
    for v in verbalizer_predefined:
        if v not in vocab:
            print("verbalizer not in vocab: ", v)
        assert v in vocab
    logging.info("the vocab length before action set pruning: %s", len(vocab))
    dataset = train_dataset
    print(dataset)
    batch_size = min(batch_size, len(dataset))
    idx = np.random.choice(len(dataset), batch_size, replace=False)
    data = [dataset[i] for i in idx]
    logging.info(f"Length of dataset = {len(data)}")
    obj_func = PromptedClassificationReward(
        args=args,
        reward_type=args["reward_type"],
        task_lm=args["model_name"],
        is_mask_lm=args["is_mask_lm"],
        num_classes=args["num_labels"],
        verbalizers=args["verbalizers"],
        use_bn_calibration=args["bn_calibrate"],
    )
    if revocab_flag:
        # pruning efficiency section
        # random select 10% of the vocab
        if args["vocab_path"] != "none":
            # this is to do kmeans clustering and pruning
            vocab, _, vocab_id = load_vocab(args)
            kl_dict, collect_kl_np = find_kl_dict(
                args, data, vocab, obj_func, prompt_dataset
            )
        else:
            if not args["run_manual"]:
                kl_dict, collect_kl_np = load_kl_dict(args)
            else:
                kl_dict = {}
                collect_kl_np = []
    if not args["run_manual"]:
        vocab, _, vocab_id = action_set_pruning(args, kl_dict, collect_kl_np, vocab)
    else:
        vocab_id = [v for k, v in vocab.items()]
    if args["method"] == "genetic":
        genetics = Genetics(crossover_tokenizer, vocab_id)
        trainer = GeneticAlgorithmTrainer(
            pop_size=128,
            mutate_size=64,
            crossover_size=64,
            mutate_frac=0.1,
            str_len=5,
            epochs=30,
            stages=1,
            n_classes=args["num_labels"],
            genetics=genetics,
            eval_batch_size=args["eval_batch_size"],
            obj_func=obj_func,
            prompt_dataset=prompt_dataset,
            use_bn_calibrator=args["bn_calibrate"],
            logger=logger,
        )
    elif args["method"] == "particle_swarm":
trainer = ParticleSwarmOptimizer(
4
2023-10-08 12:39:44+00:00
24k
clessig/atmorep
atmorep/core/trainer.py
[ { "identifier": "AtmoRep", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRep( torch.nn.Module) :\n\n def __init__(self, cf) :\n '''Constructor'''\n \n super( AtmoRep, self).__init__()\n\n self.cf = cf\n\n ###################################################\n def create( self, devices, load_pretrained=True) :\n '''Create network'''\n\n cf = self.cf\n self.devices = devices\n size_token_info = 6\n self.fields_coupling_idx = []\n\n self.fields_index = {}\n for ifield, field_info in enumerate(cf.fields) :\n self.fields_index[ field_info[0] ] = ifield \n \n # # embedding network for global/auxiliary token infos\n # TODO: only for backward compatibility, remove\n self.embed_token_info = torch.nn.Linear( cf.size_token_info, cf.size_token_info_net)\n torch.nn.init.constant_( self.embed_token_info.weight, 0.0)\n\n self.embeds_token_info = torch.nn.ModuleList()\n for ifield, field_info in enumerate( cf.fields) :\n \n self.embeds_token_info.append( torch.nn.Linear( cf.size_token_info, cf.size_token_info_net))\n \n if len(field_info[1]) > 4 and load_pretrained :\n # TODO: inconsistent with embeds_token_info -> version that can handle both\n # we could imply use the file name: embed_token_info vs embeds_token_info\n name = 'AtmoRep' + '_embed_token_info'\n mloaded = torch.load( get_model_filename( name, field_info[1][4][0], field_info[1][4][1]))\n self.embeds_token_info[-1].load_state_dict( mloaded)\n print( 'Loaded embed_token_info from id = {}.'.format( field_info[1][4][0] ) )\n else :\n # initalization\n torch.nn.init.constant_( self.embeds_token_info[-1].weight, 0.0)\n self.embeds_token_info[-1].bias.data.fill_(0.0)\n\n # embedding and encoder\n\n self.embeds = torch.nn.ModuleList()\n self.encoders = torch.nn.ModuleList()\n self.masks = torch.nn.ParameterList()\n\n for field_idx, field_info in enumerate(cf.fields) : \n\n # learnabl class token\n if cf.learnable_mask :\n mask = torch.nn.Parameter( 0.1 * torch.randn( np.prod( field_info[4]), requires_grad=True))\n self.masks.append( mask.to(devices[0]))\n else :\n self.masks.append( None)\n\n # encoder\n self.encoders.append( TransformerEncoder( cf, field_idx, True).create())\n # load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained :\n self.load_block( field_info, 'encoder', self.encoders[-1])\n self.embeds.append( self.encoders[-1].embed)\n\n # indices of coupled fields for efficient access in forward\n self.fields_coupling_idx.append( [field_idx])\n for field_coupled in field_info[1][2] : \n if 'axial' in cf.encoder_att_type :\n self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] )\n else :\n for _ in range(cf.coupling_num_heads_per_field) :\n self.fields_coupling_idx[field_idx].append( self.fields_index[field_coupled] )\n\n # decoder \n\n self.decoders = torch.nn.ModuleList()\n self.field_pred_idxs = []\n for field in cf.fields_prediction :\n\n for ifield, field_info in enumerate(cf.fields) : \n if field_info[0] == field[0] :\n self.field_pred_idxs.append( ifield)\n break\n\n self.decoders.append( TransformerDecoder( cf, field_info ) )\n # load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained :\n self.load_block( field_info, 'decoder', self.decoders[-1])\n\n # tail networks\n \n self.tails = torch.nn.ModuleList()\n for ifield, field in enumerate(cf.fields_prediction) :\n\n field_idx = self.field_pred_idxs[ifield]\n field_info = cf.fields[field_idx]\n self.tails.append( TailEnsemble( cf, field_info[1][1], np.prod(field_info[4]) ).create())\n # 
load pre-trained model if specified\n if len(field_info[1]) > 4 and load_pretrained:\n self.load_block( field_info, 'tail', self.tails[-1])\n\n # set devices\n\n for field_idx, field_info in enumerate(cf.fields) :\n # find determined device, use default if nothing specified\n device = self.devices[0]\n if len(field_info[1]) > 3 :\n assert field_info[1][3] < 4, 'Only single node model parallelism supported'\n assert field_info[1][3] < len(devices), 'Per field device id larger than max devices'\n device = self.devices[ field_info[1][3] ]\n # set device\n if self.masks[field_idx] != None :\n self.masks[field_idx].to(device)\n self.embeds[field_idx].to(device)\n self.encoders[field_idx].to(device)\n\n for field_idx, field in enumerate(cf.fields_prediction) :\n field_info = cf.fields[ self.field_pred_idxs[field_idx] ]\n device = self.devices[0]\n if len(field_info[1]) > 3 :\n device = self.devices[ field_info[1][3] ]\n self.decoders[field_idx].to(device)\n self.tails[field_idx].to(device)\n\n # embed_token_info on device[0] since it is shared by all fields, potentially sub-optimal\n self.embed_token_info.to(devices[0]) # TODO: only for backward compatibility, remove\n self.embeds_token_info.to(devices[0])\n\n self.checkpoint = identity\n if cf.grad_checkpointing :\n self.checkpoint = checkpoint_wrapper\n\n return self\n\n ###################################################\n def load_block( self, field_info, block_name, block ) :\n\n # name = self.__class__.__name__ + '_' + block_name + '_' + field_info[0]\n name = 'AtmoRep_' + block_name + '_' + field_info[0]\n\n b_loaded = torch.load( get_model_filename(name, field_info[1][4][0], field_info[1][4][1]))\n\n # in coupling mode, proj_out of attention heads needs separate treatment: only the pre-trained\n # part can be loaded\n keys_del = []\n for name, param in block.named_parameters():\n if 'proj_out' in name :\n for k in b_loaded.keys() :\n if name == k :\n if param.shape[0] != param.shape[1] : # non-square proj_out indicate deviation from pre-training\n with torch.no_grad() :\n # load pre-trained part\n param[ : , : b_loaded[k].shape[1] ] = b_loaded[k]\n # initalize remaining part to small random value\n param[ : , b_loaded[k].shape[1] : ] = 0.01 * torch.rand( param.shape[0],\n param.shape[1] - b_loaded[k].shape[1])\n keys_del += [ k ]\n for k in keys_del :\n del b_loaded[k]\n\n # use strict=False so that differing blocks, e.g. through coupling, are ignored\n mkeys, _ = block.load_state_dict( b_loaded, False)\n\n # missing keys = keys that are not pre-trained are initalized to small value\n [mkeys.remove(k) for k in keys_del] # remove proj_out keys so that they are not over-written\n [utils.init_weights_uniform( block.state_dict()[k], 0.01) for k in mkeys]\n\n print( 'Loaded {} for {} from id = {} (ignoring/missing {} elements).'.format( block_name,\n field_info[0], field_info[1][4][0], len(mkeys) ) )\n\n ###################################################\n @staticmethod\n def load( model_id, devices, cf = None, epoch = -2, load_pretrained=False) :\n '''Load network from checkpoint'''\n\n if not cf : \n cf = utils.Config()\n cf.load_json( model_id)\n\n model = AtmoRep( cf).create( devices, load_pretrained=False)\n mloaded = torch.load( utils.get_model_filename( model, model_id, epoch) )\n mkeys, _ = model.load_state_dict( mloaded, False )\n\n if len(mkeys) > 0 :\n print( f'Loaded AtmoRep: ignoring {len(mkeys)} elements: {mkeys}')\n\n # TODO: remove, only for backward \n if model.embeds_token_info[0].weight.abs().max() == 0. 
:\n model.embeds_token_info = torch.nn.ModuleList()\n\n return model\n \n ###################################################\n def save( self, epoch = -2) :\n '''Save network '''\n\n # save entire network\n torch.save( self.state_dict(), utils.get_model_filename( self, self.cf.wandb_id, epoch) )\n\n # save parts also separately\n\n # name = self.__class__.__name__ + '_embed_token_info'\n # torch.save( self.embed_token_info.state_dict(),\n # utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n name = self.__class__.__name__ + '_embeds_token_info'\n torch.save( self.embeds_token_info.state_dict(),\n utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, enc in enumerate(self.encoders) :\n name = self.__class__.__name__ + '_encoder_' + self.cf.fields[ifield][0]\n torch.save( enc.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, dec in enumerate(self.decoders) :\n name = self.__class__.__name__ + '_decoder_' + self.cf.fields_prediction[ifield][0]\n torch.save( dec.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n for ifield, tail in enumerate(self.tails) :\n name = self.__class__.__name__ + '_tail_' + self.cf.fields_prediction[ifield][0]\n torch.save( tail.state_dict(), utils.get_model_filename( name, self.cf.wandb_id, epoch) )\n\n ###################################################\n def forward( self, xin) :\n '''Evaluate network'''\n\n # embedding\n cf = self.cf\n fields_embed = self.get_fields_embed(xin)\n \n # attention maps (if requested)\n atts = [ [] for _ in cf.fields ]\n\n # encoder\n embeds_layers = [[] for i in self.field_pred_idxs]\n for ib in range(self.cf.encoder_num_layers) :\n fields_embed, att = self.forward_encoder_block( ib, fields_embed) \n [embeds_layers[idx].append( fields_embed[i]) for idx,i in enumerate(self.field_pred_idxs)]\n [atts[i].append( att[i]) for i,_ in enumerate(cf.fields) ]\n \n # encoder-decoder coupling / token transformations\n (decoders_in, embeds_layers) = self.encoder_to_decoder( embeds_layers)\n\n preds = []\n for idx,i in enumerate(self.field_pred_idxs) :\n \n # decoder\n token_seq_embed, att = self.decoders[idx]( (decoders_in[idx], embeds_layers[idx]) )\n \n # tail net\n tail_in = self.decoder_to_tail( idx, token_seq_embed)\n pred = self.checkpoint( self.tails[idx], tail_in)\n \n preds.append( pred)\n [atts[i].append( a) for a in att]\n\n return preds, atts\n\n ###################################################\n def forward_encoder_block( self, iblock, fields_embed) :\n ''' evaluate one block (attention and mlp) '''\n\n # double buffer for commutation-invariant result (w.r.t evaluation order of transformers)\n fields_embed_cur, atts = [], []\n\n # attention heads\n for ifield in range( len(fields_embed)) :\n d = fields_embed[ifield].device\n fields_in =[fields_embed[i].to(d,non_blocking=True) for i in self.fields_coupling_idx[ifield]]\n # unpack list in argument for checkpointing\n y, att = self.checkpoint( self.encoders[ifield].heads[iblock], *fields_in)\n fields_embed_cur.append( y)\n atts.append( att)\n \n # MLPs \n for ifield in range( len(fields_embed)) :\n fields_embed_cur[ifield] = self.checkpoint( self.encoders[ifield].mlps[iblock], \n fields_embed_cur[ifield] )\n \n return fields_embed_cur, atts\n\n ###################################################\n \n def get_fields_embed( self, xin ) :\n cf = self.cf\n if 0 == len(self.embeds_token_info) : # TODO: only for backward compatibility, remove\n emb_net_ti = self.embed_token_info\n return 
[prepare_token( field_data, emb_net, emb_net_ti, cf.with_cls )\n for fidx,(field_data,emb_net) in enumerate(zip( xin, self.embeds))]\n else :\n embs_net_ti = self.embeds_token_info\n return [prepare_token( field_data, emb_net, embs_net_ti[fidx], cf.with_cls )\n for fidx,(field_data,emb_net) in enumerate(zip( xin, self.embeds))]\n \n ###################################################\n\n def get_attention( self, xin) : \n\n cf = self.cf\n attn = []\n fields_embed = self.get_fields_embed(xin)\n #either accumulated attention or last layer attention:\n blocks = list(range(self.cf.encoder_num_layers)) if cf.attention_mode == 'accum' else [self.cf.encoder_num_layers-1]\n for idx, ifield in enumerate(self.field_pred_idxs) : \n d = fields_embed[ifield].device\n fields_in =[fields_embed[i].to(d,non_blocking=True) for i in self.fields_coupling_idx[ifield]]\n attn_field = self.encoders[ifield].heads[blocks[0]].get_attention(fields_in)\n if cf.attention_mode == 'accum':\n for iblock in blocks[1:]:\n attn_layer = self.encoders[ifield].heads[iblock].get_attention(fields_in)\n attn_field = attn_field + attn_layer\n attn_field = torch.sum(attn_field, dim = 0, keepdim=True)\n attn.append(attn_field)\n# print(\"att FINAL\", ifield, len(attn), attn[0].shape)\n return attn" }, { "identifier": "AtmoRepData", "path": "atmorep/core/atmorep_model.py", "snippet": "class AtmoRepData( torch.nn.Module) :\n\n def __init__( self, net) :\n '''Wrapper class for AtmoRep that handles data loading'''\n\n super( AtmoRepData, self).__init__()\n \n self.data_loader_test = None\n self.data_loader_train = None\n self.data_loader_iter = None\n\n self.net = net\n\n # ensure that all data loaders have the same seed and hence load the same data\n self.rng_seed = net.cf.rng_seed \n if not self.rng_seed :\n self.rng_seed = int(torch.randint( 100000000, (1,))) \n \n ###################################################\n def load_data( self, mode : NetMode, batch_size = -1, num_loader_workers = -1) :\n '''Load data'''\n\n cf = self.net.cf\n \n if batch_size < 0 :\n batch_size = cf.batch_size_max\n if num_loader_workers < 0 :\n num_loader_workers = cf.num_loader_workers\n\n if mode == NetMode.train :\n self.data_loader_train = self._load_data( self.dataset_train, batch_size, num_loader_workers)\n elif mode == NetMode.test :\n batch_size = cf.batch_size_test\n self.data_loader_test = self._load_data( self.dataset_test, batch_size, num_loader_workers)\n else : \n assert False\n\n ###################################################\n def _load_data( self, dataset, batch_size, num_loader_workers) :\n '''Private implementation for load'''\n\n dataset.load_data( batch_size)\n\n loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, \n 'num_workers': num_loader_workers, 'pin_memory': True}\n data_loader = torch.utils.data.DataLoader( dataset, **loader_params, sampler = None) \n\n return data_loader\n\n ###################################################\n def set_data( self, mode : NetMode, times_pos, batch_size = -1, num_loader_workers = -1) :\n\n cf = self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_data( times_pos, batch_size)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def set_global( self, mode : NetMode, times, batch_size = -1, num_loader_workers = -1) :\n\n cf = 
self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_global( times, batch_size, cf.token_overlap)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def set_location( self, mode : NetMode, pos, years, months, num_t_samples_per_month, \n batch_size = -1, num_loader_workers = -1) :\n\n cf = self.net.cf\n if batch_size < 0 :\n batch_size = cf.batch_size_train if mode == NetMode.train else cf.batch_size_test\n \n dataset = self.dataset_train if mode == NetMode.train else self.dataset_test\n dataset.set_location( pos, years, months, num_t_samples_per_month, batch_size)\n\n self._set_data( dataset, mode, batch_size, num_loader_workers)\n\n ###################################################\n def _set_data( self, dataset, mode : NetMode, batch_size = -1, loader_workers = -1) :\n '''Private implementation for set_data, set_global'''\n\n cf = self.net.cf\n if loader_workers < 0 :\n loader_workers = cf.num_loader_workers\n\n loader_params = { 'batch_size': None, 'batch_sampler': None, 'shuffle': False, \n 'num_workers': loader_workers, 'pin_memory': True}\n \n if mode == NetMode.train :\n self.data_loader_train = torch.utils.data.DataLoader( dataset, **loader_params, \n sampler = None)\n elif mode == NetMode.test :\n self.data_loader_test = torch.utils.data.DataLoader( dataset, **loader_params, \n sampler = None)\n else :\n assert False\n\n ###################################################\n def normalizer( self, field, vl_idx) :\n\n if isinstance( field, str) :\n for fidx, field_info in enumerate(self.cf.fields) :\n if field == field_info[0] :\n break\n assert fidx < len(self.cf.fields), 'invalid field'\n normalizer = self.dataset_train.datasets[fidx].normalizer\n\n elif isinstance( field, int) :\n normalizer = self.dataset_train.datasets[field][vl_idx].normalizer\n\n else :\n assert False, 'invalid argument type (has to be index to cf.fields or field name)'\n\n return normalizer\n\n ###################################################\n def mode( self, mode : NetMode) :\n \n if mode == NetMode.train :\n self.data_loader_iter = iter(self.data_loader_train)\n self.net.train()\n elif mode == NetMode.test :\n self.data_loader_iter = iter(self.data_loader_test)\n self.net.eval()\n else :\n assert False\n\n self.cur_mode = mode\n\n ###################################################\n def len( self, mode : NetMode) :\n if mode == NetMode.train :\n return len(self.data_loader_train)\n elif mode == NetMode.test :\n return len(self.data_loader_test)\n else :\n assert False\n\n ###################################################\n def next( self) :\n return next(self.data_loader_iter)\n\n ###################################################\n def forward( self, xin) :\n pred = self.net.forward( xin)\n return pred\n\n ###################################################\n def get_attention( self, xin): #, field_idx) :\n attn = self.net.get_attention( xin) #, field_idx)\n return attn\n\n ###################################################\n def create( self, pre_batch, devices, create_net = True, pre_batch_targets = None,\n load_pretrained=True) :\n\n if create_net :\n self.net.create( devices, load_pretrained)\n\n self.pre_batch = pre_batch\n self.pre_batch_targets = pre_batch_targets\n\n cf = self.net.cf\n self.dataset_train = MultifieldDataSampler( cf.data_dir, 
cf.years_train, cf.fields,\n batch_size = cf.batch_size_start,\n num_t_samples = cf.num_t_samples,\n num_patches_per_t = cf.num_patches_per_t_train,\n num_load = cf.num_files_train,\n pre_batch = self.pre_batch,\n rng_seed = self.rng_seed,\n file_shape = cf.file_shape,\n smoothing = cf.data_smoothing,\n level_type = cf.level_type,\n file_format = cf.file_format,\n month = cf.month,\n time_sampling = cf.time_sampling,\n geo_range = cf.geo_range_sampling,\n fields_targets = cf.fields_targets,\n pre_batch_targets = self.pre_batch_targets )\n \n self.dataset_test = MultifieldDataSampler( cf.data_dir, cf.years_test, cf.fields,\n batch_size = cf.batch_size_test,\n num_t_samples = cf.num_t_samples,\n num_patches_per_t = cf.num_patches_per_t_test,\n num_load = cf.num_files_test,\n pre_batch = self.pre_batch,\n rng_seed = self.rng_seed,\n file_shape = cf.file_shape,\n smoothing = cf.data_smoothing,\n level_type = cf.level_type,\n file_format = cf.file_format,\n month = cf.month,\n time_sampling = cf.time_sampling,\n geo_range = cf.geo_range_sampling,\n lat_sampling_weighted = cf.lat_sampling_weighted,\n fields_targets = cf.fields_targets,\n pre_batch_targets = self.pre_batch_targets )\n\n return self" }, { "identifier": "prepare_batch_BERT_multifield", "path": "atmorep/training/bert.py", "snippet": "def prepare_batch_BERT_multifield( cf, rngs, fields, BERT_strategy, fields_data) :\n \n fields_tokens_masked_idx = [[] for _ in fields_data]\n fields_tokens_masked_idx_list = [[] for _ in fields_data]\n fields_targets = [[] for _ in fields_data]\n sources = [[] for _ in fields_data]\n token_infos = [[] for _ in fields_data]\n\n if not BERT_strategy :\n BERT_strategy = cf.BERT_strategy\n\n if BERT_strategy == 'BERT' :\n bert_f = prepare_batch_BERT_field\n elif BERT_strategy == 'forecast' :\n bert_f = prepare_batch_BERT_forecast_field\n elif BERT_strategy == 'temporal_interpolation' :\n bert_f = prepare_batch_BERT_temporal_field\n elif BERT_strategy == 'forecast_1shot' :\n bert_f = prepare_batch_BERT_forecast_field_1shot\n elif BERT_strategy == 'identity' :\n bert_f = prepare_batch_BERT_identity_field\n elif BERT_strategy == 'totalmask' :\n bert_f = prepare_batch_BERT_totalmask_field\n else :\n assert False\n\n # # advance randomly to avoid issues with parallel data loaders that naively duplicate rngs\n # delta = torch.randint( 0, 1000, (1,)).item()\n # [rng.bit_generator.advance( delta) for rng in rngs]\n\n if cf.BERT_window :\n # window size has to be multiple of two due to the variable token sizes (the size is \n # however currently restricted to differ by exactly a factor of two only)\n size_t = int(rngs[0].integers( 2, fields[0][3][0]+1, 1)[0] / 2.) * 2 \n size_lat = int(rngs[0].integers( 2, fields[0][3][1]+1, 1)[0] / 2.) * 2\n size_lon = int(rngs[0].integers( 2, fields[0][3][2]+1, 1)[0] / 2.) 
* 2\n\n rng_idx = 1\n for ifield, data_field in enumerate(fields_data) :\n for ilevel, (field_data, token_info) in enumerate(data_field) :\n\n tok_size = fields[ifield][4]\n field_data = tokenize( field_data, tok_size )\n field_data_shape = field_data.shape\n \n # cut neighborhood for current batch\n if cf.BERT_window :\n # adjust size based on token size so that one has a fixed size window in physical space\n cur_size_t = int(size_t * fields[ifield][3][0] / fields[0][3][0])\n cur_size_lat = int(size_lat * fields[ifield][3][1] / fields[0][3][1])\n cur_size_lon = int(size_lon * fields[ifield][3][2] / fields[0][3][2])\n # define indices\n idx_t_s = field_data.shape[1] - cur_size_t\n idx_lat_s = field_data.shape[2] - cur_size_lat\n idx_lon_s = field_data.shape[3] - cur_size_lon\n # cut\n field_data = field_data[ :, idx_t_s:, idx_lat_s:, idx_lon_s:]\n field_data = field_data.contiguous()\n # for token info first recover space-time shape\n token_info = token_info.reshape( list(field_data_shape[0:4]) + [token_info.shape[-1]]) \n token_info = token_info[ :, idx_t_s:, idx_lat_s:, idx_lon_s:]\n token_info = torch.flatten( token_info, 1, -2)\n token_info = token_info.contiguous()\n \n # no masking for static fields or if masking rate = 0\n if fields[ifield][1][0] > 0 and fields[ifield][5][0] > 0. :\n\n ret = bert_f( cf, ifield, field_data, token_info, rngs[rng_idx])\n (field_data, token_info, target, tokens_masked_idx, tokens_masked_idx_list) = ret\n \n if target != None :\n fields_targets[ifield].append( target)\n fields_tokens_masked_idx[ifield].append( tokens_masked_idx)\n fields_tokens_masked_idx_list[ifield].append( tokens_masked_idx_list)\n\n rng_idx += 1\n\n sources[ifield].append( field_data.unsqueeze(1) )\n token_infos[ifield].append( token_info )\n\n # merge along vertical level\n sources[ifield] = torch.cat( sources[ifield], 1)\n token_infos[ifield] = torch.cat( token_infos[ifield], 1)\n # merge along vertical level, for target we have level, batch, ... ordering \n fields_targets[ifield] = torch.cat( fields_targets[ifield],0) \\\n if len(fields_targets[ifield]) > 0 else fields_targets[ifield]\n\n return (sources, token_infos, fields_targets, fields_tokens_masked_idx,\n fields_tokens_masked_idx_list)" }, { "identifier": "positional_encoding_harmonic", "path": "atmorep/transformer/transformer_base.py", "snippet": "def positional_encoding_harmonic( x, num_levels, num_tokens, with_cls = False) :\n '''space time harmonic positional encoding'''\n\n dim_embed = x.shape[-1]\n dev = x.get_device()\n \n # num_tokens = x.shape[-3:-1]\n # len_token_seq = num_levels * np.prod(num_tokens)\n # pe = torch.zeros( len_token_seq, dim_embed, device=dev)\n # position = torch.arange( 0, len_token_seq).unsqueeze(1)\n # div = torch.exp(torch.arange( 0, dim_embed, 2) * -(math.log(1000) / dim_embed))\n\n # pe[:, 0::2] = torch.sin(position * div)\n # pe[:, 1::2] = torch.cos(position * div)\n # pe = pe.unsqueeze(0)\n\n # x += pe.reshape( x[0].shape )\n\n\n idx = torch.arange( np.prod( x.shape[1:-1]), device=dev)\n num_tokens_t_lat_lon = np.prod( num_tokens)\n num_tokens_lat_lon = num_tokens[1] * num_tokens[2]\n idxs_v = (idx / num_tokens_t_lat_lon).int()\n # idxs_v = num_tokens_t_lat_lon\n temp = torch.remainder( idx, num_tokens_t_lat_lon)\n idxs_t = (temp / num_tokens_lat_lon).int()\n temp = torch.remainder( idx, num_tokens_lat_lon)\n idxs_lat = (temp / num_tokens[1]).int()\n idxs_lon = torch.remainder( temp, num_tokens[2])\n\n pe = torch.zeros( idx.shape[0], dim_embed, device=dev)\n xs = (2. 
* np.pi * torch.arange( 0, dim_embed, 2, device=dev) / dim_embed)\n pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * idxs_lat, xs) ) \\\n + torch.sin( torch.outer( idxs_t, xs) )\n pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * idxs_lon, xs) ) \\\n + torch.cos( torch.outer( idxs_v , xs) )\n if with_cls :\n x[:,1:] += pe.reshape( x[0,1:].shape)\n else :\n x += pe.reshape( x[0].shape)\n\n return x" }, { "identifier": "shape_to_str", "path": "atmorep/utils/utils.py", "snippet": "def shape_to_str( shape) :\n ret ='{}'.format( list(shape)).replace(' ', '').replace(',','_').replace('(','s_').replace(')','')\n ret = ret.replace('[','s_').replace(']','')\n return ret" }, { "identifier": "relMSELoss", "path": "atmorep/utils/utils.py", "snippet": "def relMSELoss( pred, target = None) :\n val = torch.mean( (pred - target) * (pred - target)) / torch.mean( target * target)\n return val" }, { "identifier": "Gaussian", "path": "atmorep/utils/utils.py", "snippet": "def Gaussian( x, mu=0., std_dev=1.) :\n # return (1 / (std_dev*np.sqrt(2.*np.pi))) * torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev))\n # unnormalized Gaussian where maximum is one\n return torch.exp( -0.5 * (x-mu)*(x-mu) / (std_dev*std_dev))" }, { "identifier": "CRPS", "path": "atmorep/utils/utils.py", "snippet": "def CRPS( y, mu, std_dev) :\n # see Eq. A2 in S. Rasp and S. Lerch. Neural networks for postprocessing ensemble weather forecasts. Monthly Weather Review, 146(11):3885 – 3900, 2018.\n c1 = np.sqrt(1./np.pi)\n t1 = 2. * erf( (y-mu) / std_dev) - 1.\n t2 = 2. * Gaussian( (y-mu) / std_dev)\n val = std_dev * ( (y-mu)/std_dev * t1 + t2 - c1 )\n return val" }, { "identifier": "NetMode", "path": "atmorep/utils/utils.py", "snippet": "class NetMode( Enum) :\n indeterminate = 0\n train = 1\n test = 2" }, { "identifier": "sgn_exp", "path": "atmorep/utils/utils.py", "snippet": "def sgn_exp( x ) :\n '''exponential preserving sign'''\n return x.sign() * (torch.exp( x.abs() ) - 1.)" }, { "identifier": "write_forecast", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_forecast( model_id, epoch, batch_idx, levels, sources, sources_coords,\n targets, targets_coords,\n preds, ensembles,\n zarr_store_type = 'ZipStore' ) :\n ''' \n sources : num_fields x [field name , data]\n targets :\n preds, ensemble share coords with targets\n '''\n\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_source = zarr_store( fname.format( 'source'))\n exp_source = zarr.group(store=store_source)\n for fidx, field in enumerate(sources) :\n ds_field = exp_source.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=sources_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=sources_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=sources_coords[2][bidx])\n store_source.close()\n\n store_target = zarr_store( fname.format( 'target'))\n exp_target = zarr.group(store=store_target)\n for fidx, field in enumerate(targets) :\n ds_field = exp_target.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = 
ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_target.close()\n\n store_pred = zarr_store( fname.format( 'pred'))\n exp_pred = zarr.group(store=store_pred)\n for fidx, field in enumerate(preds) :\n ds_field = exp_pred.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_pred.close()\n\n store_ens = zarr_store( fname.format( 'ens'))\n exp_ens = zarr.group(store=store_ens)\n for fidx, field in enumerate(ensembles) :\n ds_field = exp_ens.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels)\n ds_batch_item.create_dataset( 'datetime', data=targets_coords[0][bidx])\n ds_batch_item.create_dataset( 'lat', data=targets_coords[1][bidx])\n ds_batch_item.create_dataset( 'lon', data=targets_coords[2][bidx])\n store_ens.close()" }, { "identifier": "write_BERT", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_BERT( model_id, epoch, batch_idx, levels, sources, sources_coords,\n targets, targets_coords,\n preds, ensembles,\n zarr_store_type = 'ZipStore' ) :\n '''\n sources : num_fields x [field name , data]\n targets :\n preds, ensemble share coords with targets\n '''\n\n # fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch}.zarr'\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_source = zarr_store( fname.format( 'source'))\n exp_source = zarr.group(store=store_source)\n for fidx, field in enumerate(sources) :\n ds_field = exp_source.require_group( f'{field[0]}')\n batch_size = field[1].shape[0]\n for bidx in range( field[1].shape[0]) :\n sample = batch_idx * batch_size + bidx\n ds_batch_item = ds_field.create_group( f'sample={sample:05d}' )\n ds_batch_item.create_dataset( 'data', data=field[1][bidx])\n ds_batch_item.create_dataset( 'ml', data=levels[fidx])\n ds_batch_item.create_dataset( 'datetime', data=sources_coords[0][0][bidx])\n ds_batch_item.create_dataset( 'lat', data=sources_coords[1][0][bidx])\n ds_batch_item.create_dataset( 'lon', data=sources_coords[2][0][bidx])\n store_source.close()\n\n store_target = zarr_store( fname.format( 'target'))\n exp_target = zarr.group(store=store_target)\n for fidx, field in enumerate(targets) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_field = exp_target.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n 
ds_target_b = ds_field.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_target_b_l = ds_target_b.require_group( f'ml={levels[fidx][vidx]}')\n ds_target_b_l.create_dataset( 'data', data=field[1][vidx][bidx])\n ds_target_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_target_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_target_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_target_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_target.close()\n\n store_pred = zarr_store( fname.format( 'pred'))\n exp_pred = zarr.group(store=store_pred)\n for fidx, field in enumerate(preds) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_pred = exp_pred.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n ds_pred_b = ds_pred.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_pred_b_l = ds_pred_b.create_group( f'ml={levels[fidx][vidx]}')\n ds_pred_b_l.create_dataset( 'data', data\n =field[1][vidx][bidx])\n ds_pred_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_pred_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_pred_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_pred_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_pred.close()\n\n store_ens = zarr_store( fname.format( 'ens'))\n exp_ens = zarr.group(store=store_ens)\n for fidx, field in enumerate(ensembles) :\n if 0 == len(field[1]) : # skip fields that were not predicted\n continue\n batch_size = len(field[1][0])\n ds_ens = exp_ens.require_group( f'{field[0]}')\n for bidx in range( len(field[1][0])) :\n sample = batch_idx * batch_size + bidx\n ds_ens_b = ds_ens.create_group( f'sample={sample:05d}')\n for vidx in range(len(levels[fidx])) :\n ds_ens_b_l = ds_ens_b.create_group( f'ml={levels[fidx][vidx]}')\n ds_ens_b_l.create_dataset( 'data', data=field[1][vidx][bidx])\n ds_ens_b_l.create_dataset( 'ml', data=levels[fidx][vidx])\n ds_ens_b_l.create_dataset( 'datetime', data=targets_coords[0][fidx][bidx][vidx])\n ds_ens_b_l.create_dataset( 'lat', data=targets_coords[1][fidx][bidx][vidx])\n ds_ens_b_l.create_dataset( 'lon', data=targets_coords[2][fidx][bidx][vidx])\n store_ens.close()" }, { "identifier": "write_attention", "path": "atmorep/datasets/data_writer.py", "snippet": "def write_attention(model_id, epoch, batch_idx, levels, attn, attn_coords, zarr_store_type = 'ZipStore' ) :\n\n fname = f'{config.path_results}/id{model_id}/results_id{model_id}_epoch{epoch:05d}' + '_{}.zarr'\n zarr_store = getattr( zarr, zarr_store_type)\n\n store_attn = zarr_store( fname.format( 'attention'))\n exp_attn = zarr.group(store=store_attn)\n\n for fidx, atts_f in enumerate(attn) :\n ds_field = exp_attn.require_group( f'{atts_f[0]}')\n ds_field_b = ds_field.require_group( f'batch={batch_idx:05d}')\n for lidx, atts_f_l in enumerate(atts_f[1]) : # layer in the network\n ds_f_l = ds_field_b.require_group( f'layer={lidx:05d}')\n ds_f_l.create_dataset( 'ml', data=levels[fidx])\n ds_f_l.create_dataset( 'datetime', data=attn_coords[0][fidx])\n ds_f_l.create_dataset( 'lat', data=attn_coords[1][fidx])\n ds_f_l.create_dataset( 'lon', data=attn_coords[2][fidx])\n ds_f_l_h = ds_f_l.require_group('heads')\n for hidx, atts_f_l_head in enumerate(atts_f_l) : # number of attention head\n if atts_f_l_head != None :\n 
ds_f_l_h.create_dataset(f'{hidx}', data=atts_f_l_head.numpy() )\n store_attn.close()" } ]
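The context list above ends with the Gaussian and CRPS utilities that the trainer's loss method uses for its probabilistic loss terms. As a quick, self-contained illustration, the sketch below mirrors those two helpers on made-up tensors; all input values are arbitrary, and torch.erf stands in here for the unqualified erf call in the quoted snippet (cf. Rasp and Lerch 2018, Eq. A2, as cited in the snippet itself).

import numpy as np
import torch

def gaussian(x, mu=0., std_dev=1.):
    # unnormalized Gaussian with a maximum of one, mirroring the helper quoted above
    return torch.exp(-0.5 * (x - mu) * (x - mu) / (std_dev * std_dev))

def crps(y, mu, std_dev):
    # same closed form as the CRPS helper quoted above
    c1 = np.sqrt(1. / np.pi)
    t1 = 2. * torch.erf((y - mu) / std_dev) - 1.
    t2 = 2. * gaussian((y - mu) / std_dev)
    return std_dev * ((y - mu) / std_dev * t1 + t2 - c1)

# illustrative inputs only; shapes and values are not taken from the dataset sample
y = torch.tensor([0.1, -0.3, 0.7])        # observations
mu = torch.zeros(3)                       # predicted means
sigma = torch.full((3,), 0.5)             # predicted standard deviations
print(crps(y, mu, sigma).mean())          # average CRPS over the three points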
import torch import torchinfo import numpy as np import code import os import datetime import functools import pandas as pd import wandb import torch.distributed as dist import torch.utils.data.distributed import atmorep.config.config as config import atmorep.utils.token_infos_transformations as token_infos_transformations import atmorep.utils.utils as utils from pathlib import Path from typing import TypeVar from torch.distributed.optim import ZeroRedundancyOptimizer from atmorep.core.atmorep_model import AtmoRep from atmorep.core.atmorep_model import AtmoRepData from atmorep.training.bert import prepare_batch_BERT_multifield from atmorep.transformer.transformer_base import positional_encoding_harmonic from atmorep.utils.utils import shape_to_str from atmorep.utils.utils import relMSELoss from atmorep.utils.utils import Gaussian from atmorep.utils.utils import CRPS from atmorep.utils.utils import NetMode from atmorep.utils.utils import sgn_exp from atmorep.datasets.data_writer import write_forecast, write_BERT, write_attention
14945
num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) res = tinfos[0,0,0,0,0][-1].item() batch_size = tinfos.shape[0] sources_b = detok( sources[fidx].numpy()) if is_predicted : # split according to levels lens_levels = [t.shape[0] for t in tokens_masked_idx[fidx]] targets_b = torch.split( targets[fidx], lens_levels) preds_mu_b = torch.split( log_preds[fidx][0], lens_levels) preds_ens_b = torch.split( log_preds[fidx][2], lens_levels) # split according to batch lens_batches = [ [bv.shape[0] for bv in b] for b in tokens_masked_idx_list[fidx] ] targets_b = [torch.split( targets_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_mu_b = [torch.split(preds_mu_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_ens_b =[torch.split(preds_ens_b[vidx],lens) for vidx,lens in enumerate(lens_batches)] # recover token shape targets_b = [[targets_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_mu_b = [[preds_mu_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_ens_b = [[preds_ens_b[vidx][bidx].reshape( [-1, cf.net_tail_num_nets, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] # for all batch items coords_b = [] for bidx, tinfo in enumerate(tinfos) : # use first vertical levels since a column is considered lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res) lons = np.remainder( lons, 360.) # time stamp in token_infos is at start time so needs to be advanced by token_size[0]-1 s = utils.token_info_to_time( tinfo[0,0,0,0,:3] ) - pd.Timedelta(hours=token_size[0]-1) e = utils.token_info_to_time( tinfo[0,-1,0,0,:3] ) dates = pd.date_range( start=s, end=e, freq='h') # target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) 
dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
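One detail worth noting in the cropped code above is how the per-token longitude grid is rebuilt from the token-info record and wrapped into [0, 360) whenever a neighbourhood straddles the date line. Below is a stand-alone sketch of that wrap-around; the resolution, half-width and centre longitude are assumed values chosen for illustration.

import numpy as np

res = 0.25           # grid resolution in degrees (assumed value)
lon_d_h = 2          # half token width in grid points (assumed value)
lon_centre = 359.75  # token centre longitude close to the wrap point (assumed value)

# same pattern as in the snippet: build the local window, then wrap into [0, 360)
lons = np.arange(lon_centre - lon_d_h * res, lon_centre + lon_d_h * res + 0.001, res)
lons = np.remainder(lons, 360.)
print(lons)          # -> [359.25 359.5  359.75   0.     0.25]

The same remainder-based wrap is applied in the snippet both to the full source window and to the per-masked-token windows built for log_validate_BERT.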
#################################################################################################### # # Copyright (C) 2022 # #################################################################################################### # # project : atmorep # # author : atmorep collaboration # # description : # # license : # #################################################################################################### # code.interact(local=locals()) # import horovod.torch as hvd #################################################################################################### class Trainer_Base() : def __init__( self, cf, devices ) : self.cf = cf self.devices = devices self.device_in = devices[0] self.device_out = devices[-1] self.fields_prediction_idx = [] self.loss_weights = torch.zeros( len(cf.fields_prediction) ) for ifield, field in enumerate(cf.fields_prediction) : self.loss_weights[ifield] = self.cf.fields_prediction[ifield][1] for idx, field_info in enumerate(cf.fields) : if field_info[0] == field[0] : self.fields_prediction_idx.append( idx) break self.loss_weights = self.loss_weights.to( self.device_out) self.MSELoss = torch.nn.MSELoss() # transformation for token infos if hasattr( cf, 'token_infos_transformation') : self.tok_infos_trans = getattr( token_infos_transformations, cf.token_infos_transformation) else : self.tok_infos_trans = getattr( token_infos_transformations, 'identity') if 0 == cf.par_rank : directory = Path( config.path_results, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) directory = Path( config.path_models, 'id{}'.format( cf.wandb_id)) if not os.path.exists(directory): os.makedirs( directory) ################################################### def create( self, load_embeds=True) : net = AtmoRep( self.cf) self.model = AtmoRepData( net) self.model.create( self.pre_batch, self.devices, load_embeds) # TODO: pass the properly to model / net self.model.net.encoder_to_decoder = self.encoder_to_decoder self.model.net.decoder_to_tail = self.decoder_to_tail return self ################################################### @classmethod def load( Typename, cf, model_id, epoch, devices) : trainer = Typename( cf, devices).create( load_embeds=False) trainer.model.net = trainer.model.net.load( model_id, devices, cf, epoch) # TODO: pass the properly to model / net trainer.model.net.encoder_to_decoder = trainer.encoder_to_decoder trainer.model.net.decoder_to_tail = trainer.decoder_to_tail str = 'Loaded model id = {}{}.'.format( model_id, f' at epoch = {epoch}' if epoch> -2 else '') print( str) return trainer ################################################### def save( self, epoch) : self.model.net.save( epoch) ################################################### def get_learn_rates( self) : cf = self.cf size_padding = 5 learn_rates = np.zeros( cf.num_epochs + size_padding) learn_rates[:cf.lr_start_epochs] = np.linspace( cf.lr_start, cf.lr_max, num = cf.lr_start_epochs) lr = learn_rates[cf.lr_start_epochs-1] ic = 0 for epoch in range( cf.lr_start_epochs, cf.num_epochs + size_padding) : lr = max( lr / cf.lr_decay_rate, cf.lr_min) learn_rates[epoch] = lr if ic > 9999 : # sanity check assert "Maximum number of epochs exceeded." 
return learn_rates ################################################### def run( self, epoch = -1) : cf = self.cf model = self.model learn_rates = self.get_learn_rates() if cf.with_ddp : self.model_ddp = torch.nn.parallel.DistributedDataParallel( model, static_graph=True) if not cf.optimizer_zero : self.optimizer = torch.optim.AdamW( self.model_ddp.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) else : self.optimizer = ZeroRedundancyOptimizer(self.model_ddp.parameters(), optimizer_class=torch.optim.AdamW, lr=cf.lr_start ) else : self.optimizer = torch.optim.AdamW( self.model.parameters(), lr=cf.lr_start, weight_decay=cf.weight_decay) if 0 == cf.par_rank : # print( self.model.net) model_parameters = filter(lambda p: p.requires_grad, self.model_ddp.parameters()) num_params = sum([np.prod(p.size()) for p in model_parameters]) print( f'Number of trainable parameters: {num_params:,}') # test at the beginning as reference self.model.load_data( NetMode.test, batch_size=cf.batch_size_test) if cf.test_initial : cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() test_loss = np.array( [cur_test_loss]) else : # generic value based on data normalization test_loss = np.array( [1.0]) epoch += 1 batch_size = cf.batch_size_start - cf.batch_size_delta if cf.profile : lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr self.model.load_data( NetMode.train, batch_size = cf.batch_size_max) self.profile() # training loop while True : if epoch >= cf.num_epochs : break lr = learn_rates[epoch] for g in self.optimizer.param_groups: g['lr'] = lr batch_size = min( cf.batch_size_max, batch_size + cf.batch_size_delta) tstr = datetime.datetime.now().strftime("%H:%M:%S") print( '{} : {} :: batch_size = {}, lr = {}'.format( epoch, tstr, batch_size, lr) ) self.model.load_data( NetMode.train, batch_size = batch_size) self.train( epoch) if cf.with_wandb and 0 == cf.par_rank : self.save( epoch) cur_test_loss = self.validate( epoch, cf.BERT_strategy).cpu().numpy() # self.validate( epoch, 'forecast') # save model if cur_test_loss < test_loss.min() : self.save( -2) test_loss = np.append( test_loss, [cur_test_loss]) epoch += 1 tstr = datetime.datetime.now().strftime("%H:%M:%S") print( 'Finished training at {} with test loss = {}.'.format( tstr, test_loss[-1]) ) # save final network if cf.with_wandb and 0 == cf.par_rank : self.save( -2) ################################################### def train( self, epoch): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() loss_total = [[] for i in range(len(cf.losses)) ] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] mse_loss_total = [] grad_loss_total = [] ctr = 0 for batch_idx in range( model.len( NetMode.train)) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() loss.backward() self.optimizer.step() [loss_total[idx].append( losses[key]) for idx, key in enumerate(losses)] mse_loss_total.append( mse_loss.detach().cpu() ) grad_loss_total.append( loss.detach().cpu() ) [std_dev_total[idx].append( pred[1].detach().cpu()) for idx, pred in enumerate(preds)] # logging if int((batch_idx * cf.batch_size_max) / 4) > ctr : # wandb logging if cf.with_wandb and (0 == cf.par_rank) : loss_dict = { "training loss": torch.mean( torch.tensor( mse_loss_total)), "gradient loss": torch.mean( torch.tensor( grad_loss_total)) } # log individual loss terms for 
individual fields for idx, cur_loss in enumerate(loss_total) : loss_name = self.cf.losses[idx] lt = torch.tensor(cur_loss) for i, field in enumerate(cf.fields_prediction) : idx_name = loss_name + ', ' + field[0] idx_std_name = 'stddev, ' + field[0] loss_dict[idx_name] = torch.mean( lt[:,i]).cpu().detach() loss_dict[idx_std_name] = torch.mean(torch.cat(std_dev_total[i],0)).cpu().detach() wandb.log( loss_dict ) # console output print('train epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:1.5f} : {:1.5f} :: {:1.5f}'.format( epoch, batch_idx, model.len( NetMode.train), 100. * batch_idx/model.len(NetMode.train), torch.mean( torch.tensor( grad_loss_total)), torch.mean(torch.tensor(mse_loss_total)), torch.mean( preds[0][1]) ), flush=True) # save model (use -2 as epoch to indicate latest, stored without epoch specification) # self.save( -2) # reset loss_total = [[] for i in range(len(cf.losses)) ] mse_loss_total = [] grad_loss_total = [] std_dev_total = [[] for i in range(len(self.fields_prediction_idx)) ] ctr += 1 # save gradients if cf.save_grads and cf.with_wandb and (0 == cf.par_rank) : dir_name = './grads/id{}'.format( cf.wandb_id) if not os.path.exists(dir_name): os.makedirs(dir_name) rmsprop_ws = [] for k in range( len(self.optimizer.state_dict()['state']) ) : rmsprop_ws.append(self.optimizer.state_dict()['state'][k]['exp_avg_sq'].mean().unsqueeze(0)) rmsprop_ws = torch.cat( rmsprop_ws) fname = '{}/{}_epoch{}_rmsprop.npy'.format( dir_name, cf.wandb_id, epoch) np.save( fname, rmsprop_ws.cpu().detach().numpy() ) idx = 0 for name, param in self.model.named_parameters(): if param.requires_grad : fname = '{}/{}_epoch{}_{:05d}_{}_grad.npy'.format( dir_name, cf.wandb_id, epoch, idx,name) np.save( fname, param.grad.cpu().detach().numpy() ) idx += 1 # clean memory self.optimizer.zero_grad() del batch_data, loss, loss_total, mse_loss_total, grad_loss_total, std_dev_total ################################################### def profile( self): model = self.model cf = self.cf model.mode( NetMode.train) self.optimizer.zero_grad() # See https://pytorch.org/tutorials/intermediate/tensorboard_profiler_tutorial.html # for details on how to load and analyse report # https://pytorch.org/blog/trace-analysis-for-masses/ # do for all par_ranks to avoid that they run out of sync print( '---------------------------------') print( 'Profiling:') pname = './logs/profile_par_rank' + str(cf.par_rank) + '_' + cf.wandb_id + '/profile' with torch.profiler.profile( activities=[torch.profiler.ProfilerActivity.CPU, torch.profiler.ProfilerActivity.CUDA], schedule=torch.profiler.schedule(wait=1, warmup=1, active=3, repeat=2), on_trace_ready=torch.profiler.tensorboard_trace_handler(pname), profile_memory=True, record_shapes=True, with_stack=True) as prof: for batch_idx in range( 2 * (1+1+3) ) : batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) preds, _ = self.model_ddp( batch_data) loss, mse_loss, losses = self.loss( preds, batch_idx) self.optimizer.zero_grad() # loss.backward() # self.optimizer.step() prof.step() print( 'Profiling finished.') print( '---------------------------------') ################################################### def validate( self, epoch, BERT_test_strategy = 'BERT'): cf = self.cf BERT_strategy_train = cf.BERT_strategy cf.BERT_strategy = BERT_test_strategy self.model.mode( NetMode.test) total_loss = 0. 
total_losses = torch.zeros( len(self.fields_prediction_idx) ) test_len = 0 self.mode_test = True # run in training mode offset = 0 if -1 == epoch and 0 == cf.par_rank : if 1 == cf.num_accs_per_task : # bug in torchinfo; fixed in v1.8.0 offset += 1 print( 'Network size:') batch_data = self.model.next() batch_data = self.prepare_batch( batch_data) torchinfo.summary( self.model, input_data=[batch_data]) # run test set evaluation with torch.no_grad() : for it in range( self.model.len( NetMode.test) - offset) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : if type(batch_data[1][0][0]) is list : targets = [batch_data[1][i][0][0] for i in range( len(batch_data[1]))] else : targets = batch_data[1][0] # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) loss = torch.tensor( 0.) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] # hook for custom test loss self.test_loss( pred, target) # base line loss cur_loss = self.MSELoss( pred[0], target = target ).cpu().item() loss += cur_loss total_losses[ifield] += cur_loss ifield += 1 total_loss += loss test_len += 1 # store detailed results on current test set for book keeping if cf.par_rank < cf.log_test_num_ranks : log_preds = [[p.detach().clone().cpu() for p in pred] for pred in preds] self.log_validate( epoch, it, log_sources, log_preds) if cf.attention: self.log_attention( epoch, it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes total_loss /= test_len * len(self.cf.fields_prediction) total_losses /= test_len if cf.with_ddp : total_loss_cuda = total_loss.cuda() total_losses_cuda = total_losses.cuda() dist.all_reduce( total_loss_cuda, op=torch.distributed.ReduceOp.AVG ) dist.all_reduce( total_losses_cuda, op=torch.distributed.ReduceOp.AVG ) total_loss = total_loss_cuda.cpu() total_losses = total_losses_cuda.cpu() if 0 == cf.par_rank : print( 'validation loss for strategy={} at epoch {} : {}'.format( BERT_test_strategy, epoch, total_loss), flush=True) if cf.with_wandb and (0 == cf.par_rank) : loss_dict = {"val. loss {}".format(BERT_test_strategy) : total_loss} total_losses = total_losses.cpu().detach() for i, field in enumerate(cf.fields_prediction) : idx_name = 'val., {}, '.format(BERT_test_strategy) + field[0] loss_dict[idx_name] = total_losses[i] print( 'validation loss for {} : {}'.format( field[0], total_losses[i] )) wandb.log( loss_dict) batch_data = [] torch.cuda.empty_cache() cf.BERT_strategy = BERT_strategy_train self.mode_test = False return total_loss ################################################### def evaluate( self, data_idx = 0, log = True): cf = self.cf self.model.mode( NetMode.test) log_sources = [] test_len = 0 # evaluate loss = torch.tensor( 0.) 
with torch.no_grad() : for it in range( self.model.len( NetMode.test)) : batch_data = self.model.next() if cf.par_rank < cf.log_test_num_ranks : # keep on cpu since it will otherwise clog up GPU memory (sources, token_infos, targets, tmis, tmis_list) = batch_data[0] # targets if len(batch_data[1]) > 0 : targets = [] for target_field in batch_data[1] : targets.append(torch.cat([target_vl[0].unsqueeze(1) for target_vl in target_field],1)) # store on cpu log_sources = ( [source.detach().clone().cpu() for source in sources ], [ti.detach().clone().cpu() for ti in token_infos], [target.detach().clone().cpu() for target in targets ], tmis, tmis_list ) batch_data = self.prepare_batch( batch_data) preds, atts = self.model( batch_data) ifield = 0 for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] cur_loss = self.MSELoss( pred[0], target = target ).cpu() loss += cur_loss ifield += 1 test_len += 1 # logging if cf.par_rank < cf.log_test_num_ranks : self.log_validate( data_idx, it, log_sources, preds) if cf.attention: self.log_attention( data_idx , it, [atts, [ti.detach().clone().cpu() for ti in token_infos]]) # average over all nodes loss /= test_len * len(self.cf.fields_prediction) if cf.with_ddp : loss_cuda = loss.cuda() dist.all_reduce( loss_cuda, op=torch.distributed.ReduceOp.AVG ) loss = loss_cuda.cpu() if 0 == cf.par_rank : print( 'Loss {}'.format( loss)) ################################################### def test_loss( self, pred, target) : '''Hook for custom test loss''' pass ################################################### def loss( self, preds, batch_idx = 0) : # TODO: move implementations to individual files cf = self.cf mse_loss_total = torch.tensor( 0.,) losses = dict(zip(cf.losses,[[] for loss in cf.losses ])) for pred, idx in zip( preds, self.fields_prediction_idx) : target = self.targets[idx] mse_loss = self.MSELoss( pred[0], target = target) mse_loss_total += mse_loss.cpu().detach() # MSE loss if 'mse' in self.cf.losses : losses['mse'].append( mse_loss) # MSE loss if 'mse_ensemble' in self.cf.losses : loss_en = torch.tensor( 0., device=target.device) for en in torch.transpose( pred[2], 1, 0) : loss_en += self.MSELoss( en, target = target) # losses['mse_ensemble'].append( 50. * loss_en / pred[2].shape[1]) losses['mse_ensemble'].append( loss_en / pred[2].shape[1]) # Generalized cross entroy loss for continuous distributions if 'stats' in self.cf.losses : stats_loss = Gaussian( target, pred[0], pred[1]) diff = (stats_loss-1.) 
# stats_loss = 0.01 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) ) stats_loss = torch.mean( diff * diff) + torch.mean( torch.sqrt( torch.abs( pred[1])) ) losses['stats'].append( stats_loss) # Generalized cross entroy loss for continuous distributions if 'stats_area' in self.cf.losses : diff = torch.abs( torch.special.erf( (target - pred[0]) / (pred[1] * pred[1])) ) stats_area = 0.2 * torch.mean( diff * diff) + torch.mean( torch.sqrt(torch.abs( pred[1])) ) losses['stats_area'].append( stats_area) # CRPS score if 'crps' in self.cf.losses : crps_loss = torch.mean( CRPS( target, pred[0], pred[1])) losses['crps'].append( crps_loss) loss = torch.tensor( 0., device=self.device_out) for key in losses : # print( 'LOSS : {} :: {}'.format( key, losses[key])) for ifield, val in enumerate(losses[key]) : loss += self.loss_weights[ifield] * val.to( self.device_out) loss /= len(self.cf.fields_prediction) * len( self.cf.losses) mse_loss = mse_loss_total / len(self.cf.fields_prediction) return loss, mse_loss, losses #################################################################################################### class Trainer_BERT( Trainer_Base) : ################################################### def __init__( self, cf, devices) : Trainer_Base.__init__( self, cf, devices) self.rng_seed = cf.rng_seed if not self.rng_seed : self.rng_seed = int(torch.randint( 100000000, (1,))) # TODO: generate only rngs that are needed ll = len(cf.fields) * 8 #len(cf.vertical_levels) if cf.BERT_fields_synced : self.rngs = [np.random.default_rng(self.rng_seed) for _ in range(ll)] else : self.rngs = [np.random.default_rng(self.rng_seed+i) for i in range(ll)] # batch preprocessing to be done in loader (mainly for performance reasons since it's # parallelized there) self.pre_batch = functools.partial( prepare_batch_BERT_multifield, self.cf, self.rngs, self.cf.fields, self.cf.BERT_strategy ) ################################################### def prepare_batch( self, xin) : '''Move data to device and some additional final preprocessing before model eval''' cf = self.cf devs = self.devices # unpack loader output # xin[0] since BERT does not have targets (sources, token_infos, targets, fields_tokens_masked_idx,fields_tokens_masked_idx_list) = xin[0] # network input batch_data = [ ( sources[i].to( devs[ cf.fields[i][1][3] ], non_blocking=True), self.tok_infos_trans(token_infos[i]).to( self.devices[0], non_blocking=True)) for i in range(len(sources)) ] # store token number since BERT selects sub-cube (optionally) self.num_tokens = [] for field_idx in range(len(batch_data)) : self.num_tokens.append( list(batch_data[field_idx][0].shape[2:5])) # target self.targets = [] for ifield in self.fields_prediction_idx : self.targets.append( targets[ifield].to( devs[cf.fields[ifield][1][3]], non_blocking=True )) # idxs of masked tokens tmi_out = [[] for _ in range(len(fields_tokens_masked_idx))] for i,tmi in enumerate(fields_tokens_masked_idx) : tmi_out[i] = [tmi_l.to( devs[cf.fields[i][1][3]], non_blocking=True) for tmi_l in tmi] self.tokens_masked_idx = tmi_out # idxs of masked tokens per batch entry self.fields_tokens_masked_idx_list = fields_tokens_masked_idx_list # learnable class token (cannot be done in the data loader since this is running in parallel) if cf.learnable_mask : for ifield, (source, _) in enumerate(batch_data) : source = torch.flatten( torch.flatten( torch.flatten( source, 1, 4), 2, 4), 0, 1) assert len(cf.fields[ifield][2]) == 1 tmidx = self.tokens_masked_idx[ifield][0] source[ tmidx ] = 
self.model.net.masks[ifield].to( source.device) return batch_data ################################################### def encoder_to_decoder( self, embeds_layers) : return ([embeds_layers[i][-1] for i in range(len(embeds_layers))] , embeds_layers ) ################################################### def decoder_to_tail( self, idx_pred, pred) : '''Positional encoding of masked tokens for tail network evaluation''' field_idx = self.fields_prediction_idx[idx_pred] dev = self.devices[ self.cf.fields[field_idx][1][3] ] target_idx = self.tokens_masked_idx[field_idx] assert len(target_idx) > 0, 'no masked tokens but target variable' # select "fixed" masked tokens for loss computation # recover vertical level dimension num_tokens = self.num_tokens[field_idx] num_vlevels = len(self.cf.fields[field_idx][2]) # flatten token dimensions: remove space-time separation pred = torch.flatten( pred, 2, 3).to( dev) # extract masked token level by level pred_masked = [] for lidx, level in enumerate(self.cf.fields[field_idx][2]) : # select masked tokens, flattened along batch dimension for easier indexing and processing pred_l = torch.flatten( pred[:,lidx], 0, 1) pred_masked_l = pred_l[ target_idx[lidx] ] target_idx_l = target_idx[lidx] # add positional encoding of masked tokens # # TODO: do we need the positional encoding? # compute space time indices of all tokens target_idxs_v = level * torch.ones( target_idx_l.shape[0], device=dev) num_tokens_space = num_tokens[1] * num_tokens[2] # remove offset introduced by linearization target_idx_l = torch.remainder( target_idx_l, np.prod(num_tokens)) target_idxs_t = (target_idx_l / num_tokens_space).int() temp = torch.remainder( target_idx_l, num_tokens_space) target_idxs_x = (temp / num_tokens[1]).int() target_idxs_y = torch.remainder( temp, num_tokens[2]) # apply harmonic positional encoding dim_embed = pred.shape[-1] pe = torch.zeros( pred_masked_l.shape[0], dim_embed, device=dev) xs = (2. * np.pi / dim_embed) * torch.arange( 0, dim_embed, 2, device=dev) pe[:, 0::2] = 0.5 * torch.sin( torch.outer( 8 * target_idxs_x, xs) ) \ + torch.sin( torch.outer( target_idxs_t, xs) ) pe[:, 1::2] = 0.5 * torch.cos( torch.outer( 8 * target_idxs_y, xs) ) \ + torch.cos( torch.outer( target_idxs_v, xs) ) # TODO: with or without final positional encoding? # pred_masked.append( pred_masked_l + pe) pred_masked.append( pred_masked_l) # flatten along level dimension, for loss evaluation we effectively have level, batch, ... 
# as ordering of dimensions pred_masked = torch.cat( pred_masked, 0) return pred_masked ################################################### def log_validate( self, epoch, bidx, log_sources, log_preds) : '''Hook for logging: output associated with concrete training strategy.''' if not hasattr( self.cf, 'wandb_id') : return if 'forecast' == self.cf.BERT_strategy : self.log_validate_forecast( epoch, bidx, log_sources, log_preds) elif 'BERT' == self.cf.BERT_strategy : self.log_validate_BERT( epoch, bidx, log_sources, log_preds) else : assert False ################################################### def log_validate_forecast( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=forecast.''' cf = self.cf detok = utils.detokenize # TODO, TODO: for 6h forecast we need to iterate over predicted token slices # save source: remains identical so just save ones (sources, token_infos, targets, _, _) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] # reconstruct geo-coords (identical for all fields) forecast_num_tokens = 1 if hasattr( cf, 'forecast_num_tokens') : forecast_num_tokens = cf.forecast_num_tokens num_tokens = cf.fields[0][3] token_size = cf.fields[0][4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) lats, lons = [ ], [ ] for tinfo in token_infos[0] : lat_min, lat_max = tinfo[0][4], tinfo[ num_tokens[1]*num_tokens[2]-1 ][4] lon_min, lon_max = tinfo[0][5], tinfo[ num_tokens[1]*num_tokens[2]-1 ][5] res = tinfo[0][-1] lat = torch.arange( lat_min - lat_d_h*res, lat_max + lat_d_h*res + 0.001, res) if lon_max < lon_min : lon = torch.arange( lon_min - lon_d_h*res, 360. + lon_max + lon_d_h*res + 0.001, res) else : lon = torch.arange( lon_min - lon_d_h*res, lon_max + lon_d_h*res + 0.001, res) lats.append( lat.numpy()) lons.append( torch.remainder( lon, 360.).numpy()) # check that last token (bottom right corner) has the expected coords # assert np.allclose( ) # extract dates for each token entry, constant for each batch and field dates_t = [] for b_token_infos in token_infos[0] : dates_t.append(utils.token_info_to_time(b_token_infos[0])-pd.Timedelta(hours=token_size[0]-1)) # TODO: check that last token matches first one # process input fields for fidx, field_info in enumerate(cf.fields) : # reshape from tokens to contiguous physical field num_levels = len(field_info[2]) source = detok( sources[fidx].cpu().detach().numpy()) # recover tokenized shape target = detok( targets[fidx].cpu().detach().numpy().reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # TODO: check that geo-coords match to general ones that have been pre-determined for bidx in range(token_infos[fidx].shape[0]) : for vidx, _ in enumerate(field_info[2]) : denormalize = self.model.normalizer( fidx, vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] source[bidx,vidx] = denormalize( date.year, date.month, source[bidx,vidx], coords) target[bidx,vidx] = denormalize( date.year, date.month, target[bidx,vidx], coords) # append sources_out.append( [field_info[0], source]) targets_out.append( [field_info[0], target]) # process predicted fields for fidx, fn in enumerate(cf.fields_prediction) : # field_info = cf.fields[ self.fields_prediction_idx[fidx] ] num_levels = len(field_info[2]) # predictions pred = log_preds[fidx][0].cpu().detach().numpy() pred = detok( pred.reshape( [ -1, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ])) # ensemble ensemble = 
log_preds[fidx][2].cpu().detach().numpy() ensemble = detok( ensemble.reshape( [ -1, cf.net_tail_num_nets, num_levels, forecast_num_tokens, *field_info[3][1:], *field_info[4] ]) ) # denormalize for bidx in range(token_infos[fidx].shape[0]) : for vidx, vl in enumerate(field_info[2]) : denormalize = self.model.normalizer( self.fields_prediction_idx[fidx], vidx).denormalize date, coords = dates_t[bidx], [lats[bidx], lons[bidx]] pred[bidx,vidx] = denormalize( date.year, date.month, pred[bidx,vidx], coords) ensemble[bidx,:,vidx] = denormalize(date.year, date.month, ensemble[bidx,:,vidx], coords) # append preds_out.append( [fn[0], pred]) ensembles_out.append( [fn[0], ensemble]) # generate time range dates_sources, dates_targets = [ ], [ ] for bidx in range( source.shape[0]) : r = pd.date_range( start=dates_t[bidx], periods=source.shape[2], freq='h') dates_sources.append( r.to_pydatetime().astype( 'datetime64[s]') ) dates_targets.append( dates_sources[-1][ -forecast_num_tokens*token_size[0] : ] ) levels = np.array(cf.fields[0][2]) lats = [90.-lat for lat in lats] write_forecast( cf.wandb_id, epoch, batch_idx, levels, sources_out, [dates_sources, lats, lons], targets_out, [dates_targets, lats, lons], preds_out, ensembles_out ) ################################################### def log_validate_BERT( self, epoch, batch_idx, log_sources, log_preds) : '''Logging for BERT_strategy=BERT.''' cf = self.cf detok = utils.detokenize # save source: remains identical so just save ones (sources, token_infos, targets, tokens_masked_idx, tokens_masked_idx_list) = log_sources sources_out, targets_out, preds_out, ensembles_out = [ ], [ ], [ ], [ ] sources_dates_out, sources_lats_out, sources_lons_out = [ ], [ ], [ ] targets_dates_out, targets_lats_out, targets_lons_out = [ ], [ ], [ ] for fidx, field_info in enumerate(cf.fields) : # reconstruct coordinates is_predicted = fidx in self.fields_prediction_idx num_levels = len(field_info[2]) num_tokens = field_info[3] token_size = field_info[4] lat_d_h, lon_d_h = int(np.floor(token_size[1]/2.)), int(np.floor(token_size[2]/2.)) tinfos = token_infos[fidx].reshape( [-1, num_levels, *num_tokens, cf.size_token_info]) res = tinfos[0,0,0,0,0][-1].item() batch_size = tinfos.shape[0] sources_b = detok( sources[fidx].numpy()) if is_predicted : # split according to levels lens_levels = [t.shape[0] for t in tokens_masked_idx[fidx]] targets_b = torch.split( targets[fidx], lens_levels) preds_mu_b = torch.split( log_preds[fidx][0], lens_levels) preds_ens_b = torch.split( log_preds[fidx][2], lens_levels) # split according to batch lens_batches = [ [bv.shape[0] for bv in b] for b in tokens_masked_idx_list[fidx] ] targets_b = [torch.split( targets_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_mu_b = [torch.split(preds_mu_b[vidx], lens) for vidx,lens in enumerate(lens_batches)] preds_ens_b =[torch.split(preds_ens_b[vidx],lens) for vidx,lens in enumerate(lens_batches)] # recover token shape targets_b = [[targets_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_mu_b = [[preds_mu_b[vidx][bidx].reshape([-1, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] preds_ens_b = [[preds_ens_b[vidx][bidx].reshape( [-1, cf.net_tail_num_nets, *token_size]) for bidx in range(batch_size)] for vidx in range(num_levels)] # for all batch items coords_b = [] for bidx, tinfo in enumerate(tinfos) : # use first vertical levels since a column is considered lats = np.arange(tinfo[0,0,0,0,4]-lat_d_h*res, 
tinfo[0,0,-1,0,4]+lat_d_h*res+0.001,res) if tinfo[0,0,0,-1,5] < tinfo[0,0,0,0,5] : lons = np.remainder( np.arange( tinfo[0,0,0,0,5] - lon_d_h*res, 360. + tinfo[0,0,0,-1,5] + lon_d_h*res + 0.001, res), 360.) else : lons = np.arange(tinfo[0,0,0,0,5]-lon_d_h*res, tinfo[0,0,0,-1,5]+lon_d_h*res+0.001,res) lons = np.remainder( lons, 360.) # time stamp in token_infos is at start time so needs to be advanced by token_size[0]-1 s = utils.token_info_to_time( tinfo[0,0,0,0,:3] ) - pd.Timedelta(hours=token_size[0]-1) e = utils.token_info_to_time( tinfo[0,-1,0,0,:3] ) dates = pd.date_range( start=s, end=e, freq='h') # target etc are aliasing targets_b which simplifies bookkeeping below if is_predicted : target = [targets_b[vidx][bidx] for vidx in range(num_levels)] pred_mu = [preds_mu_b[vidx][bidx] for vidx in range(num_levels)] pred_ens = [preds_ens_b[vidx][bidx] for vidx in range(num_levels)] dates_masked_l, lats_masked_l, lons_masked_l = [], [], [] for vidx, _ in enumerate(field_info[2]) : normalizer = self.model.normalizer( fidx, vidx) y, m = dates[0].year, dates[0].month sources_b[bidx,vidx] = normalizer.denormalize( y, m, sources_b[bidx,vidx], [lats, lons]) if is_predicted : # TODO: make sure normalizer_local / normalizer_global is used in data_loader idx = tokens_masked_idx_list[fidx][vidx][bidx] tinfo_masked = tinfos[bidx,vidx].flatten( 0,2) tinfo_masked = tinfo_masked[idx] lad, lod = lat_d_h*res, lon_d_h*res lats_masked, lons_masked, dates_masked = [], [], [] for t in tinfo_masked : lats_masked.append( np.expand_dims( np.arange(t[4]-lad, t[4]+lad+0.001,res), 0)) lons_masked.append( np.expand_dims( np.arange(t[5]-lod, t[5]+lod+0.001,res), 0)) r = pd.date_range( start=utils.token_info_to_time(t), periods=token_size[0], freq='h') dates_masked.append( np.expand_dims(r.to_pydatetime().astype( 'datetime64[s]'), 0) ) lats_masked = np.concatenate( lats_masked, 0) lons_masked = np.remainder( np.concatenate( lons_masked, 0), 360.) dates_masked = np.concatenate( dates_masked, 0) for ii,(t,p,e,la,lo) in enumerate(zip( target[vidx], pred_mu[vidx], pred_ens[vidx], lats_masked, lons_masked)) : targets_b[vidx][bidx][ii] = normalizer.denormalize( y, m, t, [la, lo]) preds_mu_b[vidx][bidx][ii] = normalizer.denormalize( y, m, p, [la, lo]) preds_ens_b[vidx][bidx][ii] = normalizer.denormalize( y, m, e, [la, lo]) dates_masked_l += [ dates_masked ] lats_masked_l += [ [90.-lat for lat in lats_masked] ] lons_masked_l += [ lons_masked ] dates = dates.to_pydatetime().astype( 'datetime64[s]') coords_b += [ [dates, 90.-lats, lons, dates_masked_l, lats_masked_l, lons_masked_l] ] fn = field_info[0] sources_out.append( [fn, sources_b]) if is_predicted : targets_out.append([fn, [[t.numpy(force=True) for t in t_v] for t_v in targets_b]]) preds_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_mu_b]]) ensembles_out.append( [fn, [[p.numpy(force=True) for p in p_v] for p_v in preds_ens_b]]) else : targets_out.append( [fn, []]) preds_out.append( [fn, []]) ensembles_out.append( [fn, []]) sources_dates_out.append( [c[0] for c in coords_b]) sources_lats_out.append( [c[1] for c in coords_b]) sources_lons_out.append( [c[2] for c in coords_b]) if is_predicted : targets_dates_out.append( [c[3] for c in coords_b]) targets_lats_out.append( [c[4] for c in coords_b]) targets_lons_out.append( [c[5] for c in coords_b]) else : targets_dates_out.append( [ ]) targets_lats_out.append( [ ]) targets_lons_out.append( [ ]) levels = [[np.array(l) for l in field[2]] for field in cf.fields]
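decoder_to_tail in the code above (and positional_encoding_harmonic in the context list) interleave sine and cosine terms of the space-time token indices into the even and odd embedding channels. The sketch below reproduces just that interleaving for three hypothetical masked tokens; the embedding width and all index values are assumptions made for illustration only.

import numpy as np
import torch

dim_embed = 8                               # embedding width (assumed, illustrative only)
idxs_t = torch.tensor([0., 1., 2.])         # time indices of three masked tokens (assumed)
idxs_x = torch.tensor([3., 4., 5.])         # latitude indices (assumed)
idxs_y = torch.tensor([1., 2., 3.])         # longitude indices (assumed)
idxs_v = torch.tensor([96., 96., 96.])      # vertical level (assumed)

xs = (2. * np.pi / dim_embed) * torch.arange(0, dim_embed, 2)
pe = torch.zeros(len(idxs_t), dim_embed)
# even channels: scaled latitude plus time, as in the code above
pe[:, 0::2] = 0.5 * torch.sin(torch.outer(8 * idxs_x, xs)) + torch.sin(torch.outer(idxs_t, xs))
# odd channels: scaled longitude plus vertical level
pe[:, 1::2] = 0.5 * torch.cos(torch.outer(8 * idxs_y, xs)) + torch.cos(torch.outer(idxs_v, xs))
print(pe.shape)                             # torch.Size([3, 8])

Note that in decoder_to_tail the encoding is computed but the line that would add it to pred_masked_l is commented out, so the sketch only shows the construction, not its application.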
write_BERT( cf.wandb_id, epoch, batch_idx,
11
2023-10-09 19:42:46+00:00
24k
MachinePerceptionLab/Attentive_DFPrior
src/DF_Prior.py
[ { "identifier": "config", "path": "src/config.py", "snippet": "def load_config(path, default_path=None):\ndef update_recursive(dict1, dict2):\ndef get_model(cfg):" }, { "identifier": "Mapper", "path": "src/Mapper.py", "snippet": "class Mapper(object):\n \"\"\"\n Mapper thread. \n\n \"\"\"\n\n def __init__(self, cfg, args, slam\n ):\n\n self.cfg = cfg\n self.args = args\n\n self.idx = slam.idx\n self.c = slam.shared_c\n self.bound = slam.bound\n self.logger = slam.logger\n self.mesher = slam.mesher\n self.output = slam.output\n self.verbose = slam.verbose\n self.renderer = slam.renderer\n self.low_gpu_mem = slam.low_gpu_mem\n self.mapping_idx = slam.mapping_idx\n self.mapping_cnt = slam.mapping_cnt\n self.decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n self.mapping_first_frame = slam.mapping_first_frame\n self.scene_id = slam.scene_id\n with torch.no_grad():\n self.tsdf_volume_shared = slam.tsdf_volume_shared\n self.tsdf_bnds = slam.tsdf_bnds\n \n \n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.sync_method = cfg['sync_method']\n\n self.device = cfg['mapping']['device']\n self.fix_high = cfg['mapping']['fix_high']\n self.eval_rec = cfg['meshing']['eval_rec']\n \n \n self.mesh_freq = cfg['mapping']['mesh_freq']\n self.ckpt_freq = cfg['mapping']['ckpt_freq']\n self.fix_color = cfg['mapping']['fix_color']\n self.mapping_pixels = cfg['mapping']['pixels']\n self.num_joint_iters = cfg['mapping']['iters']\n self.clean_mesh = cfg['meshing']['clean_mesh']\n self.every_frame = cfg['mapping']['every_frame']\n self.color_refine = cfg['mapping']['color_refine']\n self.w_color_loss = cfg['mapping']['w_color_loss']\n self.keyframe_every = cfg['mapping']['keyframe_every']\n self.high_iter_ratio = cfg['mapping']['high_iter_ratio']\n self.low_iter_ratio = cfg['mapping']['low_iter_ratio']\n self.mapping_window_size = cfg['mapping']['mapping_window_size']\n self.no_vis_on_first_frame = cfg['mapping']['no_vis_on_first_frame']\n self.no_log_on_first_frame = cfg['mapping']['no_log_on_first_frame']\n self.no_mesh_on_first_frame = cfg['mapping']['no_mesh_on_first_frame']\n self.frustum_feature_selection = cfg['mapping']['frustum_feature_selection']\n self.keyframe_selection_method = cfg['mapping']['keyframe_selection_method']\n self.save_selected_keyframes_info = cfg['mapping']['save_selected_keyframes_info']\n if self.save_selected_keyframes_info:\n self.selected_keyframes = {}\n\n\n self.keyframe_dict = []\n self.keyframe_list = []\n self.frame_reader = get_dataset(\n cfg, args, self.scale, device=self.device)\n self.n_img = len(self.frame_reader)\n if 'Demo' not in self.output: # disable this visualization in demo\n self.visualizer = Visualizer(freq=cfg['mapping']['vis_freq'], inside_freq=cfg['mapping']['vis_inside_freq'],\n vis_dir=os.path.join(self.output, 'mapping_vis'), renderer=self.renderer,\n verbose=self.verbose, device=self.device)\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n def get_mask_from_c2w(self, c2w, key, val_shape, depth_np):\n \"\"\"\n Frustum feature selection based on current camera pose and depth image.\n\n Args:\n c2w (tensor): camera pose of current frame.\n key (str): name of this feature grid.\n val_shape (tensor): shape of the grid.\n depth_np (numpy.array): depth image of current frame.\n\n Returns:\n mask (tensor): mask for selected optimizable feature.\n points (tensor): corresponding point coordinates.\n \"\"\"\n H, W, fx, fy, cx, cy, = self.H, self.W, 
self.fx, self.fy, self.cx, self.cy\n X, Y, Z = torch.meshgrid(torch.linspace(self.bound[0][0], self.bound[0][1], val_shape[2]),\n torch.linspace(self.bound[1][0], self.bound[1][1], val_shape[1]),\n torch.linspace(self.bound[2][0], self.bound[2][1], val_shape[0]))\n\n points = torch.stack([X, Y, Z], dim=-1).reshape(-1, 3)\n points_bak = points.clone()\n c2w = c2w.cpu().numpy()\n w2c = np.linalg.inv(c2w)\n ones = np.ones_like(points[:, 0]).reshape(-1, 1)\n homo_vertices = np.concatenate(\n [points, ones], axis=1).reshape(-1, 4, 1)\n cam_cord_homo = w2c@homo_vertices\n cam_cord = cam_cord_homo[:, :3]\n K = np.array([[fx, .0, cx], [.0, fy, cy], [.0, .0, 1.0]]).reshape(3, 3)\n cam_cord[:, 0] *= -1\n uv = K@cam_cord\n z = uv[:, -1:]+1e-5\n uv = uv[:, :2]/z\n uv = uv.astype(np.float32)\n\n remap_chunk = int(3e4)\n depths = []\n for i in range(0, uv.shape[0], remap_chunk):\n depths += [cv2.remap(depth_np,\n uv[i:i+remap_chunk, 0],\n uv[i:i+remap_chunk, 1],\n interpolation=cv2.INTER_LINEAR)[:, 0].reshape(-1, 1)]\n depths = np.concatenate(depths, axis=0)\n\n edge = 0\n mask = (uv[:, 0] < W-edge)*(uv[:, 0] > edge) * \\\n (uv[:, 1] < H-edge)*(uv[:, 1] > edge)\n\n # For ray with depth==0, fill it with maximum depth\n zero_mask = (depths == 0)\n depths[zero_mask] = np.max(depths)\n\n # depth test\n mask = mask & (0 <= -z[:, :, 0]) & (-z[:, :, 0] <= depths+0.5)\n mask = mask.reshape(-1)\n\n # add feature grid near cam center\n ray_o = c2w[:3, 3]\n ray_o = torch.from_numpy(ray_o).unsqueeze(0)\n\n dist = points_bak-ray_o\n dist = torch.sum(dist*dist, axis=1)\n mask2 = dist < 0.5*0.5\n mask2 = mask2.cpu().numpy()\n mask = mask | mask2\n\n points = points[mask]\n mask = mask.reshape(val_shape[2], val_shape[1], val_shape[0])\n return mask\n\n def keyframe_selection_overlap(self, gt_color, gt_depth, c2w, keyframe_dict, k, N_samples=16, pixels=100):\n \"\"\"\n Select overlapping keyframes to the current camera observation.\n\n Args:\n gt_color (tensor): ground truth color image of the current frame.\n gt_depth (tensor): ground truth depth image of the current frame.\n c2w (tensor): camera to world matrix (3*4 or 4*4 both fine).\n keyframe_dict (list): a list containing info for each keyframe.\n k (int): number of overlapping keyframes to select.\n N_samples (int, optional): number of samples/points per ray. Defaults to 16.\n pixels (int, optional): number of pixels to sparsely sample \n from the image of the current camera. 
Defaults to 100.\n Returns:\n selected_keyframe_list (list): list of selected keyframe id.\n \"\"\"\n device = self.device\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n\n rays_o, rays_d, gt_depth, gt_color = get_samples(\n 0, H, 0, W, pixels, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n\n gt_depth = gt_depth.reshape(-1, 1)\n gt_depth = gt_depth.repeat(1, N_samples)\n t_vals = torch.linspace(0., 1., steps=N_samples).to(device)\n near = gt_depth*0.8\n far = gt_depth+0.5\n z_vals = near * (1.-t_vals) + far * (t_vals)\n pts = rays_o[..., None, :] + rays_d[..., None, :] * \\\n z_vals[..., :, None] # [N_rays, N_samples, 3]\n vertices = pts.reshape(-1, 3).cpu().numpy()\n list_keyframe = []\n for keyframeid, keyframe in enumerate(keyframe_dict):\n c2w = keyframe['est_c2w'].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n ones = np.ones_like(vertices[:, 0]).reshape(-1, 1)\n homo_vertices = np.concatenate(\n [vertices, ones], axis=1).reshape(-1, 4, 1) # (N, 4)\n cam_cord_homo = w2c@homo_vertices # (N, 4, 1)=(4,4)*(N, 4, 1)\n cam_cord = cam_cord_homo[:, :3] # (N, 3, 1)\n K = np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)\n cam_cord[:, 0] *= -1\n uv = K@cam_cord\n z = uv[:, -1:]+1e-5\n uv = uv[:, :2]/z\n uv = uv.astype(np.float32)\n edge = 20\n mask = (uv[:, 0] < W-edge)*(uv[:, 0] > edge) * \\\n (uv[:, 1] < H-edge)*(uv[:, 1] > edge)\n mask = mask & (z[:, :, 0] < 0)\n mask = mask.reshape(-1)\n percent_inside = mask.sum()/uv.shape[0]\n list_keyframe.append(\n {'id': keyframeid, 'percent_inside': percent_inside})\n\n list_keyframe = sorted(\n list_keyframe, key=lambda i: i['percent_inside'], reverse=True)\n selected_keyframe_list = [dic['id']\n for dic in list_keyframe if dic['percent_inside'] > 0.00]\n selected_keyframe_list = list(np.random.permutation(\n np.array(selected_keyframe_list))[:k])\n return selected_keyframe_list\n \n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): point coordinates.\n decoders (nn.module decoders): decoders.\n c (dicts, optional): feature grids. Defaults to None.\n stage (str, optional): query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, 500)\n bound = self.bound\n rets = []\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, _ = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n \n ret = ret.squeeze(0)\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100\n rets.append(ret)\n\n ret = torch.cat(rets, dim=0)\n return ret\n\n def optimize_map(self, num_joint_iters, lr_factor, idx, cur_gt_color, cur_gt_depth, gt_cur_c2w, keyframe_dict, keyframe_list, tsdf_volume, cur_c2w):\n \"\"\"\n Mapping iterations. 
Sample pixels from selected keyframes,\n then optimize scene representation.\n\n Args:\n num_joint_iters (int): number of mapping iterations.\n lr_factor (float): the factor to times on current lr.\n idx (int): the index of current frame\n cur_gt_color (tensor): gt_color image of the current camera.\n cur_gt_depth (tensor): gt_depth image of the current camera.\n gt_cur_c2w (tensor): groundtruth camera to world matrix corresponding to current frame.\n keyframe_dict (list): list of keyframes info dictionary.\n keyframe_list (list): list ofkeyframe index.\n tsdf_volume (tensor): tsdf volume.\n cur_c2w (tensor): the estimated camera to world matrix of current frame. \n\n Returns:\n return None\n \"\"\"\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n c = self.c\n cfg = self.cfg\n device = self.device\n tsdf_bnds = self.tsdf_bnds.to(device)\n\n if len(keyframe_dict) == 0:\n optimize_frame = []\n else:\n if self.keyframe_selection_method == 'global':\n num = self.mapping_window_size-2\n optimize_frame = random_select(len(self.keyframe_dict)-1, num)\n elif self.keyframe_selection_method == 'overlap':\n num = self.mapping_window_size-2\n optimize_frame = self.keyframe_selection_overlap(\n cur_gt_color, cur_gt_depth, cur_c2w, keyframe_dict[:-1], num)\n\n # add the last keyframe and the current frame(use -1 to denote)\n oldest_frame = None\n if len(keyframe_list) > 0:\n optimize_frame = optimize_frame + [len(keyframe_list)-1]\n oldest_frame = min(optimize_frame)\n optimize_frame += [-1]\n\n if self.save_selected_keyframes_info:\n keyframes_info = []\n for id, frame in enumerate(optimize_frame):\n if frame != -1:\n frame_idx = keyframe_list[frame]\n tmp_gt_c2w = keyframe_dict[frame]['gt_c2w']\n tmp_est_c2w = keyframe_dict[frame]['est_c2w']\n else:\n frame_idx = idx\n tmp_gt_c2w = gt_cur_c2w\n tmp_est_c2w = cur_c2w\n keyframes_info.append(\n {'idx': frame_idx, 'gt_c2w': tmp_gt_c2w, 'est_c2w': tmp_est_c2w})\n self.selected_keyframes[idx] = keyframes_info\n\n pixs_per_image = self.mapping_pixels//len(optimize_frame)\n\n mlp_para_list = []\n decoders_para_list = []\n low_grid_para = []\n high_grid_para = []\n color_grid_para = []\n gt_depth_np = cur_gt_depth.cpu().numpy()\n if True:\n if self.frustum_feature_selection:\n masked_c_grad = {}\n mask_c2w = cur_c2w\n for key, val in c.items():\n if not self.frustum_feature_selection:\n val = Variable(val.to(device), requires_grad=True)\n c[key] = val\n if key == 'grid_low':\n low_grid_para.append(val)\n elif key == 'grid_high':\n high_grid_para.append(val)\n elif key == 'grid_color':\n color_grid_para.append(val)\n\n else:\n mask = self.get_mask_from_c2w(\n mask_c2w, key, val.shape[2:], gt_depth_np)\n mask = torch.from_numpy(mask).permute(2, 1, 0).unsqueeze(\n 0).unsqueeze(0).repeat(1, val.shape[1], 1, 1, 1)\n val = val.to(device)\n # val_grad is the optimizable part, other parameters will be fixed\n val_grad = val[mask].clone()\n val_grad = Variable(val_grad.to(\n device), requires_grad=True)\n masked_c_grad[key] = val_grad\n masked_c_grad[key+'mask'] = mask\n if key == 'grid_low':\n low_grid_para.append(val_grad)\n elif key == 'grid_high':\n high_grid_para.append(val_grad)\n elif key == 'grid_color':\n color_grid_para.append(val_grad)\n\n\n if not self.fix_high:\n decoders_para_list += list(\n self.decoders.high_decoder.parameters())\n if not self.fix_color:\n decoders_para_list += list(\n self.decoders.color_decoder.parameters())\n mlp_para_list += list(\n self.decoders.mlp.parameters())\n \n\n optimizer = 
torch.optim.Adam([{'params': decoders_para_list, 'lr': 0},\n {'params': mlp_para_list, 'lr': 0},\n {'params': low_grid_para, 'lr': 0},\n {'params': high_grid_para, 'lr': 0},\n {'params': color_grid_para, 'lr': 0}])\n \n\n for joint_iter in range(num_joint_iters):\n if self.frustum_feature_selection:\n for key, val in c.items():\n val_grad = masked_c_grad[key]\n mask = masked_c_grad[key+'mask']\n val = val.to(device)\n val[mask] = val_grad\n c[key] = val\n\n if joint_iter <= int(num_joint_iters*self.low_iter_ratio):\n self.stage = 'low'\n elif joint_iter <= int(num_joint_iters*self.high_iter_ratio):\n self.stage = 'high'\n else:\n self.stage = 'color'\n\n optimizer.param_groups[0]['lr'] = cfg['mapping']['stage'][self.stage]['decoders_lr']*lr_factor\n optimizer.param_groups[1]['lr'] = cfg['mapping']['stage'][self.stage]['mlp_lr']*lr_factor\n optimizer.param_groups[2]['lr'] = cfg['mapping']['stage'][self.stage]['low_lr']*lr_factor\n optimizer.param_groups[3]['lr'] = cfg['mapping']['stage'][self.stage]['high_lr']*lr_factor\n optimizer.param_groups[4]['lr'] = cfg['mapping']['stage'][self.stage]['color_lr']*lr_factor\n \n if (not (idx == 0 and self.no_vis_on_first_frame)) and ('Demo' not in self.output):\n self.visualizer.vis(\n idx, joint_iter, cur_gt_depth, cur_gt_color, cur_c2w, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n\n optimizer.zero_grad()\n batch_rays_d_list = []\n batch_rays_o_list = []\n batch_gt_depth_list = []\n batch_gt_color_list = []\n\n camera_tensor_id = 0\n for frame in optimize_frame:\n if frame != -1:\n gt_depth = keyframe_dict[frame]['depth'].to(device)\n gt_color = keyframe_dict[frame]['color'].to(device)\n c2w = keyframe_dict[frame]['est_c2w']\n\n else:\n gt_depth = cur_gt_depth.to(device)\n gt_color = cur_gt_color.to(device)\n c2w = cur_c2w\n\n batch_rays_o, batch_rays_d, batch_gt_depth, batch_gt_color = get_samples(\n 0, H, 0, W, pixs_per_image, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n batch_rays_o_list.append(batch_rays_o.float())\n batch_rays_d_list.append(batch_rays_d.float())\n batch_gt_depth_list.append(batch_gt_depth.float())\n batch_gt_color_list.append(batch_gt_color.float())\n\n batch_rays_d = torch.cat(batch_rays_d_list)\n batch_rays_o = torch.cat(batch_rays_o_list)\n batch_gt_depth = torch.cat(batch_gt_depth_list)\n batch_gt_color = torch.cat(batch_gt_color_list)\n\n\n # should pre-filter those out of bounding box depth value\n with torch.no_grad():\n det_rays_o = batch_rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = batch_rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(\n device)-det_rays_o)/det_rays_d\n t, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n inside_mask = t >= batch_gt_depth\n batch_rays_d = batch_rays_d[inside_mask]\n batch_rays_o = batch_rays_o[inside_mask]\n batch_gt_depth = batch_gt_depth[inside_mask]\n batch_gt_color = batch_gt_color[inside_mask]\n\n ret = self.renderer.render_batch_ray(c, self.decoders, batch_rays_d,\n batch_rays_o, device, tsdf_volume, tsdf_bnds, self.stage,\n batch_gt_depth)\n depth, uncertainty, color, weight = ret\n\n\n depth_mask = (batch_gt_depth > 0)\n \n if joint_iter > int(num_joint_iters*self.low_iter_ratio) and joint_iter <= int(num_joint_iters*self.low_iter_ratio)+5 and idx <= 1:\n loss = torch.abs(\n batch_gt_depth[depth_mask]-depth[depth_mask]).sum() + torch.abs(weight-torch.ones(weight.shape).to(device)).sum()\n else:\n loss = torch.abs(\n batch_gt_depth[depth_mask]-depth[depth_mask]).sum()\n \n if self.stage == 'color':\n 
color_loss = torch.abs(batch_gt_color - color).sum()\n weighted_color_loss = self.w_color_loss*color_loss\n loss += weighted_color_loss\n\n loss.backward(retain_graph=False)\n optimizer.step()\n optimizer.zero_grad()\n\n # put selected and updated features back to the grid\n if self.frustum_feature_selection:\n for key, val in c.items():\n val_grad = masked_c_grad[key]\n mask = masked_c_grad[key+'mask']\n val = val.detach()\n val[mask] = val_grad.clone().detach()\n c[key] = val\n\n return None\n\n\n def run(self):\n cfg = self.cfg\n idx, gt_color, gt_depth, gt_c2w = self.frame_reader[0]\n\n self.estimate_c2w_list[0] = gt_c2w.cpu()\n init = True\n prev_idx = -1\n tsdf_volume = self.tsdf_volume_shared\n \n while (1):\n while True:\n idx = self.idx[0].clone()\n if idx == self.n_img-1:\n break\n if self.sync_method == 'strict':\n if idx % self.every_frame == 0 and idx != prev_idx:\n break\n elif self.sync_method == 'loose':\n if idx == 0 or idx >= prev_idx+self.every_frame//2:\n break\n elif self.sync_method == 'free':\n break\n time.sleep(0.1)\n prev_idx = idx\n\n if self.verbose:\n print(Fore.GREEN)\n prefix = ''\n print(prefix+\"Mapping Frame \", idx.item())\n print(Style.RESET_ALL)\n\n _, gt_color, gt_depth, gt_c2w = self.frame_reader[idx]\n\n # valid c2w\n valid_c2w = gt_c2w.clone().cpu().numpy()\n if not np.isfinite(valid_c2w).any():\n self.mapping_idx[0] = idx\n continue\n\n\n if not init:\n lr_factor = cfg['mapping']['lr_factor']\n num_joint_iters = cfg['mapping']['iters']\n\n # here provides a color refinement postprocess\n if idx == self.n_img-1 and self.color_refine:\n outer_joint_iters = 5\n self.mapping_window_size *= 2\n self.low_iter_ratio = 0.0\n self.high_iter_ratio = 0.0\n num_joint_iters *= 5\n self.fix_color = True\n self.frustum_feature_selection = False\n else:\n outer_joint_iters = 1\n \n\n else:\n outer_joint_iters = 1\n lr_factor = cfg['mapping']['lr_first_factor']\n num_joint_iters = cfg['mapping']['iters_first']\n\n cur_c2w = self.estimate_c2w_list[idx].to(self.device)\n num_joint_iters = num_joint_iters//outer_joint_iters\n \n for outer_joint_iter in range(outer_joint_iters):\n\n\n _ = self.optimize_map(num_joint_iters, lr_factor, idx, gt_color, gt_depth,\n gt_c2w, self.keyframe_dict, self.keyframe_list, tsdf_volume, cur_c2w=cur_c2w)\n \n\n # add new frame to keyframe set\n if outer_joint_iter == outer_joint_iters-1:\n if (idx % self.keyframe_every == 0 or (idx == self.n_img-2)) \\\n and (idx not in self.keyframe_list):\n self.keyframe_list.append(idx)\n self.keyframe_dict.append({'gt_c2w': gt_c2w.cpu(), 'idx': idx, 'color': gt_color.cpu(\n ), 'depth': gt_depth.cpu(), 'est_c2w': cur_c2w.clone()})\n\n if self.low_gpu_mem:\n torch.cuda.empty_cache()\n\n init = False\n # mapping of first frame is done, can begin tracking\n self.mapping_first_frame[0] = 1\n\n if True:\n if ((not (idx == 0 and self.no_log_on_first_frame)) and idx % self.ckpt_freq == 0) \\\n or idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n self.logger.log(idx, self.keyframe_dict, self.keyframe_list,\n selected_keyframes=self.selected_keyframes\n if self.save_selected_keyframes_info else None)\n\n self.mapping_idx[0] = idx\n self.mapping_cnt[0] += 1\n\n if (idx % self.mesh_freq == 0) and (not (idx == 0 and self.no_mesh_on_first_frame)):\n mesh_out_file = f'{self.output}/mesh/{idx:05d}_mesh.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict, self.estimate_c2w_list,\n idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, 
get_mask_use_all_frames=False)\n\n if idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n mesh_out_file = f'{self.output}/mesh/final_mesh.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict, self.estimate_c2w_list,\n idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, get_mask_use_all_frames=False)\n os.system(\n f\"cp {mesh_out_file} {self.output}/mesh/{idx:05d}_mesh.ply\")\n if self.eval_rec:\n mesh_out_file = f'{self.output}/mesh/final_mesh_eval_rec.ply'\n self.mesher.get_mesh(mesh_out_file, self.c, self.decoders, self.keyframe_dict,\n self.estimate_c2w_list, idx, tsdf_volume, self.device,\n clean_mesh=self.clean_mesh, get_mask_use_all_frames=True)\n break\n\n if idx == self.n_img-1 or (idx == 4640 and self.scene_id==50):\n break" }, { "identifier": "Tracker", "path": "src/Tracker.py", "snippet": "class Tracker(object):\n def __init__(self, cfg, args, slam\n ):\n self.cfg = cfg\n self.args = args\n\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.sync_method = cfg['sync_method']\n\n self.idx = slam.idx\n self.bound = slam.bound\n self.mesher = slam.mesher\n self.output = slam.output\n self.verbose = slam.verbose\n self.shared_c = slam.shared_c\n self.renderer = slam.renderer\n self.gt_c2w_list = slam.gt_c2w_list\n self.low_gpu_mem = slam.low_gpu_mem\n self.mapping_idx = slam.mapping_idx\n self.mapping_cnt = slam.mapping_cnt\n self.shared_decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n with torch.no_grad():\n self.tsdf_volume_shared = slam.tsdf_volume_shared\n self.tsdf_bnds = slam.tsdf_bnds\n\n\n self.cam_lr = cfg['tracking']['lr']\n self.device = cfg['tracking']['device']\n self.num_cam_iters = cfg['tracking']['iters']\n self.gt_camera = cfg['tracking']['gt_camera']\n self.tracking_pixels = cfg['tracking']['pixels']\n self.seperate_LR = cfg['tracking']['seperate_LR']\n self.w_color_loss = cfg['tracking']['w_color_loss']\n self.ignore_edge_W = cfg['tracking']['ignore_edge_W']\n self.ignore_edge_H = cfg['tracking']['ignore_edge_H']\n self.handle_dynamic = cfg['tracking']['handle_dynamic']\n self.use_color_in_tracking = cfg['tracking']['use_color_in_tracking']\n self.const_speed_assumption = cfg['tracking']['const_speed_assumption']\n\n self.every_frame = cfg['mapping']['every_frame'] \n self.no_vis_on_first_frame = cfg['mapping']['no_vis_on_first_frame'] # ori mapping\n\n self.prev_mapping_idx = -1\n self.frame_reader = get_dataset(\n cfg, args, self.scale, device=self.device)\n self.n_img = len(self.frame_reader)\n self.frame_loader = DataLoader(\n self.frame_reader, batch_size=1, shuffle=False, num_workers=1)\n self.visualizer = Visualizer(freq=cfg['tracking']['vis_freq'], inside_freq=cfg['tracking']['vis_inside_freq'],\n vis_dir=os.path.join(self.output, 'vis' if 'Demo' in self.output else 'tracking_vis'),\n renderer=self.renderer, verbose=self.verbose, device=self.device)\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n def optimize_cam_in_batch(self, camera_tensor, gt_color, gt_depth, batch_size, optimizer, tsdf_volume):\n \"\"\"\n Do one iteration of camera iteration. 
Sample pixels, render depth/color, calculate loss and backpropagation.\n\n Args:\n camera_tensor (tensor): camera tensor.\n gt_color (tensor): ground truth color image of the current frame.\n gt_depth (tensor): ground truth depth image of the current frame.\n batch_size (int): batch size, number of sampling rays.\n optimizer (torch.optim): camera optimizer.\n tsdf_volume (tensor): tsdf volume\n\n Returns:\n loss (float): The value of loss.\n \"\"\"\n device = self.device\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n optimizer.zero_grad()\n c2w = get_camera_from_tensor(camera_tensor)\n tsdf_bnds = self.tsdf_bnds.to(device)\n Wedge = self.ignore_edge_W\n Hedge = self.ignore_edge_H\n batch_rays_o, batch_rays_d, batch_gt_depth, batch_gt_color = get_samples(\n Hedge, H-Hedge, Wedge, W-Wedge, batch_size, H, W, fx, fy, cx, cy, c2w, gt_depth, gt_color, self.device)\n \n # should pre-filter those out of bounding box depth value\n with torch.no_grad():\n det_rays_o = batch_rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = batch_rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(device)-det_rays_o)/det_rays_d\n t, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n inside_mask = t >= batch_gt_depth\n batch_rays_d = batch_rays_d[inside_mask]\n batch_rays_o = batch_rays_o[inside_mask]\n batch_gt_depth = batch_gt_depth[inside_mask]\n batch_gt_color = batch_gt_color[inside_mask]\n\n ret = self.renderer.render_batch_ray(\n self.c, self.decoders, batch_rays_d, batch_rays_o, self.device, tsdf_volume, tsdf_bnds, stage='color', gt_depth=batch_gt_depth) #color\n depth, uncertainty, color, _ = ret\n\n uncertainty = uncertainty.detach()\n if self.handle_dynamic:\n tmp = torch.abs(batch_gt_depth-depth)/torch.sqrt(uncertainty+1e-10)\n mask = (tmp < 10*tmp.median()) & (batch_gt_depth > 0)\n else:\n mask = batch_gt_depth > 0\n\n loss = (torch.abs(batch_gt_depth-depth) /\n torch.sqrt(uncertainty+1e-10))[mask].sum()\n\n if self.use_color_in_tracking:\n color_loss = torch.abs(\n batch_gt_color - color)[mask].sum()\n loss += self.w_color_loss*color_loss\n \n loss.backward(retain_graph=False)\n optimizer.step()\n optimizer.zero_grad()\n return loss.item()\n\n def update_para_from_mapping(self):\n \"\"\"\n Update the parameters of scene representation from the mapping thread.\n\n \"\"\"\n if self.mapping_idx[0] != self.prev_mapping_idx:\n if self.verbose:\n print('Tracking: update the parameters from mapping')\n self.decoders = copy.deepcopy(self.shared_decoders).to(self.device)\n for key, val in self.shared_c.items():\n val = val.clone().to(self.device)\n self.c[key] = val\n self.prev_mapping_idx = self.mapping_idx[0].clone()\n\n def run(self):\n device = self.device\n tsdf_volume = self.tsdf_volume_shared\n tsdf_bnds = self.tsdf_bnds.to(device)\n \n self.c = {}\n if self.verbose:\n pbar = self.frame_loader\n else:\n pbar = tqdm(self.frame_loader)\n\n for idx, gt_color, gt_depth, gt_c2w in pbar:\n if not self.verbose:\n pbar.set_description(f\"Tracking Frame {idx[0]}\")\n\n idx = idx[0]\n gt_depth = gt_depth[0]\n gt_color = gt_color[0]\n gt_c2w = gt_c2w[0]\n\n if self.sync_method == 'strict':\n # strictly mapping and then tracking\n # initiate mapping every self.every_frame frames\n if idx > 0 and (idx % self.every_frame == 1 or self.every_frame == 1):\n while self.mapping_idx[0] != idx-1:\n time.sleep(0.1)\n pre_c2w = self.estimate_c2w_list[idx-1].to(device)\n elif self.sync_method == 'loose':\n # mapping idx can be later than tracking idx is 
within the bound of\n # [-self.every_frame-self.every_frame//2, -self.every_frame+self.every_frame//2]\n while self.mapping_idx[0] < idx-self.every_frame-self.every_frame//2:\n time.sleep(0.1)\n elif self.sync_method == 'free':\n # pure parallel, if mesh/vis happens may cause inbalance\n pass\n\n self.update_para_from_mapping()\n\n if self.verbose:\n print(Fore.MAGENTA)\n print(\"Tracking Frame \", idx.item())\n print(Style.RESET_ALL)\n \n \n\n if idx == 0 or self.gt_camera:\n c2w = gt_c2w\n if not self.no_vis_on_first_frame:\n self.visualizer.vis(\n idx, 0, gt_depth, gt_color, c2w, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n \n else:\n gt_camera_tensor = get_tensor_from_camera(gt_c2w)\n if self.const_speed_assumption and idx-2 >= 0:\n pre_c2w = pre_c2w.float()\n delta = [email protected]_c2w_list[idx-2].to(\n device).float().inverse()\n estimated_new_cam_c2w = delta@pre_c2w\n else:\n estimated_new_cam_c2w = pre_c2w\n\n camera_tensor = get_tensor_from_camera(\n estimated_new_cam_c2w.detach())\n if self.seperate_LR:\n camera_tensor = camera_tensor.to(device).detach()\n T = camera_tensor[-3:]\n quad = camera_tensor[:4]\n cam_para_list_quad = [quad]\n quad = Variable(quad, requires_grad=True)\n T = Variable(T, requires_grad=True)\n camera_tensor = torch.cat([quad, T], 0)\n cam_para_list_T = [T]\n cam_para_list_quad = [quad]\n optimizer_camera = torch.optim.Adam([{'params': cam_para_list_T, 'lr': self.cam_lr},\n {'params': cam_para_list_quad, 'lr': self.cam_lr*0.2}])\n else:\n camera_tensor = Variable(\n camera_tensor.to(device), requires_grad=True)\n cam_para_list = [camera_tensor]\n optimizer_camera = torch.optim.Adam(\n cam_para_list, lr=self.cam_lr)\n\n initial_loss_camera_tensor = torch.abs(\n gt_camera_tensor.to(device)-camera_tensor).mean().item()\n candidate_cam_tensor = None\n current_min_loss = 10000000000.\n\n \n\n for cam_iter in range(self.num_cam_iters):\n if self.seperate_LR:\n camera_tensor = torch.cat([quad, T], 0).to(self.device)\n\n self.visualizer.vis(\n idx, cam_iter, gt_depth, gt_color, camera_tensor, self.c, self.decoders, tsdf_volume, tsdf_bnds)\n\n loss = self.optimize_cam_in_batch(\n camera_tensor, gt_color, gt_depth, self.tracking_pixels, optimizer_camera, tsdf_volume)\n\n if cam_iter == 0:\n initial_loss = loss\n\n loss_camera_tensor = torch.abs(\n gt_camera_tensor.to(device)-camera_tensor).mean().item()\n if self.verbose:\n if cam_iter == self.num_cam_iters-1:\n print(\n f'Re-rendering loss: {initial_loss:.2f}->{loss:.2f} ' +\n f'camera tensor error: {initial_loss_camera_tensor:.4f}->{loss_camera_tensor:.4f}')\n if loss < current_min_loss:\n current_min_loss = loss\n candidate_cam_tensor = camera_tensor.clone().detach()\n bottom = torch.from_numpy(np.array([0, 0, 0, 1.]).reshape(\n [1, 4])).type(torch.float32).to(self.device)\n c2w = get_camera_from_tensor(\n candidate_cam_tensor.clone().detach())\n c2w = torch.cat([c2w, bottom], dim=0)\n\n \n self.estimate_c2w_list[idx] = c2w.clone().cpu()\n self.gt_c2w_list[idx] = gt_c2w.clone().cpu()\n pre_c2w = c2w.clone()\n self.idx[0] = idx\n if self.low_gpu_mem:\n torch.cuda.empty_cache()" }, { "identifier": "get_dataset", "path": "src/utils/datasets.py", "snippet": "def get_dataset(cfg, args, scale, device='cuda:0'):\n return dataset_dict[cfg['dataset']](cfg, args, scale, device=device)" }, { "identifier": "Logger", "path": "src/utils/Logger.py", "snippet": "class Logger(object):\n \"\"\"\n Save checkpoints to file.\n\n \"\"\"\n\n def __init__(self, cfg, args, slam\n ):\n self.verbose = slam.verbose\n self.ckptsdir = 
slam.ckptsdir\n self.shared_c = slam.shared_c\n self.gt_c2w_list = slam.gt_c2w_list\n self.shared_decoders = slam.shared_decoders\n self.estimate_c2w_list = slam.estimate_c2w_list\n self.tsdf_volume = slam.tsdf_volume_shared\n\n def log(self, idx, keyframe_dict, keyframe_list, selected_keyframes=None):\n path = os.path.join(self.ckptsdir, '{:05d}.tar'.format(idx))\n torch.save({\n 'c': self.shared_c,\n 'decoder_state_dict': self.shared_decoders.state_dict(),\n 'gt_c2w_list': self.gt_c2w_list,\n 'estimate_c2w_list': self.estimate_c2w_list,\n 'keyframe_list': keyframe_list,\n 'keyframe_dict': keyframe_dict, # to save keyframe_dict into ckpt, uncomment this line\n 'selected_keyframes': selected_keyframes,\n 'idx': idx,\n 'tsdf_volume': self.tsdf_volume,\n }, path, _use_new_zipfile_serialization=False)\n\n if self.verbose:\n print('Saved checkpoints at', path)" }, { "identifier": "Mesher", "path": "src/utils/Mesher.py", "snippet": "class Mesher(object):\n\n def __init__(self, cfg, args, slam, points_batch_size=500000, ray_batch_size=100000):\n \"\"\"\n Mesher class, given a scene representation, the mesher extracts the mesh from it.\n\n Args:\n cfg (dict): parsed config dict.\n args (class 'argparse.Namespace'): argparse arguments.\n slam (class DF_Prior): DF_Prior main class.\n points_batch_size (int): maximum points size for query in one batch. \n Used to alleviate GPU memeory usage. Defaults to 500000.\n ray_batch_size (int): maximum ray size for query in one batch. \n Used to alleviate GPU memeory usage. Defaults to 100000.\n \"\"\"\n self.points_batch_size = points_batch_size\n self.ray_batch_size = ray_batch_size\n self.renderer = slam.renderer\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n \n self.resolution = cfg['meshing']['resolution']\n self.level_set = cfg['meshing']['level_set']\n self.clean_mesh_bound_scale = cfg['meshing']['clean_mesh_bound_scale']\n self.remove_small_geometry_threshold = cfg['meshing']['remove_small_geometry_threshold']\n self.color_mesh_extraction_method = cfg['meshing']['color_mesh_extraction_method']\n self.get_largest_components = cfg['meshing']['get_largest_components']\n self.depth_test = cfg['meshing']['depth_test']\n \n self.bound = slam.bound\n self.verbose = slam.verbose\n \n\n self.marching_cubes_bound = torch.from_numpy(\n np.array(cfg['mapping']['marching_cubes_bound']) * self.scale)\n\n self.frame_reader = get_dataset(cfg, args, self.scale, device='cpu')\n self.n_img = len(self.frame_reader)\n\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n self.sample_mode = 'bilinear'\n self.tsdf_bnds = slam.tsdf_bnds\n\n\n\n def point_masks(self, input_points, keyframe_dict, estimate_c2w_list,\n idx, device, get_mask_use_all_frames=False):\n \"\"\"\n Split the input points into seen, unseen, and forcast,\n according to the estimated camera pose and depth image.\n\n Args:\n input_points (tensor): input points.\n keyframe_dict (list): list of keyframe info dictionary.\n estimate_c2w_list (tensor): estimated camera pose.\n idx (int): current frame index.\n device (str): device name to compute on.\n\n Returns:\n seen_mask (tensor): the mask for seen area.\n forecast_mask (tensor): the mask for forecast area.\n unseen_mask (tensor): the mask for unseen area.\n \"\"\"\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n if not isinstance(input_points, torch.Tensor):\n input_points = torch.from_numpy(input_points)\n input_points = input_points.clone().detach()\n 
seen_mask_list = []\n forecast_mask_list = []\n unseen_mask_list = []\n for i, pnts in enumerate(\n torch.split(input_points, self.points_batch_size, dim=0)):\n points = pnts.to(device).float()\n # should divide the points into three parts, seen and forecast and unseen\n # seen: union of all the points in the viewing frustum of keyframes\n # forecast: union of all the points in the extended edge of the viewing frustum of keyframes\n # unseen: all the other points\n\n seen_mask = torch.zeros((points.shape[0])).bool().to(device)\n forecast_mask = torch.zeros((points.shape[0])).bool().to(device)\n if get_mask_use_all_frames:\n for i in range(0, idx + 1, 1):\n c2w = estimate_c2w_list[i].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n w2c = torch.from_numpy(w2c).to(device).float()\n ones = torch.ones_like(\n points[:, 0]).reshape(-1, 1).to(device)\n homo_points = torch.cat([points, ones], dim=1).reshape(\n -1, 4, 1).to(device).float() # (N, 4)\n # (N, 4, 1)=(4,4)*(N, 4, 1)\n cam_cord_homo = w2c @ homo_points\n cam_cord = cam_cord_homo[:, :3] # (N, 3, 1)\n\n K = torch.from_numpy(\n np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)).to(device)\n cam_cord[:, 0] *= -1\n uv = K.float() @ cam_cord.float()\n z = uv[:, -1:] + 1e-8\n uv = uv[:, :2] / z\n uv = uv.float()\n edge = 0\n cur_mask_seen = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_seen = cur_mask_seen & (z[:, :, 0] < 0)\n\n edge = -1000\n cur_mask_forecast = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_forecast = cur_mask_forecast & (z[:, :, 0] < 0)\n\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n\n seen_mask |= cur_mask_seen\n forecast_mask |= cur_mask_forecast\n else:\n for keyframe in keyframe_dict:\n c2w = keyframe['est_c2w'].cpu().numpy()\n w2c = np.linalg.inv(c2w)\n w2c = torch.from_numpy(w2c).to(device).float()\n ones = torch.ones_like(\n points[:, 0]).reshape(-1, 1).to(device)\n homo_points = torch.cat([points, ones], dim=1).reshape(\n -1, 4, 1).to(device).float()\n cam_cord_homo = w2c @ homo_points\n cam_cord = cam_cord_homo[:, :3]\n\n K = torch.from_numpy(\n np.array([[fx, .0, cx], [.0, fy, cy],\n [.0, .0, 1.0]]).reshape(3, 3)).to(device)\n cam_cord[:, 0] *= -1\n uv = K.float() @ cam_cord.float()\n z = uv[:, -1:] + 1e-8\n uv = uv[:, :2] / z\n uv = uv.float()\n edge = 0\n cur_mask_seen = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_seen = cur_mask_seen & (z[:, :, 0] < 0)\n\n edge = -1000\n cur_mask_forecast = (uv[:, 0] < W - edge) & (\n uv[:, 0] > edge) & (uv[:, 1] < H - edge) & (uv[:, 1] > edge)\n cur_mask_forecast = cur_mask_forecast & (z[:, :, 0] < 0)\n\n if self.depth_test:\n gt_depth = keyframe['depth'].to(\n device).reshape(1, 1, H, W)\n vgrid = uv.reshape(1, 1, -1, 2)\n # normalized to [-1, 1]\n vgrid[..., 0] = (vgrid[..., 0] / (W-1) * 2.0 - 1.0)\n vgrid[..., 1] = (vgrid[..., 1] / (H-1) * 2.0 - 1.0)\n depth_sample = F.grid_sample(\n gt_depth, vgrid, padding_mode='zeros', align_corners=True)\n depth_sample = depth_sample.reshape(-1)\n max_depth = torch.max(depth_sample)\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n proj_depth_forecast = -cam_cord[cur_mask_forecast,\n 2].reshape(-1)\n cur_mask_forecast[cur_mask_forecast.clone()] &= proj_depth_forecast < max_depth\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n proj_depth_seen = - 
cam_cord[cur_mask_seen, 2].reshape(-1)\n cur_mask_seen[cur_mask_seen.clone()] &= \\\n (proj_depth_seen < depth_sample[cur_mask_seen]+2.4) \\\n & (depth_sample[cur_mask_seen]-2.4 < proj_depth_seen)\n else:\n max_depth = torch.max(keyframe['depth'])*1.1\n\n # forecast\n cur_mask_forecast = cur_mask_forecast.reshape(-1)\n proj_depth_forecast = -cam_cord[cur_mask_forecast,\n 2].reshape(-1)\n cur_mask_forecast[\n cur_mask_forecast.clone()] &= proj_depth_forecast < max_depth\n\n # seen\n cur_mask_seen = cur_mask_seen.reshape(-1)\n proj_depth_seen = - \\\n cam_cord[cur_mask_seen, 2].reshape(-1)\n cur_mask_seen[cur_mask_seen.clone(\n )] &= proj_depth_seen < max_depth\n\n seen_mask |= cur_mask_seen\n forecast_mask |= cur_mask_forecast\n\n forecast_mask &= ~seen_mask\n unseen_mask = ~(seen_mask | forecast_mask)\n\n seen_mask = seen_mask.cpu().numpy()\n forecast_mask = forecast_mask.cpu().numpy()\n unseen_mask = unseen_mask.cpu().numpy()\n\n seen_mask_list.append(seen_mask)\n forecast_mask_list.append(forecast_mask)\n unseen_mask_list.append(unseen_mask)\n\n seen_mask = np.concatenate(seen_mask_list, axis=0)\n forecast_mask = np.concatenate(forecast_mask_list, axis=0)\n unseen_mask = np.concatenate(unseen_mask_list, axis=0)\n return seen_mask, forecast_mask, unseen_mask\n\n def get_bound_from_frames(self, keyframe_dict, scale=1):\n \"\"\"\n Get the scene bound (convex hull),\n using sparse estimated camera poses and corresponding depth images.\n\n Args:\n keyframe_dict (list): list of keyframe info dictionary.\n scale (float): scene scale.\n\n Returns:\n return_mesh (trimesh.Trimesh): the convex hull.\n \"\"\"\n\n H, W, fx, fy, cx, cy = self.H, self.W, self.fx, self.fy, self.cx, self.cy\n\n if version.parse(o3d.__version__) >= version.parse('0.13.0'):\n # for new version as provided in environment.yaml\n volume = o3d.pipelines.integration.ScalableTSDFVolume(\n voxel_length=4.0 * scale / 512.0,\n sdf_trunc=0.04 * scale,\n color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)\n else:\n # for lower version\n volume = o3d.integration.ScalableTSDFVolume(\n voxel_length=4.0 * scale / 512.0,\n sdf_trunc=0.04 * scale,\n color_type=o3d.integration.TSDFVolumeColorType.RGB8)\n cam_points = []\n for keyframe in keyframe_dict:\n c2w = keyframe['est_c2w'].cpu().numpy()\n # convert to open3d camera pose\n c2w[:3, 1] *= -1.0\n c2w[:3, 2] *= -1.0\n w2c = np.linalg.inv(c2w)\n cam_points.append(c2w[:3, 3])\n depth = keyframe['depth'].cpu().numpy()\n color = keyframe['color'].cpu().numpy()\n\n depth = o3d.geometry.Image(depth.astype(np.float32))\n color = o3d.geometry.Image(np.array(\n (color * 255).astype(np.uint8)))\n\n intrinsic = o3d.camera.PinholeCameraIntrinsic(W, H, fx, fy, cx, cy)\n rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(\n color,\n depth,\n depth_scale=1,\n depth_trunc=1000,\n convert_rgb_to_intensity=False)\n volume.integrate(rgbd, intrinsic, w2c)\n\n cam_points = np.stack(cam_points, axis=0)\n mesh = volume.extract_triangle_mesh()\n mesh_points = np.array(mesh.vertices)\n points = np.concatenate([cam_points, mesh_points], axis=0)\n o3d_pc = o3d.geometry.PointCloud(o3d.utility.Vector3dVector(points))\n mesh, _ = o3d_pc.compute_convex_hull()\n mesh.compute_vertex_normals()\n if version.parse(o3d.__version__) >= version.parse('0.13.0'):\n mesh = mesh.scale(self.clean_mesh_bound_scale, mesh.get_center())\n else:\n mesh = mesh.scale(self.clean_mesh_bound_scale, center=True)\n points = np.array(mesh.vertices)\n faces = np.array(mesh.triangles)\n return_mesh = 
trimesh.Trimesh(vertices=points, faces=faces)\n return return_mesh\n\n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): point coordinates.\n decoders (nn.module decoders): decoders.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n c (dicts, optional): feature grids. Defaults to None.\n stage (str, optional): query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n bound = self.bound\n rets = []\n\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, _ = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n \n ret = ret.squeeze(0)\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100\n rets.append(ret)\n\n ret = torch.cat(rets, dim=0)\n\n return ret\n\n def sample_grid_tsdf(self, p, tsdf_volume, device='cuda:0'):\n\n p_nor = normalize_3d_coordinate(p.clone(), self.tsdf_bnds)\n p_nor = p_nor.unsqueeze(0)\n vgrid = p_nor[:, :, None, None].float()\n # acutally trilinear interpolation if mode = 'bilinear'\n tsdf_value = F.grid_sample(tsdf_volume.to(device), vgrid.to(device), padding_mode='border', align_corners=True,\n mode='bilinear').squeeze(-1).squeeze(-1)\n return tsdf_value\n\n\n def eval_points_tsdf(self, p, tsdf_volume, device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n tsdf_volume (tensor): tsdf volume.\n\n Returns:\n ret (tensor): tsdf value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n tsdf_vals = []\n for pi in p_split:\n pi = pi.unsqueeze(0)\n tsdf_volume_tensor = tsdf_volume\n\n tsdf_val = self.sample_grid_tsdf(pi, tsdf_volume_tensor, device)\n tsdf_val = tsdf_val.squeeze(0)\n tsdf_vals.append(tsdf_val)\n\n tsdf_values = torch.cat(tsdf_vals, dim=1)\n return tsdf_values\n\n\n def get_grid_uniform(self, resolution):\n \"\"\"\n Get query point coordinates for marching cubes.\n\n Args:\n resolution (int): marching cubes resolution.\n\n Returns:\n (dict): points coordinates and sampled coordinates for each axis.\n \"\"\"\n bound = self.marching_cubes_bound\n\n padding = 0.05\n x = np.linspace(bound[0][0] - padding, bound[0][1] + padding,\n resolution)\n y = np.linspace(bound[1][0] - padding, bound[1][1] + padding,\n resolution)\n z = np.linspace(bound[2][0] - padding, bound[2][1] + padding,\n resolution)\n\n xx, yy, zz = np.meshgrid(x, y, z)\n grid_points = np.vstack([xx.ravel(), yy.ravel(), zz.ravel()]).T\n grid_points = torch.tensor(np.vstack(\n [xx.ravel(), yy.ravel(), zz.ravel()]).T,\n dtype=torch.float)\n\n\n\n return {\"grid_points\": grid_points, \"xyz\": [x, y, z]}\n\n def get_mesh(self,\n mesh_out_file,\n c,\n decoders,\n keyframe_dict,\n estimate_c2w_list,\n idx,\n tsdf_volume,\n device='cuda:0',\n color=True,\n clean_mesh=True,\n get_mask_use_all_frames=False):\n \"\"\"\n Extract mesh from scene representation and save mesh to file.\n\n 
Args:\n mesh_out_file (str): output mesh filename.\n c (dicts): feature grids.\n decoders (nn.module): decoders.\n keyframe_dict (list): list of keyframe info.\n estimate_c2w_list (tensor): estimated camera pose.\n idx (int): current processed camera ID.\n tsdf volume (tensor): tsdf volume.\n device (str, optional): device name to compute on. Defaults to 'cuda:0'.\n color (bool, optional): whether to extract colored mesh. Defaults to True.\n clean_mesh (bool, optional): whether to clean the output mesh \n (remove outliers outside the convexhull and small geometry noise). \n Defaults to True.\n get_mask_use_all_frames (bool, optional): \n whether to use all frames or just keyframes when getting the seen/unseen mask. Defaults to False.\n \"\"\"\n with torch.no_grad():\n\n grid = self.get_grid_uniform(self.resolution) \n points = grid['grid_points']\n points = points.to(device)\n eval_tsdf_volume = tsdf_volume\n\n mesh_bound = self.get_bound_from_frames(\n keyframe_dict, self.scale)\n z = []\n mask = []\n for i, pnts in enumerate(torch.split(points, self.points_batch_size, dim=0)):\n mask.append(mesh_bound.contains(pnts.cpu().numpy()))\n mask = np.concatenate(mask, axis=0)\n for i, pnts in enumerate(torch.split(points, self.points_batch_size, dim=0)):\n eval_tsdf = self.eval_points_tsdf(pnts, eval_tsdf_volume, device)\n eval_tsdf_mask = ((eval_tsdf > -1.0+1e-4) & (eval_tsdf < 1.0-1e-4)).cpu().numpy()\n ret = self.eval_points(pnts, decoders, tsdf_volume, self.tsdf_bnds, c, 'high', device)\n ret = ret.cpu().numpy()[:, -1]\n\n eval_tsdf_mask = eval_tsdf_mask.reshape(ret.shape)\n z.append(ret)\n \n z = np.concatenate(z, axis=0)\n z[~mask] = 100\n z = z.astype(np.float32)\n\n z_uni_m = z.reshape(\n grid['xyz'][1].shape[0], grid['xyz'][0].shape[0],\n grid['xyz'][2].shape[0]).transpose([1, 0, 2])\n\n print('begin marching cube...')\n combine_occ_tsdf = z_uni_m\n\n try:\n if version.parse(\n skimage.__version__) > version.parse('0.15.0'):\n # for new version as provided in environment.yaml\n verts, faces, normals, values = skimage.measure.marching_cubes(\n volume=combine_occ_tsdf,\n level=self.level_set, \n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][1][2] - grid['xyz'][1][1],\n grid['xyz'][2][2] - grid['xyz'][2][1]))\n else:\n # for lower version\n verts, faces, normals, values = skimage.measure.marching_cubes_lewiner(\n volume=combine_occ_tsdf,\n level=self.level_set, \n spacing=(grid['xyz'][0][2] - grid['xyz'][0][1],\n grid['xyz'][1][2] - grid['xyz'][1][1],\n grid['xyz'][2][2] - grid['xyz'][2][1]))\n except:\n print(\n 'marching_cubes error. 
Possibly no surface extracted from the level set.'\n )\n return\n\n # convert back to world coordinates\n vertices = verts + np.array(\n [grid['xyz'][0][0], grid['xyz'][1][0], grid['xyz'][2][0]])\n\n if clean_mesh:\n points = vertices\n mesh = trimesh.Trimesh(vertices=vertices,\n faces=faces,\n process=False)\n seen_mask, _, unseen_mask = self.point_masks(\n points, keyframe_dict, estimate_c2w_list, idx, device=device, \n get_mask_use_all_frames=get_mask_use_all_frames)\n unseen_mask = ~seen_mask\n face_mask = unseen_mask[mesh.faces].all(axis=1)\n mesh.update_faces(~face_mask)\n\n # get connected components\n components = mesh.split(only_watertight=False)\n if self.get_largest_components:\n areas = np.array([c.area for c in components], dtype=np.float)\n mesh = components[areas.argmax()]\n else:\n new_components = []\n for comp in components:\n if comp.area > self.remove_small_geometry_threshold * self.scale * self.scale:\n new_components.append(comp)\n mesh = trimesh.util.concatenate(new_components)\n vertices = mesh.vertices\n faces = mesh.faces\n\n if color:\n if self.color_mesh_extraction_method == 'direct_point_query':\n # color is extracted by passing the coordinates of mesh vertices through the network\n points = torch.from_numpy(vertices)\n z = []\n for i, pnts in enumerate(\n torch.split(points, self.points_batch_size, dim=0)):\n ret = self.eval_points(\n pnts.to(device).float(), decoders, tsdf_volume, self.tsdf_bnds, c, 'color',\n device)\n z_color = ret.cpu()[..., :3]\n z.append(z_color)\n z = torch.cat(z, axis=0)\n vertex_colors = z.numpy()\n\n vertex_colors = np.clip(vertex_colors, 0, 1) * 255\n vertex_colors = vertex_colors.astype(np.uint8)\n\n\n else:\n vertex_colors = None\n\n vertices /= self.scale\n mesh = trimesh.Trimesh(vertices, faces, vertex_colors=vertex_colors)\n mesh.export(mesh_out_file)\n if self.verbose:\n print('Saved mesh at', mesh_out_file)\n\n return z_uni_m" }, { "identifier": "Renderer", "path": "src/utils/Renderer.py", "snippet": "class Renderer(object):\n def __init__(self, cfg, args, slam, points_batch_size=500000, ray_batch_size=100000):\n self.ray_batch_size = ray_batch_size\n self.points_batch_size = points_batch_size\n\n self.lindisp = cfg['rendering']['lindisp']\n self.perturb = cfg['rendering']['perturb']\n self.N_samples = cfg['rendering']['N_samples']\n self.N_surface = cfg['rendering']['N_surface']\n self.N_importance = cfg['rendering']['N_importance']\n\n self.scale = cfg['scale']\n self.occupancy = cfg['occupancy']\n self.bound = slam.bound\n self.sample_mode = 'bilinear'\n self.tsdf_bnds = slam.vol_bnds\n\n self.H, self.W, self.fx, self.fy, self.cx, self.cy = slam.H, slam.W, slam.fx, slam.fy, slam.cx, slam.cy\n\n self.resolution = cfg['meshing']['resolution']\n\n def eval_points(self, p, decoders, tsdf_volume, tsdf_bnds, c=None, stage='color', device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n decoders (nn.module decoders): Decoders.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n c (dicts, optional): Feature grids. Defaults to None.\n stage (str, optional): Query stage, corresponds to different levels. Defaults to 'color'.\n device (str, optional): CUDA device. 
Defaults to 'cuda:0'.\n\n Returns:\n ret (tensor): occupancy (and color) value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n bound = self.bound\n rets = []\n weights = []\n\n for pi in p_split:\n # mask for points out of bound\n mask_x = (pi[:, 0] < bound[0][1]) & (pi[:, 0] > bound[0][0])\n mask_y = (pi[:, 1] < bound[1][1]) & (pi[:, 1] > bound[1][0])\n mask_z = (pi[:, 2] < bound[2][1]) & (pi[:, 2] > bound[2][0])\n mask = mask_x & mask_y & mask_z\n\n pi = pi.unsqueeze(0)\n ret, w = decoders(pi, c_grid=c, tsdf_volume=tsdf_volume, tsdf_bnds=tsdf_bnds, stage=stage)\n ret = ret.squeeze(0)\n\n\n if len(ret.shape) == 1 and ret.shape[0] == 4:\n ret = ret.unsqueeze(0)\n\n ret[~mask, 3] = 100 \n rets.append(ret)\n weights.append(w)\n\n ret = torch.cat(rets, dim=0)\n weight = torch.cat(weights, dim=0)\n\n return ret, weight \n\n def sample_grid_tsdf(self, p, tsdf_volume, device='cuda:0'):\n\n p_nor = normalize_3d_coordinate(p.clone(), self.tsdf_bnds)\n p_nor = p_nor.unsqueeze(0)\n vgrid = p_nor[:, :, None, None].float()\n # acutally trilinear interpolation if mode = 'bilinear'\n tsdf_value = F.grid_sample(tsdf_volume.to(device), vgrid.to(device), padding_mode='border', align_corners=True,\n mode='bilinear').squeeze(-1).squeeze(-1)\n return tsdf_value\n\n\n def eval_points_tsdf(self, p, tsdf_volume, device='cuda:0'):\n \"\"\"\n Evaluates the occupancy and/or color value for the points.\n\n Args:\n p (tensor, N*3): Point coordinates.\n \n\n Returns:\n ret (tensor): tsdf value of input points.\n \"\"\"\n\n p_split = torch.split(p, self.points_batch_size)\n tsdf_vals = []\n for pi in p_split:\n pi = pi.unsqueeze(0)\n tsdf_volume_tensor = tsdf_volume\n\n tsdf_val = self.sample_grid_tsdf(pi, tsdf_volume_tensor, device)\n tsdf_val = tsdf_val.squeeze(0)\n tsdf_vals.append(tsdf_val)\n\n tsdf_values = torch.cat(tsdf_vals, dim=1)\n return tsdf_values\n\n\n def render_batch_ray(self, c, decoders, rays_d, rays_o, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None):\n \"\"\"\n Render color, depth and uncertainty of a batch of rays.\n\n Args:\n c (dict): feature grids.\n decoders (nn.module): decoders.\n rays_d (tensor, N*3): rays direction.\n rays_o (tensor, N*3): rays origin.\n device (str): device name to compute on.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n stage (str): query stage.\n gt_depth (tensor, optional): sensor depth image. 
Defaults to None.\n\n Returns:\n depth (tensor): rendered depth.\n uncertainty (tensor): rendered uncertainty.\n color (tensor): rendered color.\n weight (tensor): attention weight.\n \"\"\"\n eval_tsdf_volume = tsdf_volume\n \n\n N_samples = self.N_samples\n N_surface = self.N_surface\n N_importance = self.N_importance\n\n N_rays = rays_o.shape[0]\n\n if gt_depth is None:\n N_surface = 0\n near = 0.01\n else:\n gt_depth = gt_depth.reshape(-1, 1)\n gt_depth_samples = gt_depth.repeat(1, N_samples)\n near = gt_depth_samples*0.01\n\n with torch.no_grad():\n det_rays_o = rays_o.clone().detach().unsqueeze(-1) # (N, 3, 1)\n det_rays_d = rays_d.clone().detach().unsqueeze(-1) # (N, 3, 1)\n t = (self.bound.unsqueeze(0).to(device) -\n det_rays_o)/det_rays_d # (N, 3, 2)\n far_bb, _ = torch.min(torch.max(t, dim=2)[0], dim=1)\n far_bb = far_bb.unsqueeze(-1)\n far_bb += 0.01\n\n if gt_depth is not None:\n # in case the bound is too large\n far = torch.clamp(far_bb, 0, torch.max(gt_depth*1.2))\n\n else:\n far = far_bb\n if N_surface > 0:\n if False:\n # this naive implementation downgrades performance\n gt_depth_surface = gt_depth.repeat(1, N_surface)\n t_vals_surface = torch.linspace(\n 0., 1., steps=N_surface).to(device)\n z_vals_surface = 0.95*gt_depth_surface * \\\n (1.-t_vals_surface) + 1.05 * \\\n gt_depth_surface * (t_vals_surface)\n else:\n # since we want to colorize even on regions with no depth sensor readings,\n # meaning colorize on interpolated geometry region,\n # we sample all pixels (not using depth mask) for color loss.\n # Therefore, for pixels with non-zero depth value, we sample near the surface,\n # since it is not a good idea to sample 16 points near (half even behind) camera,\n # for pixels with zero depth value, we sample uniformly from camera to max_depth.\n gt_none_zero_mask = gt_depth > 0\n gt_none_zero = gt_depth[gt_none_zero_mask]\n gt_none_zero = gt_none_zero.unsqueeze(-1)\n gt_depth_surface = gt_none_zero.repeat(1, N_surface)\n t_vals_surface = torch.linspace(\n 0., 1., steps=N_surface).double().to(device)\n # emperical range 0.05*depth\n z_vals_surface_depth_none_zero = 0.95*gt_depth_surface * \\\n (1.-t_vals_surface) + 1.05 * \\\n gt_depth_surface * (t_vals_surface)\n z_vals_surface = torch.zeros(\n gt_depth.shape[0], N_surface).to(device).double()\n gt_none_zero_mask = gt_none_zero_mask.squeeze(-1)\n z_vals_surface[gt_none_zero_mask,\n :] = z_vals_surface_depth_none_zero\n near_surface = 0.001\n far_surface = torch.max(gt_depth)\n z_vals_surface_depth_zero = near_surface * \\\n (1.-t_vals_surface) + far_surface * (t_vals_surface)\n z_vals_surface_depth_zero.unsqueeze(\n 0).repeat((~gt_none_zero_mask).sum(), 1)\n z_vals_surface[~gt_none_zero_mask,\n :] = z_vals_surface_depth_zero\n\n t_vals = torch.linspace(0., 1., steps=N_samples, device=device)\n\n if not self.lindisp:\n z_vals = near * (1.-t_vals) + far * (t_vals)\n else:\n z_vals = 1./(1./near * (1.-t_vals) + 1./far * (t_vals))\n\n if self.perturb > 0.:\n # get intervals between samples\n mids = .5 * (z_vals[..., 1:] + z_vals[..., :-1])\n upper = torch.cat([mids, z_vals[..., -1:]], -1)\n lower = torch.cat([z_vals[..., :1], mids], -1)\n # stratified samples in those intervals\n t_rand = torch.rand(z_vals.shape).to(device)\n z_vals = lower + (upper - lower) * t_rand\n\n if N_surface > 0:\n z_vals, _ = torch.sort(\n torch.cat([z_vals, z_vals_surface.double()], -1), -1)\n\n pts = rays_o[..., None, :] + rays_d[..., None, :] * \\\n z_vals[..., :, None] # [N_rays, N_samples+N_surface, 3]\n pointsf = pts.reshape(-1, 3)\n \n 
raw, weight = self.eval_points(pointsf, decoders, tsdf_volume, tsdf_bnds, c, stage, device)\n raw = raw.reshape(N_rays, N_samples+N_surface, -1)\n weight = weight.reshape(N_rays, N_samples+N_surface, -1)\n\n\n depth, uncertainty, color, weights = raw2outputs_nerf_color(\n raw, z_vals, rays_d, occupancy=self.occupancy, device=device)\n \n if N_importance > 0:\n z_vals_mid = .5 * (z_vals[..., 1:] + z_vals[..., :-1])\n z_samples = sample_pdf(\n z_vals_mid, weights[..., 1:-1], N_importance, det=(self.perturb == 0.), device=device)\n z_samples = z_samples.detach()\n z_vals, _ = torch.sort(torch.cat([z_vals, z_samples], -1), -1)\n\n pts = rays_o[..., None, :] + \\\n rays_d[..., None, :] * z_vals[..., :, None]\n pts = pts.reshape(-1, 3)\n \n raw, weight = self.eval_points(pointsf, decoders, tsdf_volume, tsdf_bnds, c, stage, device)\n raw = raw.reshape(N_rays, N_samples+N_surface, -1)\n weight = weight.reshape(N_rays, N_samples+N_surface, -1)\n\n depth, uncertainty, color, weights = raw2outputs_nerf_color(\n raw, z_vals, rays_d, occupancy=self.occupancy, device=device)\n return depth, uncertainty, color, weight\n\n\n return depth, uncertainty, color, weight\n\n\n def render_img(self, c, decoders, c2w, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None):\n \"\"\"\n Renders out depth, uncertainty, and color images.\n\n Args:\n c (dict): feature grids.\n decoders (nn.module): decoders.\n c2w (tensor): camera to world matrix of current frame.\n device (str): device name to compute on.\n tsdf_volume (tensor): tsdf volume.\n tsdf_bnds (tensor): tsdf volume bounds.\n stage (str): query stage.\n gt_depth (tensor, optional): sensor depth image. Defaults to None.\n\n Returns:\n depth (tensor, H*W): rendered depth image.\n uncertainty (tensor, H*W): rendered uncertainty image.\n color (tensor, H*W*3): rendered color image.\n \"\"\"\n \n with torch.no_grad():\n H = self.H\n W = self.W\n rays_o, rays_d = get_rays(\n H, W, self.fx, self.fy, self.cx, self.cy, c2w, device)\n rays_o = rays_o.reshape(-1, 3)\n rays_d = rays_d.reshape(-1, 3)\n\n depth_list = []\n uncertainty_list = []\n color_list = []\n\n\n ray_batch_size = self.ray_batch_size\n gt_depth = gt_depth.reshape(-1)\n\n for i in range(0, rays_d.shape[0], ray_batch_size):\n rays_d_batch = rays_d[i:i+ray_batch_size]\n rays_o_batch = rays_o[i:i+ray_batch_size]\n\n iter = 10\n\n if gt_depth is None:\n ret = self.render_batch_ray(\n c, decoders, rays_d_batch, rays_o_batch, device, tsdf_volume, tsdf_bnds, stage, gt_depth=None)\n else:\n gt_depth_batch = gt_depth[i:i+ray_batch_size]\n ret = self.render_batch_ray(\n c, decoders, rays_d_batch, rays_o_batch, device, tsdf_volume, tsdf_bnds, stage, gt_depth=gt_depth_batch)\n\n depth, uncertainty, color, _= ret\n\n \n depth_list.append(depth.double())\n uncertainty_list.append(uncertainty.double())\n color_list.append(color)\n \n \n\n\n\n depth = torch.cat(depth_list, dim=0)\n uncertainty = torch.cat(uncertainty_list, dim=0)\n color = torch.cat(color_list, dim=0)\n \n depth = depth.reshape(H, W)\n uncertainty = uncertainty.reshape(H, W)\n color = color.reshape(H, W, 3)\n\n return depth, uncertainty, color " } ]
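Editor's note (illustrative sketch, not part of the dataset record): the render_batch_ray snippet above places extra samples in a +/-5% band around each pixel's sensor depth (0.95*d to 1.05*d) on top of the coarse near-to-far samples, then sorts the merged depths. The standalone sketch below reproduces only that sampling arithmetic; the function name sample_depths, the sample counts, and the simplification that every pixel has a valid depth reading are assumptions made for the example, not taken from the repository.

import torch

def sample_depths(gt_depth: torch.Tensor, far: torch.Tensor,
                  n_samples: int = 32, n_surface: int = 16) -> torch.Tensor:
    # Coarse samples from a near plane (1% of the sensor depth) out to the far distance.
    t = torch.linspace(0.0, 1.0, steps=n_samples)
    near = gt_depth.unsqueeze(-1) * 0.01                      # (N, 1)
    z_coarse = near * (1.0 - t) + far.unsqueeze(-1) * t       # (N, n_samples)

    # Extra samples concentrated in a +/-5% band around the sensor depth.
    t_s = torch.linspace(0.0, 1.0, steps=n_surface)
    d = gt_depth.unsqueeze(-1)
    z_surface = 0.95 * d * (1.0 - t_s) + 1.05 * d * t_s       # (N, n_surface)

    # Merge and sort so depths along each ray stay monotone for volume rendering.
    z_vals, _ = torch.sort(torch.cat([z_coarse, z_surface], dim=-1), dim=-1)
    return z_vals

# Toy usage: two rays with sensor depths 2.0 m and 3.5 m.
gt_depth = torch.tensor([2.0, 3.5])
far = torch.tensor([4.0, 6.0])
print(sample_depths(gt_depth, far).shape)    # torch.Size([2, 48])

Sorting the concatenated depths before forming the sample points matches the snippet's torch.sort over z_vals and z_vals_surface.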
import os import time import numpy as np import torch import torch.multiprocessing import torch.multiprocessing as mp from src import config from src.Mapper import Mapper from src.Tracker import Tracker from src.utils.datasets import get_dataset from src.utils.Logger import Logger from src.utils.Mesher import Mesher from src.utils.Renderer import Renderer
20642
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass self.frame_reader = get_dataset(cfg, args, self.scale) self.n_img = len(self.frame_reader) self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4)) self.estimate_c2w_list.share_memory_() dataset = self.cfg['data']['dataset'] scene_id = self.cfg['data']['id'] self.scene_id = scene_id print(scene_id) # load tsdf grid if dataset == 'scannet': self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt') elif dataset == 'replica': self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt') self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device']) self.tsdf_volume_shared.share_memory_() # load tsdf grid bound if dataset == 'scannet': self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt') elif dataset == 'replica': self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt') self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device']) self.tsdf_bnds.share_memory_() self.vol_bnds = self.tsdf_bnds self.vol_bnds.share_memory_() self.gt_c2w_list = torch.zeros((self.n_img, 4, 4)) self.gt_c2w_list.share_memory_() self.idx = torch.zeros((1)).int() self.idx.share_memory_() self.mapping_first_frame = torch.zeros((1)).int() self.mapping_first_frame.share_memory_() # the id of the newest frame Mapper is processing self.mapping_idx = torch.zeros((1)).int() self.mapping_idx.share_memory_() self.mapping_cnt = torch.zeros((1)).int() # counter for mapping self.mapping_cnt.share_memory_() for key, val in self.shared_c.items(): val = val.to(self.cfg['mapping']['device']) val.share_memory_() self.shared_c[key] = val self.shared_decoders = self.shared_decoders.to( self.cfg['mapping']['device']) self.shared_decoders.share_memory() self.renderer = Renderer(cfg, args, self) self.mesher = Mesher(cfg, args, self) self.logger = Logger(cfg, args, self)
# import src.fusion as fusion # import open3d as o3d torch.multiprocessing.set_sharing_strategy('file_system') class DF_Prior(): """ DF_Prior main class. Mainly allocate shared resources, and dispatch mapping and tracking process. """ def __init__(self, cfg, args): self.cfg = cfg self.args = args self.occupancy = cfg['occupancy'] self.low_gpu_mem = cfg['low_gpu_mem'] self.verbose = cfg['verbose'] self.dataset = cfg['dataset'] if args.output is None: self.output = cfg['data']['output'] else: self.output = args.output self.ckptsdir = os.path.join(self.output, 'ckpts') os.makedirs(self.output, exist_ok=True) os.makedirs(self.ckptsdir, exist_ok=True) os.makedirs(f'{self.output}/mesh', exist_ok=True) self.H, self.W, self.fx, self.fy, self.cx, self.cy = cfg['cam']['H'], cfg['cam'][ 'W'], cfg['cam']['fx'], cfg['cam']['fy'], cfg['cam']['cx'], cfg['cam']['cy'] self.update_cam() model = config.get_model(cfg) self.shared_decoders = model self.scale = cfg['scale'] self.load_bound(cfg) self.load_pretrain(cfg) self.grid_init(cfg) # need to use spawn try: mp.set_start_method('spawn', force=True) except RuntimeError: pass self.frame_reader = get_dataset(cfg, args, self.scale) self.n_img = len(self.frame_reader) self.estimate_c2w_list = torch.zeros((self.n_img, 4, 4)) self.estimate_c2w_list.share_memory_() dataset = self.cfg['data']['dataset'] scene_id = self.cfg['data']['id'] self.scene_id = scene_id print(scene_id) # load tsdf grid if dataset == 'scannet': self.tsdf_volume_shared = torch.load(f'scannet_tsdf_volume/scene{scene_id}_tsdf_volume.pt') elif dataset == 'replica': self.tsdf_volume_shared = torch.load(f'replica_tsdf_volume/{scene_id}_tsdf_volume.pt') self.tsdf_volume_shared = self.tsdf_volume_shared.to(self.cfg['mapping']['device']) self.tsdf_volume_shared.share_memory_() # load tsdf grid bound if dataset == 'scannet': self.tsdf_bnds = torch.load(f'scannet_tsdf_volume/scene{scene_id}_bounds.pt') elif dataset == 'replica': self.tsdf_bnds = torch.load(f'replica_tsdf_volume/{scene_id}_bounds.pt') self.tsdf_bnds = torch.tensor(self.tsdf_bnds).to(self.cfg['mapping']['device']) self.tsdf_bnds.share_memory_() self.vol_bnds = self.tsdf_bnds self.vol_bnds.share_memory_() self.gt_c2w_list = torch.zeros((self.n_img, 4, 4)) self.gt_c2w_list.share_memory_() self.idx = torch.zeros((1)).int() self.idx.share_memory_() self.mapping_first_frame = torch.zeros((1)).int() self.mapping_first_frame.share_memory_() # the id of the newest frame Mapper is processing self.mapping_idx = torch.zeros((1)).int() self.mapping_idx.share_memory_() self.mapping_cnt = torch.zeros((1)).int() # counter for mapping self.mapping_cnt.share_memory_() for key, val in self.shared_c.items(): val = val.to(self.cfg['mapping']['device']) val.share_memory_() self.shared_c[key] = val self.shared_decoders = self.shared_decoders.to( self.cfg['mapping']['device']) self.shared_decoders.share_memory() self.renderer = Renderer(cfg, args, self) self.mesher = Mesher(cfg, args, self) self.logger = Logger(cfg, args, self)
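Editor's note (illustrative sketch, not a dataset field): the mapping, tracking, and rendering snippets in the record above all clamp ray sampling with the same bound intersection, t = (bound - rays_o) / rays_d per axis, followed by the minimum over axes of the per-axis maximum, which gives the distance at which each ray leaves the axis-aligned scene bound. A minimal standalone version is sketched below; the helper name far_from_bound and the toy inputs are assumptions for the example.

import torch

def far_from_bound(rays_o: torch.Tensor, rays_d: torch.Tensor, bound: torch.Tensor) -> torch.Tensor:
    # bound: (3, 2) per-axis [min, max]; rays_o, rays_d: (N, 3).
    det_rays_o = rays_o.unsqueeze(-1)                          # (N, 3, 1)
    det_rays_d = rays_d.unsqueeze(-1)                          # (N, 3, 1)
    t = (bound.unsqueeze(0) - det_rays_o) / det_rays_d         # (N, 3, 2) slab-plane hits
    # For a ray starting inside the box, the max over each axis's two planes is the
    # positive exit distance; the min over axes is the face the ray actually leaves first.
    far, _ = torch.min(torch.max(t, dim=2)[0], dim=1)
    return far

# Toy usage: rays from the centre of a unit cube; exit distances lie in [1, sqrt(3)].
# (A direction component of exactly zero would need special handling; the snippets
# above rely on it not occurring for camera rays.)
bound = torch.tensor([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]])
rays_o = torch.zeros(4, 3)
rays_d = torch.nn.functional.normalize(torch.randn(4, 3), dim=-1)
print(far_from_bound(rays_o, rays_d, bound))

In the mapping and tracking loops this exit distance is compared against the sensor depth (inside_mask = t >= batch_gt_depth) to drop rays whose measured depth falls outside the scene bound.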
self.mapper = Mapper(cfg, args, self)
1
2023-10-13 00:49:57+00:00
24k
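Editor's note (illustrative sketch, not part of either record): the keyframe_selection_overlap method in the Mapper snippet of the record above ranks candidate keyframes by how many back-projected depth points land inside their view, then randomly permutes the best candidates. The sketch below keeps only the projection-and-count step; the name percent_inside, the toy intrinsics, and the random point cloud are assumptions made for the example.

import numpy as np

def percent_inside(points_w: np.ndarray, c2w: np.ndarray, K: np.ndarray,
                   H: int, W: int, edge: int = 20) -> float:
    # Project world points into a candidate keyframe and count the fraction that
    # lands inside the image (minus a border) and in front of the camera
    # (z < 0 here, matching the axis flip used by the snippet).
    w2c = np.linalg.inv(c2w)
    homo = np.concatenate([points_w, np.ones((len(points_w), 1))], axis=1)   # (N, 4)
    cam = (w2c @ homo.T).T[:, :3]
    cam[:, 0] *= -1
    uv = (K @ cam.T).T
    z = uv[:, 2:3] + 1e-5
    uv = uv[:, :2] / z
    inside = (uv[:, 0] > edge) & (uv[:, 0] < W - edge) & \
             (uv[:, 1] > edge) & (uv[:, 1] < H - edge) & (z[:, 0] < 0)
    return float(inside.mean())

# Toy usage: a random point cloud roughly two metres in front of an identity pose.
K = np.array([[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]])
pts = np.random.uniform(-1.0, 1.0, size=(1000, 3)) + np.array([0.0, 0.0, -2.0])
print(percent_inside(pts, np.eye(4), K, H=480, W=640))

The repository's version sorts keyframes by this fraction and keeps a random subset of those with a non-zero overlap.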
fury-05/BookRecomendApp
.pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_ranking.py
[ { "identifier": "UndefinedMetricWarning", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/exceptions.py", "snippet": "class UndefinedMetricWarning(UserWarning):\n \"\"\"Warning used when the metric is invalid\n\n .. versionchanged:: 0.18\n Moved from sklearn.base.\n \"\"\"" }, { "identifier": "label_binarize", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/preprocessing/_label.py", "snippet": "@validate_params(\n {\n \"y\": [\"array-like\"],\n \"classes\": [\"array-like\"],\n \"neg_label\": [Interval(Integral, None, None, closed=\"neither\")],\n \"pos_label\": [Interval(Integral, None, None, closed=\"neither\")],\n \"sparse_output\": [\"boolean\"],\n },\n prefer_skip_nested_validation=True,\n)\ndef label_binarize(y, *, classes, neg_label=0, pos_label=1, sparse_output=False):\n \"\"\"Binarize labels in a one-vs-all fashion.\n\n Several regression and binary classification algorithms are\n available in scikit-learn. A simple way to extend these algorithms\n to the multi-class classification case is to use the so-called\n one-vs-all scheme.\n\n This function makes it possible to compute this transformation for a\n fixed set of class labels known ahead of time.\n\n Parameters\n ----------\n y : array-like\n Sequence of integer labels or multilabel data to encode.\n\n classes : array-like of shape (n_classes,)\n Uniquely holds the label for each class.\n\n neg_label : int, default=0\n Value with which negative labels must be encoded.\n\n pos_label : int, default=1\n Value with which positive labels must be encoded.\n\n sparse_output : bool, default=False,\n Set to true if output binary array is desired in CSR sparse format.\n\n Returns\n -------\n Y : {ndarray, sparse matrix} of shape (n_samples, n_classes)\n Shape will be (n_samples, 1) for binary problems. 
Sparse matrix will\n be of CSR format.\n\n See Also\n --------\n LabelBinarizer : Class used to wrap the functionality of label_binarize and\n allow for fitting to classes independently of the transform operation.\n\n Examples\n --------\n >>> from sklearn.preprocessing import label_binarize\n >>> label_binarize([1, 6], classes=[1, 2, 4, 6])\n array([[1, 0, 0, 0],\n [0, 0, 0, 1]])\n\n The class ordering is preserved:\n\n >>> label_binarize([1, 6], classes=[1, 6, 4, 2])\n array([[1, 0, 0, 0],\n [0, 1, 0, 0]])\n\n Binary targets transform to a column vector\n\n >>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])\n array([[1],\n [0],\n [0],\n [1]])\n \"\"\"\n if not isinstance(y, list):\n # XXX Workaround that will be removed when list of list format is\n # dropped\n y = check_array(\n y, input_name=\"y\", accept_sparse=\"csr\", ensure_2d=False, dtype=None\n )\n else:\n if _num_samples(y) == 0:\n raise ValueError(\"y has 0 samples: %r\" % y)\n if neg_label >= pos_label:\n raise ValueError(\n \"neg_label={0} must be strictly less than pos_label={1}.\".format(\n neg_label, pos_label\n )\n )\n\n if sparse_output and (pos_label == 0 or neg_label != 0):\n raise ValueError(\n \"Sparse binarization is only supported with non \"\n \"zero pos_label and zero neg_label, got \"\n \"pos_label={0} and neg_label={1}\"\n \"\".format(pos_label, neg_label)\n )\n\n # To account for pos_label == 0 in the dense case\n pos_switch = pos_label == 0\n if pos_switch:\n pos_label = -neg_label\n\n y_type = type_of_target(y)\n if \"multioutput\" in y_type:\n raise ValueError(\n \"Multioutput target data is not supported with label binarization\"\n )\n if y_type == \"unknown\":\n raise ValueError(\"The type of target data is not known\")\n\n n_samples = y.shape[0] if sp.issparse(y) else len(y)\n n_classes = len(classes)\n classes = np.asarray(classes)\n\n if y_type == \"binary\":\n if n_classes == 1:\n if sparse_output:\n return sp.csr_matrix((n_samples, 1), dtype=int)\n else:\n Y = np.zeros((len(y), 1), dtype=int)\n Y += neg_label\n return Y\n elif len(classes) >= 3:\n y_type = \"multiclass\"\n\n sorted_class = np.sort(classes)\n if y_type == \"multilabel-indicator\":\n y_n_classes = y.shape[1] if hasattr(y, \"shape\") else len(y[0])\n if classes.size != y_n_classes:\n raise ValueError(\n \"classes {0} mismatch with the labels {1} found in the data\".format(\n classes, unique_labels(y)\n )\n )\n\n if y_type in (\"binary\", \"multiclass\"):\n y = column_or_1d(y)\n\n # pick out the known labels from y\n y_in_classes = np.isin(y, classes)\n y_seen = y[y_in_classes]\n indices = np.searchsorted(sorted_class, y_seen)\n indptr = np.hstack((0, np.cumsum(y_in_classes)))\n\n data = np.empty_like(indices)\n data.fill(pos_label)\n Y = sp.csr_matrix((data, indices, indptr), shape=(n_samples, n_classes))\n elif y_type == \"multilabel-indicator\":\n Y = sp.csr_matrix(y)\n if pos_label != 1:\n data = np.empty_like(Y.data)\n data.fill(pos_label)\n Y.data = data\n else:\n raise ValueError(\n \"%s target data is not supported with label binarization\" % y_type\n )\n\n if not sparse_output:\n Y = Y.toarray()\n Y = Y.astype(int, copy=False)\n\n if neg_label != 0:\n Y[Y == 0] = neg_label\n\n if pos_switch:\n Y[Y == pos_label] = 0\n else:\n Y.data = Y.data.astype(int, copy=False)\n\n # preserve label ordering\n if np.any(classes != sorted_class):\n indices = np.searchsorted(sorted_class, classes)\n Y = Y[:, indices]\n\n if y_type == \"binary\":\n if sparse_output:\n Y = Y.getcol(-1)\n else:\n Y = Y[:, -1].reshape((-1, 1))\n\n 
return Y" }, { "identifier": "assert_all_finite", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def assert_all_finite(\n X,\n *,\n allow_nan=False,\n estimator_name=None,\n input_name=\"\",\n):\n \"\"\"Throw a ValueError if X contains NaN or infinity.\n\n Parameters\n ----------\n X : {ndarray, sparse matrix}\n The input data.\n\n allow_nan : bool, default=False\n If True, do not throw error when `X` contains NaN.\n\n estimator_name : str, default=None\n The estimator name, used to construct the error message.\n\n input_name : str, default=\"\"\n The data name used to construct the error message. In particular\n if `input_name` is \"X\" and the data has NaN values and\n allow_nan is False, the error message will link to the imputer\n documentation.\n \"\"\"\n _assert_all_finite(\n X.data if sp.issparse(X) else X,\n allow_nan=allow_nan,\n estimator_name=estimator_name,\n input_name=input_name,\n )" }, { "identifier": "check_array", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_array(\n array,\n accept_sparse=False,\n *,\n accept_large_sparse=True,\n dtype=\"numeric\",\n order=None,\n copy=False,\n force_all_finite=True,\n ensure_2d=True,\n allow_nd=False,\n ensure_min_samples=1,\n ensure_min_features=1,\n estimator=None,\n input_name=\"\",\n):\n \"\"\"Input validation on an array, list, sparse matrix or similar.\n\n By default, the input is checked to be a non-empty 2D array containing\n only finite values. If the dtype of the array is object, attempt\n converting to float, raising on failure.\n\n Parameters\n ----------\n array : object\n Input object to check / convert.\n\n accept_sparse : str, bool or list/tuple of str, default=False\n String[s] representing allowed sparse matrix formats, such as 'csc',\n 'csr', etc. If the input is sparse but not in the allowed format,\n it will be converted to the first listed format. True allows the input\n to be any format. False means that a sparse matrix input will\n raise an error.\n\n accept_large_sparse : bool, default=True\n If a CSR, CSC, COO or BSR sparse matrix is supplied and accepted by\n accept_sparse, accept_large_sparse=False will cause it to be accepted\n only if its indices are stored with a 32-bit dtype.\n\n .. versionadded:: 0.20\n\n dtype : 'numeric', type, list of type or None, default='numeric'\n Data type of result. If None, the dtype of the input is preserved.\n If \"numeric\", dtype is preserved unless array.dtype is object.\n If dtype is a list of types, conversion on the first type is only\n performed if the dtype of the input is not in the list.\n\n order : {'F', 'C'} or None, default=None\n Whether an array will be forced to be fortran or c-style.\n When order is None (default), then if copy=False, nothing is ensured\n about the memory layout of the output array; otherwise (copy=True)\n the memory layout of the returned array is kept as close as possible\n to the original array.\n\n copy : bool, default=False\n Whether a forced copy will be triggered. If copy=False, a copy might\n be triggered by a conversion.\n\n force_all_finite : bool or 'allow-nan', default=True\n Whether to raise an error on np.inf, np.nan, pd.NA in array. The\n possibilities are:\n\n - True: Force all values of array to be finite.\n - False: accepts np.inf, np.nan, pd.NA in array.\n - 'allow-nan': accepts only np.nan and pd.NA values in array. Values\n cannot be infinite.\n\n .. 
versionadded:: 0.20\n ``force_all_finite`` accepts the string ``'allow-nan'``.\n\n .. versionchanged:: 0.23\n Accepts `pd.NA` and converts it into `np.nan`\n\n ensure_2d : bool, default=True\n Whether to raise a value error if array is not 2D.\n\n allow_nd : bool, default=False\n Whether to allow array.ndim > 2.\n\n ensure_min_samples : int, default=1\n Make sure that the array has a minimum number of samples in its first\n axis (rows for a 2D array). Setting to 0 disables this check.\n\n ensure_min_features : int, default=1\n Make sure that the 2D array has some minimum number of features\n (columns). The default value of 1 rejects empty datasets.\n This check is only enforced when the input data has effectively 2\n dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0\n disables this check.\n\n estimator : str or estimator instance, default=None\n If passed, include the name of the estimator in warning messages.\n\n input_name : str, default=\"\"\n The data name used to construct the error message. In particular\n if `input_name` is \"X\" and the data has NaN values and\n allow_nan is False, the error message will link to the imputer\n documentation.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n array_converted : object\n The converted and validated array.\n \"\"\"\n if isinstance(array, np.matrix):\n raise TypeError(\n \"np.matrix is not supported. Please convert to a numpy array with \"\n \"np.asarray. For more information see: \"\n \"https://numpy.org/doc/stable/reference/generated/numpy.matrix.html\"\n )\n\n xp, is_array_api_compliant = get_namespace(array)\n\n # store reference to original array to check if copy is needed when\n # function returns\n array_orig = array\n\n # store whether originally we wanted numeric dtype\n dtype_numeric = isinstance(dtype, str) and dtype == \"numeric\"\n\n dtype_orig = getattr(array, \"dtype\", None)\n if not is_array_api_compliant and not hasattr(dtype_orig, \"kind\"):\n # not a data type (e.g. a column named dtype in a pandas DataFrame)\n dtype_orig = None\n\n # check if the object contains several dtypes (typically a pandas\n # DataFrame), and store them. If not, store None.\n dtypes_orig = None\n pandas_requires_conversion = False\n if hasattr(array, \"dtypes\") and hasattr(array.dtypes, \"__array__\"):\n # throw warning if columns are sparse. 
If all columns are sparse, then\n # array.sparse exists and sparsity will be preserved (later).\n with suppress(ImportError):\n from pandas import SparseDtype\n\n def is_sparse(dtype):\n return isinstance(dtype, SparseDtype)\n\n if not hasattr(array, \"sparse\") and array.dtypes.apply(is_sparse).any():\n warnings.warn(\n \"pandas.DataFrame with sparse columns found.\"\n \"It will be converted to a dense numpy array.\"\n )\n\n dtypes_orig = list(array.dtypes)\n pandas_requires_conversion = any(\n _pandas_dtype_needs_early_conversion(i) for i in dtypes_orig\n )\n if all(isinstance(dtype_iter, np.dtype) for dtype_iter in dtypes_orig):\n dtype_orig = np.result_type(*dtypes_orig)\n elif pandas_requires_conversion and any(d == object for d in dtypes_orig):\n # Force object if any of the dtypes is an object\n dtype_orig = object\n\n elif (_is_extension_array_dtype(array) or hasattr(array, \"iloc\")) and hasattr(\n array, \"dtype\"\n ):\n # array is a pandas series\n pandas_requires_conversion = _pandas_dtype_needs_early_conversion(array.dtype)\n if isinstance(array.dtype, np.dtype):\n dtype_orig = array.dtype\n else:\n # Set to None to let array.astype work out the best dtype\n dtype_orig = None\n\n if dtype_numeric:\n if (\n dtype_orig is not None\n and hasattr(dtype_orig, \"kind\")\n and dtype_orig.kind == \"O\"\n ):\n # if input is object, convert to float.\n dtype = xp.float64\n else:\n dtype = None\n\n if isinstance(dtype, (list, tuple)):\n if dtype_orig is not None and dtype_orig in dtype:\n # no dtype conversion required\n dtype = None\n else:\n # dtype conversion required. Let's select the first element of the\n # list of accepted types.\n dtype = dtype[0]\n\n if pandas_requires_conversion:\n # pandas dataframe requires conversion earlier to handle extension dtypes with\n # nans\n # Use the original dtype for conversion if dtype is None\n new_dtype = dtype_orig if dtype is None else dtype\n array = array.astype(new_dtype)\n # Since we converted here, we do not need to convert again later\n dtype = None\n\n if dtype is not None and _is_numpy_namespace(xp):\n dtype = np.dtype(dtype)\n\n if force_all_finite not in (True, False, \"allow-nan\"):\n raise ValueError(\n 'force_all_finite should be a bool or \"allow-nan\". 
Got {!r} instead'.format(\n force_all_finite\n )\n )\n\n if dtype is not None and _is_numpy_namespace(xp):\n # convert to dtype object to conform to Array API to be use `xp.isdtype` later\n dtype = np.dtype(dtype)\n\n estimator_name = _check_estimator_name(estimator)\n context = \" by %s\" % estimator_name if estimator is not None else \"\"\n\n # When all dataframe columns are sparse, convert to a sparse array\n if hasattr(array, \"sparse\") and array.ndim > 1:\n with suppress(ImportError):\n from pandas import SparseDtype # noqa: F811\n\n def is_sparse(dtype):\n return isinstance(dtype, SparseDtype)\n\n if array.dtypes.apply(is_sparse).all():\n # DataFrame.sparse only supports `to_coo`\n array = array.sparse.to_coo()\n if array.dtype == np.dtype(\"object\"):\n unique_dtypes = set([dt.subtype.name for dt in array_orig.dtypes])\n if len(unique_dtypes) > 1:\n raise ValueError(\n \"Pandas DataFrame with mixed sparse extension arrays \"\n \"generated a sparse matrix with object dtype which \"\n \"can not be converted to a scipy sparse matrix.\"\n \"Sparse extension arrays should all have the same \"\n \"numeric type.\"\n )\n\n if sp.issparse(array):\n _ensure_no_complex_data(array)\n array = _ensure_sparse_format(\n array,\n accept_sparse=accept_sparse,\n dtype=dtype,\n copy=copy,\n force_all_finite=force_all_finite,\n accept_large_sparse=accept_large_sparse,\n estimator_name=estimator_name,\n input_name=input_name,\n )\n else:\n # If np.array(..) gives ComplexWarning, then we convert the warning\n # to an error. This is needed because specifying a non complex\n # dtype to the function converts complex to real dtype,\n # thereby passing the test made in the lines following the scope\n # of warnings context manager.\n with warnings.catch_warnings():\n try:\n warnings.simplefilter(\"error\", ComplexWarning)\n if dtype is not None and xp.isdtype(dtype, \"integral\"):\n # Conversion float -> int should not contain NaN or\n # inf (numpy#14412). We cannot use casting='safe' because\n # then conversion float -> int would be disallowed.\n array = _asarray_with_order(array, order=order, xp=xp)\n if xp.isdtype(array.dtype, (\"real floating\", \"complex floating\")):\n _assert_all_finite(\n array,\n allow_nan=False,\n msg_dtype=dtype,\n estimator_name=estimator_name,\n input_name=input_name,\n )\n array = xp.astype(array, dtype, copy=False)\n else:\n array = _asarray_with_order(array, order=order, dtype=dtype, xp=xp)\n except ComplexWarning as complex_warning:\n raise ValueError(\n \"Complex data not supported\\n{}\\n\".format(array)\n ) from complex_warning\n\n # It is possible that the np.array(..) gave no warning. This happens\n # when no dtype conversion happened, for example dtype = None. The\n # result is that np.array(..) 
produces an array of complex dtype\n # and we need to catch and raise exception for such cases.\n _ensure_no_complex_data(array)\n\n if ensure_2d:\n # If input is scalar raise error\n if array.ndim == 0:\n raise ValueError(\n \"Expected 2D array, got scalar array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n # If input is 1D raise error\n if array.ndim == 1:\n raise ValueError(\n \"Expected 2D array, got 1D array instead:\\narray={}.\\n\"\n \"Reshape your data either using array.reshape(-1, 1) if \"\n \"your data has a single feature or array.reshape(1, -1) \"\n \"if it contains a single sample.\".format(array)\n )\n\n if dtype_numeric and hasattr(array.dtype, \"kind\") and array.dtype.kind in \"USV\":\n raise ValueError(\n \"dtype='numeric' is not compatible with arrays of bytes/strings.\"\n \"Convert your data to numeric values explicitly instead.\"\n )\n if not allow_nd and array.ndim >= 3:\n raise ValueError(\n \"Found array with dim %d. %s expected <= 2.\"\n % (array.ndim, estimator_name)\n )\n\n if force_all_finite:\n _assert_all_finite(\n array,\n input_name=input_name,\n estimator_name=estimator_name,\n allow_nan=force_all_finite == \"allow-nan\",\n )\n\n if ensure_min_samples > 0:\n n_samples = _num_samples(array)\n if n_samples < ensure_min_samples:\n raise ValueError(\n \"Found array with %d sample(s) (shape=%s) while a\"\n \" minimum of %d is required%s.\"\n % (n_samples, array.shape, ensure_min_samples, context)\n )\n\n if ensure_min_features > 0 and array.ndim == 2:\n n_features = array.shape[1]\n if n_features < ensure_min_features:\n raise ValueError(\n \"Found array with %d feature(s) (shape=%s) while\"\n \" a minimum of %d is required%s.\"\n % (n_features, array.shape, ensure_min_features, context)\n )\n\n if copy:\n if _is_numpy_namespace(xp):\n # only make a copy if `array` and `array_orig` may share memory`\n if np.may_share_memory(array, array_orig):\n array = _asarray_with_order(\n array, dtype=dtype, order=order, copy=True, xp=xp\n )\n else:\n # always make a copy for non-numpy arrays\n array = _asarray_with_order(\n array, dtype=dtype, order=order, copy=True, xp=xp\n )\n\n return array" }, { "identifier": "check_consistent_length", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def check_consistent_length(*arrays):\n \"\"\"Check that all arrays have consistent first dimensions.\n\n Checks whether all objects in arrays have the same shape or length.\n\n Parameters\n ----------\n *arrays : list or tuple of input objects.\n Objects that will be checked for consistent length.\n \"\"\"\n\n lengths = [_num_samples(X) for X in arrays if X is not None]\n uniques = np.unique(lengths)\n if len(uniques) > 1:\n raise ValueError(\n \"Found input variables with inconsistent numbers of samples: %r\"\n % [int(l) for l in lengths]\n )" }, { "identifier": "column_or_1d", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def column_or_1d(y, *, dtype=None, warn=False):\n \"\"\"Ravel column or 1d numpy array, else raises an error.\n\n Parameters\n ----------\n y : array-like\n Input data.\n\n dtype : data-type, default=None\n Data type for `y`.\n\n .. 
versionadded:: 1.2\n\n warn : bool, default=False\n To control display of warnings.\n\n Returns\n -------\n y : ndarray\n Output data.\n\n Raises\n ------\n ValueError\n If `y` is not a 1D array or a 2D array with a single row or column.\n \"\"\"\n xp, _ = get_namespace(y)\n y = check_array(\n y,\n ensure_2d=False,\n dtype=dtype,\n input_name=\"y\",\n force_all_finite=False,\n ensure_min_samples=0,\n )\n\n shape = y.shape\n if len(shape) == 1:\n return _asarray_with_order(xp.reshape(y, (-1,)), order=\"C\", xp=xp)\n if len(shape) == 2 and shape[1] == 1:\n if warn:\n warnings.warn(\n (\n \"A column-vector y was passed when a 1d array was\"\n \" expected. Please change the shape of y to \"\n \"(n_samples, ), for example using ravel().\"\n ),\n DataConversionWarning,\n stacklevel=2,\n )\n return _asarray_with_order(xp.reshape(y, (-1,)), order=\"C\", xp=xp)\n\n raise ValueError(\n \"y should be a 1d array, got an array of shape {} instead.\".format(shape)\n )" }, { "identifier": "_encode", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_encode.py", "snippet": "def _encode(values, *, uniques, check_unknown=True):\n \"\"\"Helper function to encode values into [0, n_uniques - 1].\n\n Uses pure python method for object dtype, and numpy method for\n all other dtypes.\n The numpy method has the limitation that the `uniques` need to\n be sorted. Importantly, this is not checked but assumed to already be\n the case. The calling method needs to ensure this for all non-object\n values.\n\n Parameters\n ----------\n values : ndarray\n Values to encode.\n uniques : ndarray\n The unique values in `values`. If the dtype is not object, then\n `uniques` needs to be sorted.\n check_unknown : bool, default=True\n If True, check for values in `values` that are not in `unique`\n and raise an error. This is ignored for object dtype, and treated as\n True in this case. This parameter is useful for\n _BaseEncoder._transform() to avoid calling _check_unknown()\n twice.\n\n Returns\n -------\n encoded : ndarray\n Encoded values\n \"\"\"\n if values.dtype.kind in \"OUS\":\n try:\n return _map_to_integer(values, uniques)\n except KeyError as e:\n raise ValueError(f\"y contains previously unseen labels: {str(e)}\")\n else:\n if check_unknown:\n diff = _check_unknown(values, uniques)\n if diff:\n raise ValueError(f\"y contains previously unseen labels: {str(diff)}\")\n return np.searchsorted(uniques, values)" }, { "identifier": "_unique", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_encode.py", "snippet": "def _unique(values, *, return_inverse=False, return_counts=False):\n \"\"\"Helper function to find unique values with support for python objects.\n\n Uses pure python method for object dtype, and numpy method for\n all other dtypes.\n\n Parameters\n ----------\n values : ndarray\n Values to check for unknowns.\n\n return_inverse : bool, default=False\n If True, also return the indices of the unique values.\n\n return_counts : bool, default=False\n If True, also return the number of times each unique item appears in\n values.\n\n Returns\n -------\n unique : ndarray\n The sorted unique values.\n\n unique_inverse : ndarray\n The indices to reconstruct the original array from the unique array.\n Only provided if `return_inverse` is True.\n\n unique_counts : ndarray\n The number of times each of the unique values comes up in the original\n array. 
Only provided if `return_counts` is True.\n \"\"\"\n if values.dtype == object:\n return _unique_python(\n values, return_inverse=return_inverse, return_counts=return_counts\n )\n # numerical\n return _unique_np(\n values, return_inverse=return_inverse, return_counts=return_counts\n )" }, { "identifier": "Interval", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class Interval(_Constraint):\n \"\"\"Constraint representing a typed interval.\n\n Parameters\n ----------\n type : {numbers.Integral, numbers.Real, RealNotInt}\n The set of numbers in which to set the interval.\n\n If RealNotInt, only reals that don't have the integer type\n are allowed. For example 1.0 is allowed but 1 is not.\n\n left : float or int or None\n The left bound of the interval. None means left bound is -∞.\n\n right : float, int or None\n The right bound of the interval. None means right bound is +∞.\n\n closed : {\"left\", \"right\", \"both\", \"neither\"}\n Whether the interval is open or closed. Possible choices are:\n\n - `\"left\"`: the interval is closed on the left and open on the right.\n It is equivalent to the interval `[ left, right )`.\n - `\"right\"`: the interval is closed on the right and open on the left.\n It is equivalent to the interval `( left, right ]`.\n - `\"both\"`: the interval is closed.\n It is equivalent to the interval `[ left, right ]`.\n - `\"neither\"`: the interval is open.\n It is equivalent to the interval `( left, right )`.\n\n Notes\n -----\n Setting a bound to `None` and setting the interval closed is valid. For instance,\n strictly speaking, `Interval(Real, 0, None, closed=\"both\")` corresponds to\n `[0, +∞) U {+∞}`.\n \"\"\"\n\n def __init__(self, type, left, right, *, closed):\n super().__init__()\n self.type = type\n self.left = left\n self.right = right\n self.closed = closed\n\n self._check_params()\n\n def _check_params(self):\n if self.type not in (Integral, Real, RealNotInt):\n raise ValueError(\n \"type must be either numbers.Integral, numbers.Real or RealNotInt.\"\n f\" Got {self.type} instead.\"\n )\n\n if self.closed not in (\"left\", \"right\", \"both\", \"neither\"):\n raise ValueError(\n \"closed must be either 'left', 'right', 'both' or 'neither'. \"\n f\"Got {self.closed} instead.\"\n )\n\n if self.type is Integral:\n suffix = \"for an interval over the integers.\"\n if self.left is not None and not isinstance(self.left, Integral):\n raise TypeError(f\"Expecting left to be an int {suffix}\")\n if self.right is not None and not isinstance(self.right, Integral):\n raise TypeError(f\"Expecting right to be an int {suffix}\")\n if self.left is None and self.closed in (\"left\", \"both\"):\n raise ValueError(\n f\"left can't be None when closed == {self.closed} {suffix}\"\n )\n if self.right is None and self.closed in (\"right\", \"both\"):\n raise ValueError(\n f\"right can't be None when closed == {self.closed} {suffix}\"\n )\n else:\n if self.left is not None and not isinstance(self.left, Real):\n raise TypeError(\"Expecting left to be a real number.\")\n if self.right is not None and not isinstance(self.right, Real):\n raise TypeError(\"Expecting right to be a real number.\")\n\n if self.right is not None and self.left is not None and self.right <= self.left:\n raise ValueError(\n f\"right can't be less than left. 
Got left={self.left} and \"\n f\"right={self.right}\"\n )\n\n def __contains__(self, val):\n if np.isnan(val):\n return False\n\n left_cmp = operator.lt if self.closed in (\"left\", \"both\") else operator.le\n right_cmp = operator.gt if self.closed in (\"right\", \"both\") else operator.ge\n\n left = -np.inf if self.left is None else self.left\n right = np.inf if self.right is None else self.right\n\n if left_cmp(val, left):\n return False\n if right_cmp(val, right):\n return False\n return True\n\n def is_satisfied_by(self, val):\n if not isinstance(val, self.type):\n return False\n\n return val in self\n\n def __str__(self):\n type_str = \"an int\" if self.type is Integral else \"a float\"\n left_bracket = \"[\" if self.closed in (\"left\", \"both\") else \"(\"\n left_bound = \"-inf\" if self.left is None else self.left\n right_bound = \"inf\" if self.right is None else self.right\n right_bracket = \"]\" if self.closed in (\"right\", \"both\") else \")\"\n\n # better repr if the bounds were given as integers\n if not self.type == Integral and isinstance(self.left, Real):\n left_bound = float(left_bound)\n if not self.type == Integral and isinstance(self.right, Real):\n right_bound = float(right_bound)\n\n return (\n f\"{type_str} in the range \"\n f\"{left_bracket}{left_bound}, {right_bound}{right_bracket}\"\n )" }, { "identifier": "StrOptions", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "class StrOptions(Options):\n \"\"\"Constraint representing a finite set of strings.\n\n Parameters\n ----------\n options : set of str\n The set of valid strings.\n\n deprecated : set of str or None, default=None\n A subset of the `options` to mark as deprecated in the string\n representation of the constraint.\n \"\"\"\n\n def __init__(self, options, *, deprecated=None):\n super().__init__(type=str, options=options, deprecated=deprecated)" }, { "identifier": "validate_params", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/_param_validation.py", "snippet": "def validate_params(parameter_constraints, *, prefer_skip_nested_validation):\n \"\"\"Decorator to validate types and values of functions and methods.\n\n Parameters\n ----------\n parameter_constraints : dict\n A dictionary `param_name: list of constraints`. See the docstring of\n `validate_parameter_constraints` for a description of the accepted constraints.\n\n Note that the *args and **kwargs parameters are not validated and must not be\n present in the parameter_constraints dictionary.\n\n prefer_skip_nested_validation : bool\n If True, the validation of parameters of inner estimators or functions\n called by the decorated function will be skipped.\n\n This is useful to avoid validating many times the parameters passed by the\n user from the public facing API. 
It's also useful to avoid validating\n parameters that we pass internally to inner functions that are guaranteed to\n be valid by the test suite.\n\n It should be set to True for most functions, except for those that receive\n non-validated objects as parameters or that are just wrappers around classes\n because they only perform a partial validation.\n\n Returns\n -------\n decorated_function : function or method\n The decorated function.\n \"\"\"\n\n def decorator(func):\n # The dict of parameter constraints is set as an attribute of the function\n # to make it possible to dynamically introspect the constraints for\n # automatic testing.\n setattr(func, \"_skl_parameter_constraints\", parameter_constraints)\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n global_skip_validation = get_config()[\"skip_parameter_validation\"]\n if global_skip_validation:\n return func(*args, **kwargs)\n\n func_sig = signature(func)\n\n # Map *args/**kwargs to the function signature\n params = func_sig.bind(*args, **kwargs)\n params.apply_defaults()\n\n # ignore self/cls and positional/keyword markers\n to_ignore = [\n p.name\n for p in func_sig.parameters.values()\n if p.kind in (p.VAR_POSITIONAL, p.VAR_KEYWORD)\n ]\n to_ignore += [\"self\", \"cls\"]\n params = {k: v for k, v in params.arguments.items() if k not in to_ignore}\n\n validate_parameter_constraints(\n parameter_constraints, params, caller_name=func.__qualname__\n )\n\n try:\n with config_context(\n skip_parameter_validation=(\n prefer_skip_nested_validation or global_skip_validation\n )\n ):\n return func(*args, **kwargs)\n except InvalidParameterError as e:\n # When the function is just a wrapper around an estimator, we allow\n # the function to delegate validation to the estimator, but we replace\n # the name of the estimator by the name of the function in the error\n # message to avoid confusion.\n msg = re.sub(\n r\"parameter of \\w+ must be\",\n f\"parameter of {func.__qualname__} must be\",\n str(e),\n )\n raise InvalidParameterError(msg) from e\n\n return wrapper\n\n return decorator" }, { "identifier": "stable_cumsum", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/extmath.py", "snippet": "def stable_cumsum(arr, axis=None, rtol=1e-05, atol=1e-08):\n \"\"\"Use high precision for cumsum and check that final value matches sum.\n\n Warns if the final cumulative sum does not match the sum (up to the chosen\n tolerance).\n\n Parameters\n ----------\n arr : array-like\n To be cumulatively summed as flat.\n axis : int, default=None\n Axis along which the cumulative sum is computed.\n The default (None) is to compute the cumsum over the flattened array.\n rtol : float, default=1e-05\n Relative tolerance, see ``np.allclose``.\n atol : float, default=1e-08\n Absolute tolerance, see ``np.allclose``.\n\n Returns\n -------\n out : ndarray\n Array with the cumulative sums along the chosen axis.\n \"\"\"\n out = np.cumsum(arr, axis=axis, dtype=np.float64)\n expected = np.sum(arr, axis=axis, dtype=np.float64)\n if not np.all(\n np.isclose(\n out.take(-1, axis=axis), expected, rtol=rtol, atol=atol, equal_nan=True\n )\n ):\n warnings.warn(\n (\n \"cumsum was found to be unstable: \"\n \"its last element does not correspond to sum\"\n ),\n RuntimeWarning,\n )\n return out" }, { "identifier": "trapezoid", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/fixes.py", "snippet": "def _object_dtype_isnan(X):\ndef _percentile(a, q, *, method=\"linear\", **kwargs):\ndef _get_threadpool_controller():\ndef 
threadpool_limits(limits=None, user_api=None):\ndef threadpool_info():\ndef delayed(function):\ndef _mode(a, axis=0):\n def _sparse_linalg_cg(A, b, **kwargs):\ndef _open_text(data_module, data_file_name):\ndef _open_binary(data_module, data_file_name):\ndef _read_text(descr_module, descr_file_name):\ndef _path(data_module, data_file_name):\ndef _is_resource(data_module, data_file_name):\ndef _contents(data_module):" }, { "identifier": "type_of_target", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/multiclass.py", "snippet": "def type_of_target(y, input_name=\"\"):\n \"\"\"Determine the type of data indicated by the target.\n\n Note that this type is the most specific type that can be inferred.\n For example:\n\n * ``binary`` is more specific but compatible with ``multiclass``.\n * ``multiclass`` of integers is more specific but compatible with\n ``continuous``.\n * ``multilabel-indicator`` is more specific but compatible with\n ``multiclass-multioutput``.\n\n Parameters\n ----------\n y : {array-like, sparse matrix}\n Target values. If a sparse matrix, `y` is expected to be a\n CSR/CSC matrix.\n\n input_name : str, default=\"\"\n The data name used to construct the error message.\n\n .. versionadded:: 1.1.0\n\n Returns\n -------\n target_type : str\n One of:\n\n * 'continuous': `y` is an array-like of floats that are not all\n integers, and is 1d or a column vector.\n * 'continuous-multioutput': `y` is a 2d array of floats that are\n not all integers, and both dimensions are of size > 1.\n * 'binary': `y` contains <= 2 discrete values and is 1d or a column\n vector.\n * 'multiclass': `y` contains more than two discrete values, is not a\n sequence of sequences, and is 1d or a column vector.\n * 'multiclass-multioutput': `y` is a 2d array that contains more\n than two discrete values, is not a sequence of sequences, and both\n dimensions are of size > 1.\n * 'multilabel-indicator': `y` is a label indicator matrix, an array\n of two dimensions with at least two columns, and at most 2 unique\n values.\n * 'unknown': `y` is array-like but none of the above, such as a 3d\n array, sequence of sequences, or an array of non-sequence objects.\n\n Examples\n --------\n >>> from sklearn.utils.multiclass import type_of_target\n >>> import numpy as np\n >>> type_of_target([0.1, 0.6])\n 'continuous'\n >>> type_of_target([1, -1, -1, 1])\n 'binary'\n >>> type_of_target(['a', 'b', 'a'])\n 'binary'\n >>> type_of_target([1.0, 2.0])\n 'binary'\n >>> type_of_target([1, 0, 2])\n 'multiclass'\n >>> type_of_target([1.0, 0.0, 3.0])\n 'multiclass'\n >>> type_of_target(['a', 'b', 'c'])\n 'multiclass'\n >>> type_of_target(np.array([[1, 2], [3, 1]]))\n 'multiclass-multioutput'\n >>> type_of_target([[1, 2]])\n 'multilabel-indicator'\n >>> type_of_target(np.array([[1.5, 2.0], [3.0, 1.6]]))\n 'continuous-multioutput'\n >>> type_of_target(np.array([[0, 1], [1, 1]]))\n 'multilabel-indicator'\n \"\"\"\n xp, is_array_api_compliant = get_namespace(y)\n valid = (\n (isinstance(y, Sequence) or issparse(y) or hasattr(y, \"__array__\"))\n and not isinstance(y, str)\n or is_array_api_compliant\n )\n\n if not valid:\n raise ValueError(\n \"Expected array-like (array or non-string sequence), got %r\" % y\n )\n\n sparse_pandas = y.__class__.__name__ in [\"SparseSeries\", \"SparseArray\"]\n if sparse_pandas:\n raise ValueError(\"y cannot be class 'SparseSeries' or 'SparseArray'\")\n\n if is_multilabel(y):\n return \"multilabel-indicator\"\n\n # DeprecationWarning will be replaced by ValueError, see NEP 34\n # 
https://numpy.org/neps/nep-0034-infer-dtype-is-object.html\n # We therefore catch both deprecation (NumPy < 1.24) warning and\n # value error (NumPy >= 1.24).\n check_y_kwargs = dict(\n accept_sparse=True,\n allow_nd=True,\n force_all_finite=False,\n ensure_2d=False,\n ensure_min_samples=0,\n ensure_min_features=0,\n )\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"error\", VisibleDeprecationWarning)\n if not issparse(y):\n try:\n y = check_array(y, dtype=None, **check_y_kwargs)\n except (VisibleDeprecationWarning, ValueError) as e:\n if str(e).startswith(\"Complex data not supported\"):\n raise\n\n # dtype=object should be provided explicitly for ragged arrays,\n # see NEP 34\n y = check_array(y, dtype=object, **check_y_kwargs)\n\n # The old sequence of sequences format\n try:\n if (\n not hasattr(y[0], \"__array__\")\n and isinstance(y[0], Sequence)\n and not isinstance(y[0], str)\n ):\n raise ValueError(\n \"You appear to be using a legacy multi-label data\"\n \" representation. Sequence of sequences are no\"\n \" longer supported; use a binary array or sparse\"\n \" matrix instead - the MultiLabelBinarizer\"\n \" transformer can convert to this format.\"\n )\n except IndexError:\n pass\n\n # Invalid inputs\n if y.ndim not in (1, 2):\n # Number of dimension greater than 2: [[[1, 2]]]\n return \"unknown\"\n if not min(y.shape):\n # Empty ndarray: []/[[]]\n if y.ndim == 1:\n # 1-D empty array: []\n return \"binary\" # []\n # 2-D empty array: [[]]\n return \"unknown\"\n if not issparse(y) and y.dtype == object and not isinstance(y.flat[0], str):\n # [obj_1] and not [\"label_1\"]\n return \"unknown\"\n\n # Check if multioutput\n if y.ndim == 2 and y.shape[1] > 1:\n suffix = \"-multioutput\" # [[1, 2], [1, 2]]\n else:\n suffix = \"\" # [1, 2, 3] or [[1], [2], [3]]\n\n # Check float and contains non-integer float values\n if xp.isdtype(y.dtype, \"real floating\"):\n # [.1, .2, 3] or [[.1, .2, 3]] or [[1., .2]] and not [1., 2., 3.]\n data = y.data if issparse(y) else y\n if xp.any(data != xp.astype(data, int)):\n _assert_all_finite(data, input_name=input_name)\n return \"continuous\" + suffix\n\n # Check multiclass\n first_row = y[0] if not issparse(y) else y.getrow(0).data\n if xp.unique_values(y).shape[0] > 2 or (y.ndim == 2 and len(first_row) > 1):\n # [1, 2, 3] or [[1., 2., 3]] or [[1, 2]]\n return \"multiclass\" + suffix\n else:\n return \"binary\" # [1, 2] or [[\"a\"], [\"b\"]]" }, { "identifier": "count_nonzero", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/sparsefuncs.py", "snippet": "def count_nonzero(X, axis=None, sample_weight=None):\n \"\"\"A variant of X.getnnz() with extension to weighting on axis 0.\n\n Useful in efficiently calculating multilabel metrics.\n\n Parameters\n ----------\n X : sparse matrix of shape (n_samples, n_labels)\n Input data. It should be of CSR format.\n\n axis : {0, 1}, default=None\n The axis on which the data is aggregated.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Weight for each row of X.\n\n Returns\n -------\n nnz : int, float, ndarray of shape (n_samples,) or ndarray of shape (n_features,)\n Number of non-zero values in the array along a given axis. 
Otherwise,\n the total number of non-zero values in the array is returned.\n \"\"\"\n if axis == -1:\n axis = 1\n elif axis == -2:\n axis = 0\n elif X.format != \"csr\":\n raise TypeError(\"Expected CSR sparse format, got {0}\".format(X.format))\n\n # We rely here on the fact that np.diff(Y.indptr) for a CSR\n # will return the number of nonzero entries in each row.\n # A bincount over Y.indices will return the number of nonzeros\n # in each column. See ``csr_matrix.getnnz`` in scipy >= 0.14.\n if axis is None:\n if sample_weight is None:\n return X.nnz\n else:\n return np.dot(np.diff(X.indptr), sample_weight)\n elif axis == 1:\n out = np.diff(X.indptr)\n if sample_weight is None:\n # astype here is for consistency with axis=0 dtype\n return out.astype(\"intp\")\n return out * sample_weight\n elif axis == 0:\n if sample_weight is None:\n return np.bincount(X.indices, minlength=X.shape[1])\n else:\n weights = np.repeat(sample_weight, np.diff(X.indptr))\n return np.bincount(X.indices, minlength=X.shape[1], weights=weights)\n else:\n raise ValueError(\"Unsupported axis: {0}\".format(axis))" }, { "identifier": "_check_pos_label_consistency", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_pos_label_consistency(pos_label, y_true):\n \"\"\"Check if `pos_label` need to be specified or not.\n\n In binary classification, we fix `pos_label=1` if the labels are in the set\n {-1, 1} or {0, 1}. Otherwise, we raise an error asking to specify the\n `pos_label` parameters.\n\n Parameters\n ----------\n pos_label : int, float, bool, str or None\n The positive label.\n y_true : ndarray of shape (n_samples,)\n The target vector.\n\n Returns\n -------\n pos_label : int, float, bool or str\n If `pos_label` can be inferred, it will be returned.\n\n Raises\n ------\n ValueError\n In the case that `y_true` does not have label in {-1, 1} or {0, 1},\n it will raise a `ValueError`.\n \"\"\"\n # ensure binary classification if pos_label is not specified\n # classes.dtype.kind in ('O', 'U', 'S') is required to avoid\n # triggering a FutureWarning by calling np.array_equal(a, b)\n # when elements in the two arrays are not comparable.\n classes = np.unique(y_true)\n if pos_label is None and (\n classes.dtype.kind in \"OUS\"\n or not (\n np.array_equal(classes, [0, 1])\n or np.array_equal(classes, [-1, 1])\n or np.array_equal(classes, [0])\n or np.array_equal(classes, [-1])\n or np.array_equal(classes, [1])\n )\n ):\n classes_repr = \", \".join([repr(c) for c in classes.tolist()])\n raise ValueError(\n f\"y_true takes value in {{{classes_repr}}} and pos_label is not \"\n \"specified: either make y_true take value in {0, 1} or \"\n \"{-1, 1} or pass pos_label explicitly.\"\n )\n elif pos_label is None:\n pos_label = 1\n\n return pos_label" }, { "identifier": "_check_sample_weight", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/utils/validation.py", "snippet": "def _check_sample_weight(\n sample_weight, X, dtype=None, copy=False, only_non_negative=False\n):\n \"\"\"Validate sample weights.\n\n Note that passing sample_weight=None will output an array of ones.\n Therefore, in some cases, you may want to protect the call with:\n if sample_weight is not None:\n sample_weight = _check_sample_weight(...)\n\n Parameters\n ----------\n sample_weight : {ndarray, Number or None}, shape (n_samples,)\n Input sample weights.\n\n X : {ndarray, list, sparse matrix}\n Input data.\n\n only_non_negative : bool, default=False,\n Whether or not the weights are expected to be 
non-negative.\n\n .. versionadded:: 1.0\n\n dtype : dtype, default=None\n dtype of the validated `sample_weight`.\n If None, and the input `sample_weight` is an array, the dtype of the\n input is preserved; otherwise an array with the default numpy dtype\n is be allocated. If `dtype` is not one of `float32`, `float64`,\n `None`, the output will be of dtype `float64`.\n\n copy : bool, default=False\n If True, a copy of sample_weight will be created.\n\n Returns\n -------\n sample_weight : ndarray of shape (n_samples,)\n Validated sample weight. It is guaranteed to be \"C\" contiguous.\n \"\"\"\n n_samples = _num_samples(X)\n\n if dtype is not None and dtype not in [np.float32, np.float64]:\n dtype = np.float64\n\n if sample_weight is None:\n sample_weight = np.ones(n_samples, dtype=dtype)\n elif isinstance(sample_weight, numbers.Number):\n sample_weight = np.full(n_samples, sample_weight, dtype=dtype)\n else:\n if dtype is None:\n dtype = [np.float64, np.float32]\n sample_weight = check_array(\n sample_weight,\n accept_sparse=False,\n ensure_2d=False,\n dtype=dtype,\n order=\"C\",\n copy=copy,\n input_name=\"sample_weight\",\n )\n if sample_weight.ndim != 1:\n raise ValueError(\"Sample weights must be 1D array or scalar\")\n\n if sample_weight.shape != (n_samples,):\n raise ValueError(\n \"sample_weight.shape == {}, expected {}!\".format(\n sample_weight.shape, (n_samples,)\n )\n )\n\n if only_non_negative:\n check_non_negative(sample_weight, \"`sample_weight`\")\n\n return sample_weight" }, { "identifier": "_average_binary_score", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_base.py", "snippet": "def _average_binary_score(binary_metric, y_true, y_score, average, sample_weight=None):\n \"\"\"Average a binary metric for multilabel classification.\n\n Parameters\n ----------\n y_true : array, shape = [n_samples] or [n_samples, n_classes]\n True binary labels in binary label indicators.\n\n y_score : array, shape = [n_samples] or [n_samples, n_classes]\n Target scores, can either be probability estimates of the positive\n class, confidence values, or binary decisions.\n\n average : {None, 'micro', 'macro', 'samples', 'weighted'}, default='macro'\n If ``None``, the scores for each class are returned. Otherwise,\n this determines the type of averaging performed on the data:\n\n ``'micro'``:\n Calculate metrics globally by considering each element of the label\n indicator matrix as a label.\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. 
This does not take label imbalance into account.\n ``'weighted'``:\n Calculate metrics for each label, and find their average, weighted\n by support (the number of true instances for each label).\n ``'samples'``:\n Calculate metrics for each instance, and find their average.\n\n Will be ignored when ``y_true`` is binary.\n\n sample_weight : array-like of shape (n_samples,), default=None\n Sample weights.\n\n binary_metric : callable, returns shape [n_classes]\n The binary metric function to use.\n\n Returns\n -------\n score : float or array of shape [n_classes]\n If not ``None``, average the score, else return the score for each\n classes.\n\n \"\"\"\n average_options = (None, \"micro\", \"macro\", \"weighted\", \"samples\")\n if average not in average_options:\n raise ValueError(\"average has to be one of {0}\".format(average_options))\n\n y_type = type_of_target(y_true)\n if y_type not in (\"binary\", \"multilabel-indicator\"):\n raise ValueError(\"{0} format is not supported\".format(y_type))\n\n if y_type == \"binary\":\n return binary_metric(y_true, y_score, sample_weight=sample_weight)\n\n check_consistent_length(y_true, y_score, sample_weight)\n y_true = check_array(y_true)\n y_score = check_array(y_score)\n\n not_average_axis = 1\n score_weight = sample_weight\n average_weight = None\n\n if average == \"micro\":\n if score_weight is not None:\n score_weight = np.repeat(score_weight, y_true.shape[1])\n y_true = y_true.ravel()\n y_score = y_score.ravel()\n\n elif average == \"weighted\":\n if score_weight is not None:\n average_weight = np.sum(\n np.multiply(y_true, np.reshape(score_weight, (-1, 1))), axis=0\n )\n else:\n average_weight = np.sum(y_true, axis=0)\n if np.isclose(average_weight.sum(), 0.0):\n return 0\n\n elif average == \"samples\":\n # swap average_weight <-> score_weight\n average_weight = score_weight\n score_weight = None\n not_average_axis = 0\n\n if y_true.ndim == 1:\n y_true = y_true.reshape((-1, 1))\n\n if y_score.ndim == 1:\n y_score = y_score.reshape((-1, 1))\n\n n_classes = y_score.shape[not_average_axis]\n score = np.zeros((n_classes,))\n for c in range(n_classes):\n y_true_c = y_true.take([c], axis=not_average_axis).ravel()\n y_score_c = y_score.take([c], axis=not_average_axis).ravel()\n score[c] = binary_metric(y_true_c, y_score_c, sample_weight=score_weight)\n\n # Average the results\n if average is not None:\n if average_weight is not None:\n # Scores with 0 weights are forced to be 0, preventing the average\n # score from being affected by 0-weighted NaN elements.\n average_weight = np.asarray(average_weight)\n score[average_weight == 0] = 0\n return np.average(score, weights=average_weight)\n else:\n return score" }, { "identifier": "_average_multiclass_ovo_score", "path": ".pythonlibs/lib/python3.10/site-packages/sklearn/metrics/_base.py", "snippet": "def _average_multiclass_ovo_score(binary_metric, y_true, y_score, average=\"macro\"):\n \"\"\"Average one-versus-one scores for multiclass classification.\n\n Uses the binary metric for one-vs-one multiclass classification,\n where the score is computed according to the Hand & Till (2001) algorithm.\n\n Parameters\n ----------\n binary_metric : callable\n The binary metric function to use that accepts the following as input:\n y_true_target : array, shape = [n_samples_target]\n Some sub-array of y_true for a pair of classes designated\n positive and negative in the one-vs-one scheme.\n y_score_target : array, shape = [n_samples_target]\n Scores corresponding to the probability estimates\n of a sample 
belonging to the designated positive class label\n\n y_true : array-like of shape (n_samples,)\n True multiclass labels.\n\n y_score : array-like of shape (n_samples, n_classes)\n Target scores corresponding to probability estimates of a sample\n belonging to a particular class.\n\n average : {'macro', 'weighted'}, default='macro'\n Determines the type of averaging performed on the pairwise binary\n metric scores:\n ``'macro'``:\n Calculate metrics for each label, and find their unweighted\n mean. This does not take label imbalance into account. Classes\n are assumed to be uniformly distributed.\n ``'weighted'``:\n Calculate metrics for each label, taking into account the\n prevalence of the classes.\n\n Returns\n -------\n score : float\n Average of the pairwise binary metric scores.\n \"\"\"\n check_consistent_length(y_true, y_score)\n\n y_true_unique = np.unique(y_true)\n n_classes = y_true_unique.shape[0]\n n_pairs = n_classes * (n_classes - 1) // 2\n pair_scores = np.empty(n_pairs)\n\n is_weighted = average == \"weighted\"\n prevalence = np.empty(n_pairs) if is_weighted else None\n\n # Compute scores treating a as positive class and b as negative class,\n # then b as positive class and a as negative class\n for ix, (a, b) in enumerate(combinations(y_true_unique, 2)):\n a_mask = y_true == a\n b_mask = y_true == b\n ab_mask = np.logical_or(a_mask, b_mask)\n\n if is_weighted:\n prevalence[ix] = np.average(ab_mask)\n\n a_true = a_mask[ab_mask]\n b_true = b_mask[ab_mask]\n\n a_true_score = binary_metric(a_true, y_score[ab_mask, a])\n b_true_score = binary_metric(b_true, y_score[ab_mask, b])\n pair_scores[ix] = (a_true_score + b_true_score) / 2\n\n return np.average(pair_scores, weights=prevalence)" } ]
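The context entries above collect the validation and encoding helpers that the ranking metrics in this row lean on (length checks, 1d coercion, target-type detection, label binarization). As an illustrative sketch of how a metric typically chains them before any scoring, assuming only the public sklearn API and invented toy arrays:

import numpy as np
from sklearn.preprocessing import label_binarize
from sklearn.utils import check_consistent_length, column_or_1d
from sklearn.utils.multiclass import type_of_target

# Toy inputs, invented for illustration only.
y_true = np.array([0, 1, 1, 0])
y_score = np.array([0.1, 0.8, 0.65, 0.3])

# Ranking metrics first check lengths and flatten column vectors.
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)

# type_of_target decides which branch (binary / multiclass / multilabel) runs.
print(type_of_target(y_true))  # 'binary'

# label_binarize builds the one-vs-rest indicator matrix used by the
# multiclass branch of average_precision_score.
print(label_binarize([0, 1, 2], classes=[0, 1, 2]))
# [[1 0 0]
#  [0 1 0]
#  [0 0 1]]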
import warnings
import numpy as np
from functools import partial
from numbers import Integral, Real
from scipy.sparse import csr_matrix, issparse
from scipy.stats import rankdata

from ..exceptions import UndefinedMetricWarning
from ..preprocessing import label_binarize
from ..utils import (
    assert_all_finite,
    check_array,
    check_consistent_length,
    column_or_1d,
)
from ..utils._encode import _encode, _unique
from ..utils._param_validation import Interval, StrOptions, validate_params
from ..utils.extmath import stable_cumsum
from ..utils.fixes import trapezoid
from ..utils.multiclass import type_of_target
from ..utils.sparsefuncs import count_nonzero
from ..utils.validation import _check_pos_label_consistency, _check_sample_weight
from ._base import _average_binary_score, _average_multiclass_ovo_score
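One import worth noting is trapezoid from ..utils.fixes: the auc helper later in all_code integrates the (x, y) points with the trapezoidal rule. A minimal sketch of that reduction, assuming the fixes shim resolves to scipy.integrate.trapezoid (toy points invented for the example):

import numpy as np
from scipy.integrate import trapezoid  # assumed target of sklearn.utils.fixes.trapezoid

# Toy ROC polyline, invented for illustration.
fpr = np.array([0.0, 0.5, 1.0])
tpr = np.array([0.0, 0.75, 1.0])

# Once inputs are validated and the direction of x is checked,
# auc(x, y) is essentially this trapezoidal integral.
print(trapezoid(tpr, fpr))  # 0.625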
17,987
"""Compute Receiver operating characteristic (ROC). Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : array-like of shape (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. drop_intermediate : bool, default=True Whether to drop some suboptimal thresholds which would not appear on a plotted ROC curve. This is useful in order to create lighter ROC curves. .. versionadded:: 0.17 parameter *drop_intermediate*. Returns ------- fpr : ndarray of shape (>2,) Increasing false positive rates such that element i is the false positive rate of predictions with score >= `thresholds[i]`. tpr : ndarray of shape (>2,) Increasing true positive rates such that element `i` is the true positive rate of predictions with score >= `thresholds[i]`. thresholds : ndarray of shape (n_thresholds,) Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `np.inf`. See Also -------- RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. det_curve: Compute error rates for different probability thresholds. roc_auc_score : Compute the area under the ROC curve. Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. An arbitrary threshold is added for the case `tpr=0` and `fpr=0` to ensure that the curve starts at `(0, 0)`. This threshold corresponds to the `np.inf`. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition Letters, 2006, 27(8):861-874. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) >>> fpr array([0. , 0. , 0.5, 0.5, 1. ]) >>> tpr array([0. , 0.5, 0.5, 1. , 1. ]) >>> thresholds array([ inf, 0.8 , 0.4 , 0.35, 0.1 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Attempt to drop thresholds corresponding to points in between and # collinear with other points. These are always suboptimal and do not # appear on a plotted ROC curve (and thus do not affect the AUC). # Here np.diff(_, 2) is used as a "second derivative" to tell if there # is a corner at the point. 
Both fps and tps must be tested to handle # thresholds with multiple data points (which are combined in # _binary_clf_curve). This keeps all cases where the point should be kept, # but does not drop more complicated cases like fps = [1, 3, 7], # tps = [1, 2, 4]; there is no harm in keeping too many thresholds. if drop_intermediate and len(fps) > 2: optimal_idxs = np.where( np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True] )[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] # Add an extra threshold position # to make sure that the curve starts at (0, 0) tps = np.r_[0, tps] fps = np.r_[0, fps] # get dtype of `y_score` even if it is an array-like thresholds = np.r_[np.inf, thresholds] if fps[-1] <= 0: warnings.warn( "No negative samples in y_true, false positive value should be meaningless",
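The drop_intermediate logic documented above (the np.diff(_, 2) "second derivative" test) removes only points that are collinear with their neighbours, so the plotted curve and its area are unchanged. A quick sketch against the public roc_curve API, with invented labels and scores:

import numpy as np
from sklearn.metrics import auc, roc_curve

# Invented labels/scores for illustration.
y_true = np.array([0, 0, 1, 1, 1, 0, 1, 0])
y_score = np.array([0.1, 0.3, 0.35, 0.8, 0.9, 0.7, 0.55, 0.2])

fpr_d, tpr_d, thr_d = roc_curve(y_true, y_score)                           # default: drop collinear points
fpr_f, tpr_f, thr_f = roc_curve(y_true, y_score, drop_intermediate=False)  # keep every threshold

# Fewer (or equal) thresholds, identical area under the curve.
print(len(thr_d) <= len(thr_f))                          # True
print(np.isclose(auc(fpr_d, tpr_d), auc(fpr_f, tpr_f)))  # True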
"""Metrics to assess performance on classification task given scores. Functions named as ``*_score`` return a scalar value to maximize: the higher the better. Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize: the lower the better. """ # Authors: Alexandre Gramfort <[email protected]> # Mathieu Blondel <[email protected]> # Olivier Grisel <[email protected]> # Arnaud Joly <[email protected]> # Jochen Wersdorfer <[email protected]> # Lars Buitinck # Joel Nothman <[email protected]> # Noel Dawe <[email protected]> # Michal Karbownik <[email protected]> # License: BSD 3 clause @validate_params( {"x": ["array-like"], "y": ["array-like"]}, prefer_skip_nested_validation=True, ) def auc(x, y): """Compute Area Under the Curve (AUC) using the trapezoidal rule. This is a general function, given points on a curve. For computing the area under the ROC-curve, see :func:`roc_auc_score`. For an alternative way to summarize a precision-recall curve, see :func:`average_precision_score`. Parameters ---------- x : array-like of shape (n,) X coordinates. These must be either monotonic increasing or monotonic decreasing. y : array-like of shape (n,) Y coordinates. Returns ------- auc : float Area Under the Curve. See Also -------- roc_auc_score : Compute the area under the ROC curve. average_precision_score : Compute average precision from prediction scores. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> pred = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2) >>> metrics.auc(fpr, tpr) 0.75 """ check_consistent_length(x, y) x = column_or_1d(x) y = column_or_1d(y) if x.shape[0] < 2: raise ValueError( "At least 2 points are needed to compute area under curve, but x.shape = %s" % x.shape ) direction = 1 dx = np.diff(x) if np.any(dx < 0): if np.all(dx <= 0): direction = -1 else: raise ValueError("x is neither increasing nor decreasing : {}.".format(x)) area = direction * trapezoid(y, x) if isinstance(area, np.memmap): # Reductions such as .sum used internally in trapezoid do not return a # scalar by default for numpy.memmap instances contrary to # regular numpy.ndarray instances. area = area.dtype.type(area) return area @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "samples", "weighted", "macro"}), None], "pos_label": [Real, str, "boolean"], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def average_precision_score( y_true, y_score, *, average="macro", pos_label=1, sample_weight=None ): """Compute average precision (AP) from prediction scores. AP summarizes a precision-recall curve as the weighted mean of precisions achieved at each threshold, with the increase in recall from the previous threshold used as the weight: .. math:: \\text{AP} = \\sum_n (R_n - R_{n-1}) P_n where :math:`P_n` and :math:`R_n` are the precision and recall at the nth threshold [1]_. This implementation is not interpolated and is different from computing the area under the precision-recall curve with the trapezoidal rule, which uses linear interpolation and can be too optimistic. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True binary labels or binary label indicators. 
y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by :term:`decision_function` on some classifiers). average : {'micro', 'samples', 'weighted', 'macro'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data: ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. pos_label : int, float, bool or str, default=1 The label of the positive class. Only applied to binary ``y_true``. For multilabel-indicator ``y_true``, ``pos_label`` is fixed to 1. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- average_precision : float Average precision score. See Also -------- roc_auc_score : Compute the area under the ROC curve. precision_recall_curve : Compute precision-recall pairs for different probability thresholds. Notes ----- .. versionchanged:: 0.19 Instead of linearly interpolating between operating points, precisions are weighted by the change in recall since the last operating point. References ---------- .. [1] `Wikipedia entry for the Average precision <https://en.wikipedia.org/w/index.php?title=Information_retrieval& oldid=793358396#Average_precision>`_ Examples -------- >>> import numpy as np >>> from sklearn.metrics import average_precision_score >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> average_precision_score(y_true, y_scores) 0.83... >>> y_true = np.array([0, 0, 1, 1, 2, 2]) >>> y_scores = np.array([ ... [0.7, 0.2, 0.1], ... [0.4, 0.3, 0.3], ... [0.1, 0.8, 0.1], ... [0.2, 0.3, 0.5], ... [0.4, 0.4, 0.2], ... [0.1, 0.2, 0.7], ... ]) >>> average_precision_score(y_true, y_scores) 0.77... """ def _binary_uninterpolated_average_precision( y_true, y_score, pos_label=1, sample_weight=None ): precision, recall, _ = precision_recall_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Return the step function integral # The following works because the last entry of precision is # guaranteed to be 1, as returned by precision_recall_curve return -np.sum(np.diff(recall) * np.array(precision)[:-1]) y_type = type_of_target(y_true, input_name="y_true") # Convert to Python primitive type to avoid NumPy type / Python str # comparison. See https://github.com/numpy/numpy/issues/6784 present_labels = np.unique(y_true).tolist() if y_type == "binary": if len(present_labels) == 2 and pos_label not in present_labels: raise ValueError( f"pos_label={pos_label} is not a valid label. It should be " f"one of {present_labels}" ) elif y_type == "multilabel-indicator" and pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multilabel-indicator y_true. " "Do not set pos_label or set pos_label to 1." ) elif y_type == "multiclass": if pos_label != 1: raise ValueError( "Parameter pos_label is fixed to 1 for multiclass y_true. " "Do not set pos_label or set pos_label to 1." 
) y_true = label_binarize(y_true, classes=present_labels) average_precision = partial( _binary_uninterpolated_average_precision, pos_label=pos_label ) return _average_binary_score( average_precision, y_true, y_score, average, sample_weight=sample_weight ) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], }, prefer_skip_nested_validation=True, ) def det_curve(y_true, y_score, pos_label=None, sample_weight=None): """Compute error rates for different probability thresholds. .. note:: This metric is used for evaluation of ranking and error tradeoffs of a binary classification task. Read more in the :ref:`User Guide <det_curve>`. .. versionadded:: 0.24 Parameters ---------- y_true : ndarray of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : ndarray of shape of (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fpr : ndarray of shape (n_thresholds,) False positive rate (FPR) such that element i is the false positive rate of predictions with score >= thresholds[i]. This is occasionally referred to as false acceptance probability or fall-out. fnr : ndarray of shape (n_thresholds,) False negative rate (FNR) such that element i is the false negative rate of predictions with score >= thresholds[i]. This is occasionally referred to as false rejection or miss rate. thresholds : ndarray of shape (n_thresholds,) Decreasing score values. See Also -------- DetCurveDisplay.from_estimator : Plot DET curve given an estimator and some data. DetCurveDisplay.from_predictions : Plot DET curve given the true and predicted labels. DetCurveDisplay : DET curve visualization. roc_curve : Compute Receiver operating characteristic (ROC) curve. precision_recall_curve : Compute precision-recall curve. Examples -------- >>> import numpy as np >>> from sklearn.metrics import det_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, fnr, thresholds = det_curve(y_true, y_scores) >>> fpr array([0.5, 0.5, 0. ]) >>> fnr array([0. , 0.5, 0.5]) >>> thresholds array([0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. Detection error " "tradeoff curve is not defined in that case." 
) fns = tps[-1] - tps p_count = tps[-1] n_count = fps[-1] # start with false positives zero first_ind = ( fps.searchsorted(fps[0], side="right") - 1 if fps.searchsorted(fps[0], side="right") > 0 else None ) # stop with false negatives zero last_ind = tps.searchsorted(tps[-1]) + 1 sl = slice(first_ind, last_ind) # reverse the output such that list of false positives is decreasing return (fps[sl][::-1] / n_count, fns[sl][::-1] / p_count, thresholds[sl][::-1]) def _binary_roc_auc_score(y_true, y_score, sample_weight=None, max_fpr=None): """Binary roc auc score.""" if len(np.unique(y_true)) != 2: raise ValueError( "Only one class present in y_true. ROC AUC score " "is not defined in that case." ) fpr, tpr, _ = roc_curve(y_true, y_score, sample_weight=sample_weight) if max_fpr is None or max_fpr == 1: return auc(fpr, tpr) if max_fpr <= 0 or max_fpr > 1: raise ValueError("Expected max_fpr in range (0, 1], got: %r" % max_fpr) # Add a single point at max_fpr by linear interpolation stop = np.searchsorted(fpr, max_fpr, "right") x_interp = [fpr[stop - 1], fpr[stop]] y_interp = [tpr[stop - 1], tpr[stop]] tpr = np.append(tpr[:stop], np.interp(max_fpr, x_interp, y_interp)) fpr = np.append(fpr[:stop], max_fpr) partial_auc = auc(fpr, tpr) # McClish correction: standardize result to be 0.5 if non-discriminant # and 1 if maximal min_area = 0.5 * max_fpr**2 max_area = max_fpr return 0.5 * (1 + (partial_auc - min_area) / (max_area - min_area)) @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "average": [StrOptions({"micro", "macro", "samples", "weighted"}), None], "sample_weight": ["array-like", None], "max_fpr": [Interval(Real, 0.0, 1, closed="right"), None], "multi_class": [StrOptions({"raise", "ovr", "ovo"})], "labels": ["array-like", None], }, prefer_skip_nested_validation=True, ) def roc_auc_score( y_true, y_score, *, average="macro", sample_weight=None, max_fpr=None, multi_class="raise", labels=None, ): """Compute Area Under the Receiver Operating Characteristic Curve (ROC AUC) \ from prediction scores. Note: this implementation can be used with binary, multiclass and multilabel classification, but some restrictions apply (see Parameters). Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) or (n_samples, n_classes) True labels or binary label indicators. The binary and multiclass cases expect labels with shape (n_samples,) while the multilabel case expects binary label indicators with shape (n_samples, n_classes). y_score : array-like of shape (n_samples,) or (n_samples, n_classes) Target scores. * In the binary case, it corresponds to an array of shape `(n_samples,)`. Both probability estimates and non-thresholded decision values can be provided. The probability estimates correspond to the **probability of the class with the greater label**, i.e. `estimator.classes_[1]` and thus `estimator.predict_proba(X, y)[:, 1]`. The decision values corresponds to the output of `estimator.decision_function(X, y)`. See more information in the :ref:`User guide <roc_auc_binary>`; * In the multiclass case, it corresponds to an array of shape `(n_samples, n_classes)` of probability estimates provided by the `predict_proba` method. The probability estimates **must** sum to 1 across the possible classes. In addition, the order of the class scores must correspond to the order of ``labels``, if provided, or else to the numerical or lexicographical order of the labels in ``y_true``. 
See more information in the :ref:`User guide <roc_auc_multiclass>`; * In the multilabel case, it corresponds to an array of shape `(n_samples, n_classes)`. Probability estimates are provided by the `predict_proba` method and the non-thresholded decision values by the `decision_function` method. The probability estimates correspond to the **probability of the class with the greater label for each output** of the classifier. See more information in the :ref:`User guide <roc_auc_multilabel>`. average : {'micro', 'macro', 'samples', 'weighted'} or None, \ default='macro' If ``None``, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Note: multiclass ROC AUC currently only handles the 'macro' and 'weighted' averages. For multiclass targets, `average=None` is only implemented for `multi_class='ovr'` and `average='micro'` is only implemented for `multi_class='ovr'`. ``'micro'``: Calculate metrics globally by considering each element of the label indicator matrix as a label. ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. ``'weighted'``: Calculate metrics for each label, and find their average, weighted by support (the number of true instances for each label). ``'samples'``: Calculate metrics for each instance, and find their average. Will be ignored when ``y_true`` is binary. sample_weight : array-like of shape (n_samples,), default=None Sample weights. max_fpr : float > 0 and <= 1, default=None If not ``None``, the standardized partial AUC [2]_ over the range [0, max_fpr] is returned. For the multiclass case, ``max_fpr``, should be either equal to ``None`` or ``1.0`` as AUC ROC partial computation currently is not supported for multiclass. multi_class : {'raise', 'ovr', 'ovo'}, default='raise' Only used for multiclass targets. Determines the type of configuration to use. The default value raises an error, so either ``'ovr'`` or ``'ovo'`` must be passed explicitly. ``'ovr'``: Stands for One-vs-rest. Computes the AUC of each class against the rest [3]_ [4]_. This treats the multiclass case in the same way as the multilabel case. Sensitive to class imbalance even when ``average == 'macro'``, because class imbalance affects the composition of each of the 'rest' groupings. ``'ovo'``: Stands for One-vs-one. Computes the average AUC of all possible pairwise combinations of classes [5]_. Insensitive to class imbalance when ``average == 'macro'``. labels : array-like of shape (n_classes,), default=None Only used for multiclass targets. List of labels that index the classes in ``y_score``. If ``None``, the numerical or lexicographical order of the labels in ``y_true`` is used. Returns ------- auc : float Area Under the Curve score. See Also -------- average_precision_score : Area under the precision-recall curve. roc_curve : Compute Receiver operating characteristic (ROC) curve. RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] `Analyzing a portion of the ROC curve. McClish, 1989 <https://www.ncbi.nlm.nih.gov/pubmed/2668680>`_ .. [3] Provost, F., Domingos, P. (2000). 
Well-trained PETs: Improving probability estimation trees (Section 6.2), CeDER Working Paper #IS-00-04, Stern School of Business, New York University. .. [4] `Fawcett, T. (2006). An introduction to ROC analysis. Pattern Recognition Letters, 27(8), 861-874. <https://www.sciencedirect.com/science/article/pii/S016786550500303X>`_ .. [5] `Hand, D.J., Till, R.J. (2001). A Simple Generalisation of the Area Under the ROC Curve for Multiple Class Classification Problems. Machine Learning, 45(2), 171-186. <http://link.springer.com/article/10.1023/A:1010920819831>`_ Examples -------- Binary case: >>> from sklearn.datasets import load_breast_cancer >>> from sklearn.linear_model import LogisticRegression >>> from sklearn.metrics import roc_auc_score >>> X, y = load_breast_cancer(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear", random_state=0).fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X)[:, 1]) 0.99... >>> roc_auc_score(y, clf.decision_function(X)) 0.99... Multiclass case: >>> from sklearn.datasets import load_iris >>> X, y = load_iris(return_X_y=True) >>> clf = LogisticRegression(solver="liblinear").fit(X, y) >>> roc_auc_score(y, clf.predict_proba(X), multi_class='ovr') 0.99... Multilabel case: >>> import numpy as np >>> from sklearn.datasets import make_multilabel_classification >>> from sklearn.multioutput import MultiOutputClassifier >>> X, y = make_multilabel_classification(random_state=0) >>> clf = MultiOutputClassifier(clf).fit(X, y) >>> # get a list of n_output containing probability arrays of shape >>> # (n_samples, n_classes) >>> y_pred = clf.predict_proba(X) >>> # extract the positive columns for each output >>> y_pred = np.transpose([pred[:, 1] for pred in y_pred]) >>> roc_auc_score(y, y_pred, average=None) array([0.82..., 0.86..., 0.94..., 0.85... , 0.94...]) >>> from sklearn.linear_model import RidgeClassifierCV >>> clf = RidgeClassifierCV().fit(X, y) >>> roc_auc_score(y, clf.decision_function(X), average=None) array([0.81..., 0.84... , 0.93..., 0.87..., 0.94...]) """ y_type = type_of_target(y_true, input_name="y_true") y_true = check_array(y_true, ensure_2d=False, dtype=None) y_score = check_array(y_score, ensure_2d=False) if y_type == "multiclass" or ( y_type == "binary" and y_score.ndim == 2 and y_score.shape[1] > 2 ): # do not support partial ROC computation for multiclass if max_fpr is not None and max_fpr != 1.0: raise ValueError( "Partial AUC computation not available in " "multiclass setting, 'max_fpr' must be" " set to `None`, received `max_fpr={0}` " "instead".format(max_fpr) ) if multi_class == "raise": raise ValueError("multi_class must be in ('ovo', 'ovr')") return _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ) elif y_type == "binary": labels = np.unique(y_true) y_true = label_binarize(y_true, classes=labels)[:, 0] return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) else: # multilabel-indicator return _average_binary_score( partial(_binary_roc_auc_score, max_fpr=max_fpr), y_true, y_score, average, sample_weight=sample_weight, ) def _multiclass_roc_auc_score( y_true, y_score, labels, multi_class, average, sample_weight ): """Multiclass roc auc score. Parameters ---------- y_true : array-like of shape (n_samples,) True multiclass labels. 
y_score : array-like of shape (n_samples, n_classes) Target scores corresponding to probability estimates of a sample belonging to a particular class labels : array-like of shape (n_classes,) or None List of labels to index ``y_score`` used for multiclass. If ``None``, the lexical order of ``y_true`` is used to index ``y_score``. multi_class : {'ovr', 'ovo'} Determines the type of multiclass configuration to use. ``'ovr'``: Calculate metrics for the multiclass case using the one-vs-rest approach. ``'ovo'``: Calculate metrics for the multiclass case using the one-vs-one approach. average : {'micro', 'macro', 'weighted'} Determines the type of averaging performed on the pairwise binary metric scores ``'micro'``: Calculate metrics for the binarized-raveled classes. Only supported for `multi_class='ovr'`. .. versionadded:: 1.2 ``'macro'``: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account. Classes are assumed to be uniformly distributed. ``'weighted'``: Calculate metrics for each label, taking into account the prevalence of the classes. sample_weight : array-like of shape (n_samples,) or None Sample weights. """ # validation of the input y_score if not np.allclose(1, y_score.sum(axis=1)): raise ValueError( "Target scores need to be probabilities for multiclass " "roc_auc, i.e. they should sum up to 1.0 over classes" ) # validation for multiclass parameter specifications average_options = ("macro", "weighted", None) if multi_class == "ovr": average_options = ("micro",) + average_options if average not in average_options: raise ValueError( "average must be one of {0} for multiclass problems".format(average_options) ) multiclass_options = ("ovo", "ovr") if multi_class not in multiclass_options: raise ValueError( "multi_class='{0}' is not supported " "for multiclass ROC AUC, multi_class must be " "in {1}".format(multi_class, multiclass_options) ) if average is None and multi_class == "ovo": raise NotImplementedError( "average=None is not implemented for multi_class='ovo'." ) if labels is not None: labels = column_or_1d(labels) classes = _unique(labels) if len(classes) != len(labels): raise ValueError("Parameter 'labels' must be unique") if not np.array_equal(classes, labels): raise ValueError("Parameter 'labels' must be ordered") if len(classes) != y_score.shape[1]: raise ValueError( "Number of given labels, {0}, not equal to the number " "of columns in 'y_score', {1}".format(len(classes), y_score.shape[1]) ) if len(np.setdiff1d(y_true, classes)): raise ValueError("'y_true' contains labels not in parameter 'labels'") else: classes = _unique(y_true) if len(classes) != y_score.shape[1]: raise ValueError( "Number of classes in y_true not equal to the number of " "columns in 'y_score'" ) if multi_class == "ovo": if sample_weight is not None: raise ValueError( "sample_weight is not supported " "for multiclass one-vs-one ROC AUC, " "'sample_weight' must be None in this case." ) y_true_encoded = _encode(y_true, uniques=classes) # Hand & Till (2001) implementation (ovo) return _average_multiclass_ovo_score( _binary_roc_auc_score, y_true_encoded, y_score, average=average ) else: # ovr is same as multi-label y_true_multilabel = label_binarize(y_true, classes=classes) return _average_binary_score( _binary_roc_auc_score, y_true_multilabel, y_score, average, sample_weight=sample_weight, ) def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None): """Calculate true and false positives per binary classification threshold. 
Parameters ---------- y_true : ndarray of shape (n_samples,) True targets of binary classification. y_score : ndarray of shape (n_samples,) Estimated probabilities or output of a decision function. pos_label : int, float, bool or str, default=None The label of the positive class. sample_weight : array-like of shape (n_samples,), default=None Sample weights. Returns ------- fps : ndarray of shape (n_thresholds,) A count of false positives, at index i being the number of negative samples assigned a score >= thresholds[i]. The total number of negative samples is equal to fps[-1] (thus true negatives are given by fps[-1] - fps). tps : ndarray of shape (n_thresholds,) An increasing count of true positives, at index i being the number of positive samples assigned a score >= thresholds[i]. The total number of positive samples is equal to tps[-1] (thus false negatives are given by tps[-1] - tps). thresholds : ndarray of shape (n_thresholds,) Decreasing score values. """ # Check to make sure y_true is valid y_type = type_of_target(y_true, input_name="y_true") if not (y_type == "binary" or (y_type == "multiclass" and pos_label is not None)): raise ValueError("{0} format is not supported".format(y_type)) check_consistent_length(y_true, y_score, sample_weight) y_true = column_or_1d(y_true) y_score = column_or_1d(y_score) assert_all_finite(y_true) assert_all_finite(y_score) # Filter out zero-weighted samples, as they should not impact the result if sample_weight is not None: sample_weight = column_or_1d(sample_weight) sample_weight = _check_sample_weight(sample_weight, y_true) nonzero_weight_mask = sample_weight != 0 y_true = y_true[nonzero_weight_mask] y_score = y_score[nonzero_weight_mask] sample_weight = sample_weight[nonzero_weight_mask] pos_label = _check_pos_label_consistency(pos_label, y_true) # make y_true a boolean vector y_true = y_true == pos_label # sort scores and corresponding truth values desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1] y_score = y_score[desc_score_indices] y_true = y_true[desc_score_indices] if sample_weight is not None: weight = sample_weight[desc_score_indices] else: weight = 1.0 # y_score typically has many tied values. Here we extract # the indices associated with the distinct values. We also # concatenate a value for the end of the curve. distinct_value_indices = np.where(np.diff(y_score))[0] threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1] # accumulate the true positives with decreasing threshold tps = stable_cumsum(y_true * weight)[threshold_idxs] if sample_weight is not None: # express fps as a cumsum to ensure fps is increasing even in # the presence of floating point errors fps = stable_cumsum((1 - y_true) * weight)[threshold_idxs] else: fps = 1 + threshold_idxs - tps return fps, tps, y_score[threshold_idxs] @validate_params( { "y_true": ["array-like"], "probas_pred": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], "drop_intermediate": ["boolean"], }, prefer_skip_nested_validation=True, ) def precision_recall_curve( y_true, probas_pred, *, pos_label=None, sample_weight=None, drop_intermediate=False ): """Compute precision-recall pairs for different probability thresholds. Note: this implementation is restricted to the binary classification task. The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of true positives and ``fp`` the number of false positives. 
The precision is intuitively the ability of the classifier not to label as positive a sample that is negative. The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of true positives and ``fn`` the number of false negatives. The recall is intuitively the ability of the classifier to find all the positive samples. The last precision and recall values are 1. and 0. respectively and do not have a corresponding threshold. This ensures that the graph starts on the y axis. The first precision and recall values are precision=class balance and recall=1.0 which corresponds to a classifier that always predicts the positive class. Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. probas_pred : array-like of shape (n_samples,) Target scores, can either be probability estimates of the positive class, or non-thresholded measure of decisions (as returned by `decision_function` on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if y_true is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. drop_intermediate : bool, default=False Whether to drop some suboptimal thresholds which would not appear on a plotted precision-recall curve. This is useful in order to create lighter precision-recall curves. .. versionadded:: 1.3 Returns ------- precision : ndarray of shape (n_thresholds + 1,) Precision values such that element i is the precision of predictions with score >= thresholds[i] and the last element is 1. recall : ndarray of shape (n_thresholds + 1,) Decreasing recall values such that element i is the recall of predictions with score >= thresholds[i] and the last element is 0. thresholds : ndarray of shape (n_thresholds,) Increasing thresholds on the decision function used to compute precision and recall where `n_thresholds = len(np.unique(probas_pred))`. See Also -------- PrecisionRecallDisplay.from_estimator : Plot Precision Recall Curve given a binary classifier. PrecisionRecallDisplay.from_predictions : Plot Precision Recall Curve using predictions from a binary classifier. average_precision_score : Compute average precision from prediction scores. det_curve: Compute error rates for different probability thresholds. roc_curve : Compute Receiver operating characteristic (ROC) curve. Examples -------- >>> import numpy as np >>> from sklearn.metrics import precision_recall_curve >>> y_true = np.array([0, 0, 1, 1]) >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> precision, recall, thresholds = precision_recall_curve( ... y_true, y_scores) >>> precision array([0.5 , 0.66666667, 0.5 , 1. , 1. ]) >>> recall array([1. , 1. , 0.5, 0.5, 0. ]) >>> thresholds array([0.1 , 0.35, 0.4 , 0.8 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, probas_pred, pos_label=pos_label, sample_weight=sample_weight ) if drop_intermediate and len(fps) > 2: # Drop thresholds corresponding to points where true positives (tps) # do not change from the previous or subsequent point. This will keep # only the first and last point for each tps value. All points # with the same tps value have the same recall and thus x coordinate. # They appear as a vertical line on the plot. 
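        # The mask built below keeps an interior point when its tps value differs
        # from either neighbour (np.diff(tps[:-1]) compares each point with the
        # previous one, np.diff(tps[1:]) with the next one); the leading and
        # trailing [True] entries always retain the first and last thresholds.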
optimal_idxs = np.where( np.concatenate( [[True], np.logical_or(np.diff(tps[:-1]), np.diff(tps[1:])), [True]] ) )[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] ps = tps + fps # Initialize the result array with zeros to make sure that precision[ps == 0] # does not contain uninitialized values. precision = np.zeros_like(tps) np.divide(tps, ps, out=precision, where=(ps != 0)) # When no positive label in y_true, recall is set to 1 for all thresholds # tps[-1] == 0 <=> y_true == all negative labels if tps[-1] == 0: warnings.warn( "No positive class found in y_true, " "recall is set to one for all thresholds." ) recall = np.ones_like(tps) else: recall = tps / tps[-1] # reverse the outputs so recall is decreasing sl = slice(None, None, -1) return np.hstack((precision[sl], 1)), np.hstack((recall[sl], 0)), thresholds[sl] @validate_params( { "y_true": ["array-like"], "y_score": ["array-like"], "pos_label": [Real, str, "boolean", None], "sample_weight": ["array-like", None], "drop_intermediate": ["boolean"], }, prefer_skip_nested_validation=True, ) def roc_curve( y_true, y_score, *, pos_label=None, sample_weight=None, drop_intermediate=True ): """Compute Receiver operating characteristic (ROC). Note: this implementation is restricted to the binary classification task. Read more in the :ref:`User Guide <roc_metrics>`. Parameters ---------- y_true : array-like of shape (n_samples,) True binary labels. If labels are not either {-1, 1} or {0, 1}, then pos_label should be explicitly given. y_score : array-like of shape (n_samples,) Target scores, can either be probability estimates of the positive class, confidence values, or non-thresholded measure of decisions (as returned by "decision_function" on some classifiers). pos_label : int, float, bool or str, default=None The label of the positive class. When ``pos_label=None``, if `y_true` is in {-1, 1} or {0, 1}, ``pos_label`` is set to 1, otherwise an error will be raised. sample_weight : array-like of shape (n_samples,), default=None Sample weights. drop_intermediate : bool, default=True Whether to drop some suboptimal thresholds which would not appear on a plotted ROC curve. This is useful in order to create lighter ROC curves. .. versionadded:: 0.17 parameter *drop_intermediate*. Returns ------- fpr : ndarray of shape (>2,) Increasing false positive rates such that element i is the false positive rate of predictions with score >= `thresholds[i]`. tpr : ndarray of shape (>2,) Increasing true positive rates such that element `i` is the true positive rate of predictions with score >= `thresholds[i]`. thresholds : ndarray of shape (n_thresholds,) Decreasing thresholds on the decision function used to compute fpr and tpr. `thresholds[0]` represents no instances being predicted and is arbitrarily set to `np.inf`. See Also -------- RocCurveDisplay.from_estimator : Plot Receiver Operating Characteristic (ROC) curve given an estimator and some data. RocCurveDisplay.from_predictions : Plot Receiver Operating Characteristic (ROC) curve given the true and predicted values. det_curve: Compute error rates for different probability thresholds. roc_auc_score : Compute the area under the ROC curve. Notes ----- Since the thresholds are sorted from low to high values, they are reversed upon returning them to ensure they correspond to both ``fpr`` and ``tpr``, which are sorted in reversed order during their calculation. An arbitrary threshold is added for the case `tpr=0` and `fpr=0` to ensure that the curve starts at `(0, 0)`. 
This threshold corresponds to the `np.inf`. References ---------- .. [1] `Wikipedia entry for the Receiver operating characteristic <https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_ .. [2] Fawcett T. An introduction to ROC analysis[J]. Pattern Recognition Letters, 2006, 27(8):861-874. Examples -------- >>> import numpy as np >>> from sklearn import metrics >>> y = np.array([1, 1, 2, 2]) >>> scores = np.array([0.1, 0.4, 0.35, 0.8]) >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2) >>> fpr array([0. , 0. , 0.5, 0.5, 1. ]) >>> tpr array([0. , 0.5, 0.5, 1. , 1. ]) >>> thresholds array([ inf, 0.8 , 0.4 , 0.35, 0.1 ]) """ fps, tps, thresholds = _binary_clf_curve( y_true, y_score, pos_label=pos_label, sample_weight=sample_weight ) # Attempt to drop thresholds corresponding to points in between and # collinear with other points. These are always suboptimal and do not # appear on a plotted ROC curve (and thus do not affect the AUC). # Here np.diff(_, 2) is used as a "second derivative" to tell if there # is a corner at the point. Both fps and tps must be tested to handle # thresholds with multiple data points (which are combined in # _binary_clf_curve). This keeps all cases where the point should be kept, # but does not drop more complicated cases like fps = [1, 3, 7], # tps = [1, 2, 4]; there is no harm in keeping too many thresholds. if drop_intermediate and len(fps) > 2: optimal_idxs = np.where( np.r_[True, np.logical_or(np.diff(fps, 2), np.diff(tps, 2)), True] )[0] fps = fps[optimal_idxs] tps = tps[optimal_idxs] thresholds = thresholds[optimal_idxs] # Add an extra threshold position # to make sure that the curve starts at (0, 0) tps = np.r_[0, tps] fps = np.r_[0, fps] # get dtype of `y_score` even if it is an array-like thresholds = np.r_[np.inf, thresholds] if fps[-1] <= 0: warnings.warn( "No negative samples in y_true, false positive value should be meaningless",
UndefinedMetricWarning,
0
2023-10-07 13:19:48+00:00
24k
zbzhu99/madiff
diffuser/models/diffusion.py
[ { "identifier": "DPM_Solver", "path": "diffuser/utils/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding_max_val=1.0,\n dynamic_thresholding_ratio=0.995,\n ):\n \"\"\"Construct a DPM-Solver.\n\n We support both DPM-Solver (`algorithm_type=\"dpmsolver\"`) and DPM-Solver++ (`algorithm_type=\"dpmsolver++\"`).\n\n We also support the \"dynamic thresholding\" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type=\"dpmsolver++\"` and `correcting_x0_fn=\"dynamic_thresholding\"` to use the\n dynamic thresholding. The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either \"dpmsolver\" or \"dpmsolver++\".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn=\"dynamic_thresholding\"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))\n self.noise_schedule = noise_schedule\n assert algorithm_type in [\"dpmsolver\", \"dpmsolver++\"]\n self.algorithm_type = algorithm_type\n if correcting_x0_fn == \"dynamic_thresholding\":\n self.correcting_x0_fn = self.dynamic_thresholding_fn\n else:\n self.correcting_x0_fn = correcting_x0_fn\n self.correcting_xt_fn = correcting_xt_fn\n self.dynamic_thresholding_ratio = dynamic_thresholding_ratio\n self.thresholding_max_val = thresholding_max_val\n\n def dynamic_thresholding_fn(self, x0, t):\n \"\"\"\n The dynamic thresholding method.\n \"\"\"\n dims = x0.dim()\n p = self.dynamic_thresholding_ratio\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(\n torch.maximum(\n s, self.thresholding_max_val * torch.ones_like(s).to(s.device)\n ),\n dims,\n )\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with corrector).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(\n t\n ), self.noise_schedule.marginal_std(t)\n x0 = (x - sigma_t * noise) / alpha_t\n if self.correcting_x0_fn is not None:\n x0 = self.correcting_x0_fn(x0, t)\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model.\n \"\"\"\n if self.algorithm_type == \"dpmsolver++\":\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. 
The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == \"logSNR\":\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(\n lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1\n ).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == \"time_uniform\":\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == \"time_quadratic\":\n t_order = 2\n t = (\n torch.linspace(t_T ** (1.0 / t_order), t_0 ** (1.0 / t_order), N + 1)\n .pow(t_order)\n .to(device)\n )\n return t\n else:\n raise ValueError(\n \"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(\n skip_type\n )\n )\n\n def get_orders_and_timesteps_for_singlestep_solver(\n self, steps, order, skip_type, t_T, t_0, device\n ):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. 
The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [\n 3,\n ] * (\n K - 2\n ) + [2, 1]\n elif steps % 3 == 1:\n orders = [\n 3,\n ] * (\n K - 1\n ) + [1]\n else:\n orders = [\n 3,\n ] * (\n K - 1\n ) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [\n 2,\n ] * K\n else:\n K = steps // 2 + 1\n orders = [\n 2,\n ] * (\n K - 1\n ) + [1]\n elif order == 1:\n K = 1\n orders = [\n 1,\n ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == \"logSNR\":\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(\n torch.tensor(\n [\n 0,\n ]\n + orders\n ),\n 0,\n ).to(device)\n ]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.\n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(\n s\n ), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = sigma_t / sigma_s * x - alpha_t * phi_1 * model_s\n if return_intermediate:\n return x_t, {\"model_s\": model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = torch.exp(log_alpha_t - log_alpha_s) * x - (sigma_t * phi_1) * model_s\n if return_intermediate:\n return x_t, {\"model_s\": model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(\n self,\n x,\n s,\n t,\n r1=0.5,\n model_s=None,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. 
If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = (\n ns.marginal_log_mean_coeff(s),\n ns.marginal_log_mean_coeff(s1),\n ns.marginal_log_mean_coeff(t),\n )\n sigma_s, sigma_s1, sigma_t = (\n ns.marginal_std(s),\n ns.marginal_std(s1),\n ns.marginal_std(t),\n )\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == \"taylor\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1.0 / r1) * (alpha_t * (phi_1 / h + 1.0)) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n torch.exp(log_alpha_s1 - log_alpha_s) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == \"dpmsolver\":\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == \"taylor\":\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (1.0 / r1) * (sigma_t * (phi_1 / h - 1.0)) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {\"model_s\": model_s, \"model_s1\": model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(\n self,\n x,\n s,\n t,\n r1=1.0 / 3.0,\n r2=2.0 / 3.0,\n model_s=None,\n model_s1=None,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. 
The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n if r1 is None:\n r1 = 1.0 / 3.0\n if r2 is None:\n r2 = 2.0 / 3.0\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = (\n ns.marginal_log_mean_coeff(s),\n ns.marginal_log_mean_coeff(s1),\n ns.marginal_log_mean_coeff(s2),\n ns.marginal_log_mean_coeff(t),\n )\n sigma_s, sigma_s1, sigma_s2, sigma_t = (\n ns.marginal_std(s),\n ns.marginal_std(s1),\n ns.marginal_std(s2),\n ns.marginal_std(t),\n )\n alpha_s1, alpha_s2, alpha_t = (\n torch.exp(log_alpha_s1),\n torch.exp(log_alpha_s2),\n torch.exp(log_alpha_t),\n )\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.0\n phi_2 = phi_1 / h + 1.0\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (sigma_s1 / sigma_s) * x - (alpha_s1 * phi_11) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (sigma_s2 / sigma_s) * x\n - (alpha_s2 * phi_12) * model_s\n + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1.0 / r2) * (alpha_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == \"taylor\":\n D1_0 = (1.0 / r1) * (model_s1 - model_s)\n D1_1 = (1.0 / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.0\n phi_2 = phi_1 / h - 1.0\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (torch.exp(log_alpha_s1 - log_alpha_s)) * x - (\n sigma_s1 * phi_11\n ) * model_s\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (torch.exp(log_alpha_s2 - log_alpha_s)) * x\n - (sigma_s2 * phi_12) * model_s\n - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == \"dpmsolver\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (1.0 / r2) * (sigma_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == \"taylor\":\n D1_0 = (1.0 / r1) * (model_s1 - model_s)\n D1_1 = (1.0 / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2.0 * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n\n if return_intermediate:\n return x_t, {\"model_s\": model_s, \"model_s1\": model_s1, \"model_s2\": model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(\n self, x, 
model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in [\"dpmsolver\", \"taylor\"]:\n raise ValueError(\n \"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(\n solver_type\n )\n )\n ns = self.noise_schedule\n model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]\n t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]\n lambda_prev_1, lambda_prev_0, lambda_t = (\n ns.marginal_lambda(t_prev_1),\n ns.marginal_lambda(t_prev_0),\n ns.marginal_lambda(t),\n )\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(\n t_prev_0\n ), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if solver_type == \"dpmsolver\":\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n - 0.5 * (alpha_t * phi_1) * D1_0\n )\n elif solver_type == \"taylor\":\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * (phi_1 / h + 1.0)) * D1_0\n )\n else:\n phi_1 = torch.expm1(h)\n if solver_type == \"dpmsolver\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - 0.5 * (sigma_t * phi_1) * D1_0\n )\n elif solver_type == \"taylor\":\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * (phi_1 / h - 1.0)) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(\n self, x, model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = (\n ns.marginal_lambda(t_prev_2),\n ns.marginal_lambda(t_prev_1),\n ns.marginal_lambda(t_prev_0),\n ns.marginal_lambda(t),\n )\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(\n t_prev_0\n ), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = (1.0 / r0) * (model_prev_0 - model_prev_1)\n D1_1 = (1.0 / r1) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)\n D2 = (1.0 / (r0 + r1)) * (D1_0 - D1_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n phi_2 = phi_1 / h + 1.0\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_1 = torch.expm1(h)\n phi_2 = phi_1 / h - 1.0\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(\n self,\n x,\n s,\n t,\n order,\n return_intermediate=False,\n solver_type=\"dpmsolver\",\n r1=None,\n r2=None,\n ):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(\n x, s, t, return_intermediate=return_intermediate\n )\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(\n x,\n s,\n t,\n return_intermediate=return_intermediate,\n solver_type=solver_type,\n r1=r1,\n )\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(\n x,\n s,\n t,\n return_intermediate=return_intermediate,\n solver_type=solver_type,\n r1=r1,\n r2=r2,\n )\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(\n self, x, model_prev_list, t_prev_list, t, order, solver_type=\"dpmsolver\"\n ):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. 
We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(\n x, t_prev_list[-1], t, model_s=model_prev_list[-1]\n )\n elif order == 2:\n return self.multistep_dpm_solver_second_update(\n x, model_prev_list, t_prev_list, t, solver_type=solver_type\n )\n elif order == 3:\n return self.multistep_dpm_solver_third_update(\n x, model_prev_list, t_prev_list, t, solver_type=solver_type\n )\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(\n self,\n x,\n order,\n t_T,\n t_0,\n h_init=0.05,\n atol=0.0078,\n rtol=0.05,\n theta=0.9,\n t_err=1e-5,\n solver_type=\"dpmsolver\",\n ):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the\n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. 
Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((1,)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(\n x, s, t, return_intermediate=True\n )\n higher_update = (\n lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(\n x, s, t, r1=r1, solver_type=solver_type, **kwargs\n )\n )\n elif order == 3:\n r1, r2 = 1.0 / 3.0, 2.0 / 3.0\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(\n x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type\n )\n higher_update = (\n lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(\n x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs\n )\n )\n else:\n raise ValueError(\n \"For adaptive step size solver, order must be 2 or 3, got {}\".format(\n order\n )\n )\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(\n torch.ones_like(x).to(x) * atol,\n rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)),\n )\n norm_fn = lambda v: torch.sqrt(\n torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)\n )\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.0):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(\n theta * h * torch.float_power(E, -1.0 / order).float(),\n lambda_0 - lambda_s,\n )\n nfe += order\n print(\"adaptive solver nfe\", nfe)\n return x\n\n def add_noise(self, x, t, noise=None):\n \"\"\"\n Compute the noised input xt = alpha_t * x + sigma_t * noise.\n\n Args:\n x: A `torch.Tensor` with shape `(batch_size, *shape)`.\n t: A `torch.Tensor` with shape `(t_size,)`.\n Returns:\n xt with shape `(t_size, batch_size, *shape)`.\n \"\"\"\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(\n t\n ), self.noise_schedule.marginal_std(t)\n if noise is None:\n noise = torch.randn((t.shape[0], *x.shape), device=x.device)\n x = x.reshape((-1, *x.shape))\n xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise\n if t.shape[0] == 1:\n return xt.squeeze(0)\n else:\n return xt\n\n def inverse(\n self,\n x,\n steps=20,\n t_start=None,\n t_end=None,\n order=2,\n skip_type=\"time_uniform\",\n method=\"multistep\",\n lower_order_final=True,\n denoise_to_zero=False,\n solver_type=\"dpmsolver\",\n atol=0.0078,\n rtol=0.05,\n return_intermediate=False,\n ):\n \"\"\"\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n \"\"\"\n t_0 = 1.0 / self.noise_schedule.total_N if t_start is None else t_start\n t_T = self.noise_schedule.T if t_end is None else t_end\n assert (\n t_0 > 0 and t_T > 0\n ), \"Time range needs to be greater than 0. 
For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n return self.sample(\n x,\n steps=steps,\n t_start=t_0,\n t_end=t_T,\n order=order,\n skip_type=skip_type,\n method=method,\n lower_order_final=lower_order_final,\n denoise_to_zero=denoise_to_zero,\n solver_type=solver_type,\n atol=atol,\n rtol=rtol,\n return_intermediate=return_intermediate,\n )\n\n def sample(\n self,\n x,\n condition_func,\n steps=20,\n t_start=None,\n t_end=None,\n order=2,\n skip_type=\"time_uniform\",\n method=\"multistep\",\n lower_order_final=True,\n denoise_to_zero=False,\n solver_type=\"dpmsolver\",\n atol=0.0078,\n rtol=0.05,\n return_intermediate=False,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. \"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver.\n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. 
\"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver or DPM-Solver++ (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g., DPM-Solver:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n e.g., DPM-Solver++:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `algorithm_type=\"dpmsolver++\"` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. 
As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n return_intermediate: A `bool`. Whether to save the xt at each step.\n When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1.0 / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n assert (\n t_0 > 0 and t_T > 0\n ), \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n if return_intermediate:\n assert method in [\n \"multistep\",\n \"singlestep\",\n \"singlestep_fixed\",\n ], \"Cannot use adaptive solver when saving intermediate values\"\n if self.correcting_xt_fn is not None:\n assert method in [\n \"multistep\",\n \"singlestep\",\n \"singlestep_fixed\",\n ], \"Cannot use adaptive solver when correcting_xt_fn is not None\"\n device = x.device\n intermediates = []\n with torch.no_grad():\n if method == \"adaptive\":\n x = self.dpm_solver_adaptive(\n x,\n order=order,\n t_T=t_T,\n t_0=t_0,\n atol=atol,\n rtol=rtol,\n solver_type=solver_type,\n )\n elif method == \"multistep\":\n assert steps >= order\n timesteps = self.get_time_steps(\n skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device\n )\n assert timesteps.shape[0] - 1 == steps\n # Init the initial values.\n step = 0\n t = timesteps[step]\n t_prev_list = [t]\n model_prev_list = [self.model_fn(x, t)]\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n # Init the first `order` values by lower order multistep DPM-Solver.\n for step in range(1, order):\n t = timesteps[step]\n x = condition_func(x)\n x = self.multistep_dpm_solver_update(\n x,\n model_prev_list,\n t_prev_list,\n t,\n step,\n solver_type=solver_type,\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n t_prev_list.append(t)\n model_prev_list.append(self.model_fn(x, t))\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in range(order, steps + 1):\n t = timesteps[step]\n # We only use lower order for steps < 10\n if lower_order_final and steps < 10:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = condition_func(x)\n x = self.multistep_dpm_solver_update(\n x,\n model_prev_list,\n t_prev_list,\n t,\n step_order,\n solver_type=solver_type,\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = 
model_prev_list[i + 1]\n t_prev_list[-1] = t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, t)\n elif method in [\"singlestep\", \"singlestep_fixed\"]:\n if method == \"singlestep\":\n (\n timesteps_outer,\n orders,\n ) = self.get_orders_and_timesteps_for_singlestep_solver(\n steps=steps,\n order=order,\n skip_type=skip_type,\n t_T=t_T,\n t_0=t_0,\n device=device,\n )\n elif method == \"singlestep_fixed\":\n K = steps // order\n orders = [\n order,\n ] * K\n timesteps_outer = self.get_time_steps(\n skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device\n )\n for step, order in enumerate(orders):\n s, t = timesteps_outer[step], timesteps_outer[step + 1]\n timesteps_inner = self.get_time_steps(\n skip_type=skip_type,\n t_T=s.item(),\n t_0=t.item(),\n N=order,\n device=device,\n )\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = condition_func(x)\n x = self.singlestep_dpm_solver_update(\n x, s, t, order, solver_type=solver_type, r1=r1, r2=r2\n )\n if self.correcting_xt_fn is not None:\n x = condition_func(x)\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n else:\n raise ValueError(\"Got wrong method {}\".format(method))\n if denoise_to_zero:\n t = torch.ones((1,)).to(device) * t_0\n x = self.denoise_to_zero_fn(x, t)\n x = condition_func(x)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step + 1)\n x = condition_func(x)\n if return_intermediate:\n intermediates.append(x)\n if return_intermediate:\n return x, intermediates\n else:\n return x" }, { "identifier": "NoiseScheduleVP", "path": "diffuser/utils/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule=\"discrete\",\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.0,\n dtype=torch.float32,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. 
The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise\n schedule are the default settings in DDPM and improved-DDPM:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n cosine_s: A `float` number. The hyperparameter in the cosine schedule.\n cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,\n 'linear' or 'cosine' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n\n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in [\"discrete\", \"linear\", \"cosine\"]:\n raise ValueError(\n \"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'\".format(\n schedule\n )\n )\n\n self.schedule = schedule\n if schedule == \"discrete\":\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.total_N = len(log_alphas)\n self.T = 1.0\n self.t_array = (\n torch.linspace(0.0, 1.0, self.total_N + 1)[1:]\n .reshape((1, -1))\n .to(dtype=dtype)\n )\n self.log_alpha_array = log_alphas.reshape(\n (\n 1,\n -1,\n )\n ).to(dtype=dtype)\n else:\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n self.cosine_s = 0.008\n self.cosine_beta_max = 999.0\n self.cosine_t_max = (\n math.atan(self.cosine_beta_max * (1.0 + self.cosine_s) / math.pi)\n * 2.0\n * (1.0 + self.cosine_s)\n / math.pi\n - self.cosine_s\n )\n self.cosine_log_alpha_0 = math.log(\n math.cos(self.cosine_s / (1.0 + self.cosine_s) * math.pi / 2.0)\n )\n self.schedule = schedule\n if schedule == \"cosine\":\n # For the cosine schedule, T = 1 will have numerical issues. 
So we manually set the ending time T.\n # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.\n self.T = 0.9946\n else:\n self.T = 1.0\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == \"discrete\":\n return interpolate_fn(\n t.reshape((-1, 1)),\n self.t_array.to(t.device),\n self.log_alpha_array.to(t.device),\n ).reshape((-1))\n elif self.schedule == \"linear\":\n return -0.25 * t**2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n elif self.schedule == \"cosine\":\n log_alpha_fn = lambda s: torch.log(\n torch.cos((s + self.cosine_s) / (1.0 + self.cosine_s) * math.pi / 2.0)\n )\n log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0\n return log_alpha_t\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1.0 - torch.exp(2.0 * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == \"linear\":\n tmp = (\n 2.0\n * (self.beta_1 - self.beta_0)\n * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))\n )\n Delta = self.beta_0**2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == \"discrete\":\n log_alpha = -0.5 * torch.logaddexp(\n torch.zeros((1,)).to(lamb.device), -2.0 * lamb\n )\n t = interpolate_fn(\n log_alpha.reshape((-1, 1)),\n torch.flip(self.log_alpha_array.to(lamb.device), [1]),\n torch.flip(self.t_array.to(lamb.device), [1]),\n )\n return t.reshape((-1,))\n else:\n log_alpha = -0.5 * torch.logaddexp(-2.0 * lamb, torch.zeros((1,)).to(lamb))\n t_fn = (\n lambda log_alpha_t: torch.arccos(\n torch.exp(log_alpha_t + self.cosine_log_alpha_0)\n )\n * 2.0\n * (1.0 + self.cosine_s)\n / math.pi\n - self.cosine_s\n )\n t = t_fn(log_alpha)\n return t" }, { "identifier": "model_wrapper", "path": "diffuser/utils/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.0,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. 
\"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n\n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n ``\n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. \"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n\n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs)\n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. 
A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == \"discrete\":\n return (t_continuous - 1.0 / noise_schedule.total_N) * 1000.0\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(x, t_input, **model_kwargs)\n else:\n output = model(x, t_input, cond, **model_kwargs)\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(\n t_continuous\n ), noise_schedule.marginal_std(t_continuous)\n return (x - alpha_t * output) / sigma_t\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(\n t_continuous\n ), noise_schedule.marginal_std(t_continuous)\n return alpha_t * output + sigma_t * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n return -sigma_t * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * sigma_t * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1.0 or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\", \"score\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "Losses", "path": "diffuser/models/helpers.py", "snippet": "class SinusoidalPosEmb(nn.Module):\nclass Downsample1d(nn.Module):\nclass Upsample1d(nn.Module):\nclass Conv1dBlock(nn.Module):\nclass SelfAttention(nn.Module):\nclass PositionalEncoding(nn.Module):\nclass MlpSelfAttention(nn.Module):\nclass WeightedLoss(nn.Module):\nclass WeightedStateLoss(nn.Module):\nclass ValueLoss(nn.Module):\nclass WeightedL1(WeightedLoss):\nclass WeightedL2(WeightedLoss):\nclass WeightedStateL2(WeightedStateLoss):\nclass ValueL1(ValueLoss):\nclass ValueL2(ValueLoss):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, dim):\n def forward(self, x):\n def __init__(self, 
inp_channels, out_channels, kernel_size, mish=True, n_groups=8):\n def forward(self, x):\n def __init__(\n self,\n n_channels: int,\n qk_n_channels: int,\n v_n_channels: int,\n nheads: int = 4,\n residual: bool = False,\n use_state: bool = False,\n ):\n def forward(self, x, states: torch.Tensor = None):\n def __init__(self, num_hiddens, dropout: float = 0, max_len: int = 1000):\n def forward(self, X):\n def __init__(self, dim_in, dim_hidden=128):\n def forward(self, x):\ndef extract(a, t, x_shape):\ndef cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32):\ndef apply_conditioning(x, conditions, action_dim):\n def __init__(self, weights, action_dim):\n def forward(self, pred, targ):\n def __init__(self, weights):\n def forward(self, pred, targ):\n def __init__(self, *args):\n def forward(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n def _loss(self, pred, targ):\n X = torch.arange(max_len, dtype=torch.float32).reshape(-1, 1) / torch.pow(\n 10000, torch.arange(0, num_hiddens, 2, dtype=torch.float32) / num_hiddens\n )\n X = X + self.P[:, : X.shape[1], :].to(X.device)" } ]
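The `dpm_solver_adaptive` snippet in the context list above accepts or rejects a candidate step by comparing a lower-order and a higher-order solution against a mixed absolute/relative tolerance, then rescales the logSNR step size. Below is a minimal, self-contained sketch of that acceptance rule; the names `x_lower`, `x_higher`, `x_prev`, `atol`, `rtol`, `theta`, `order` mirror the snippet, and the toy tensors at the bottom are only for illustration.

```python
import torch

def accept_step(x_lower, x_higher, x_prev, h, order, atol=0.0078, rtol=0.05, theta=0.9):
    """Error test used by the adaptive DPM-Solver loop (sketch).

    Returns (accepted, new_h): whether the candidate step passes the tolerance
    test, and the proposed next logSNR step size, following the snippet's rule.
    """
    # Mixed tolerance: elementwise max of absolute and relative thresholds.
    delta = torch.max(
        atol * torch.ones_like(x_lower),
        rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)),
    )
    # RMS norm (per sample) of the scaled difference, then worst case over the batch.
    err = (x_higher - x_lower) / delta
    E = torch.sqrt(torch.square(err.reshape(err.shape[0], -1)).mean(dim=-1)).max()
    accepted = bool(E <= 1.0)
    # Step-size controller: scale h by E^(-1/order), damped by the safety factor theta.
    # The full loop additionally caps new_h at (lambda_0 - lambda_s) so the last step lands on t_0.
    new_h = theta * h * torch.float_power(E, -1.0 / order).float()
    return accepted, new_h

# Toy usage with random tensors (illustration only).
x_prev = torch.randn(2, 3, 8, 8)
x_lower = x_prev + 0.01 * torch.randn_like(x_prev)
x_higher = x_lower + 1e-4 * torch.randn_like(x_lower)
ok, h_next = accept_step(x_lower, x_higher, x_prev, h=torch.tensor(0.05), order=2)
print(ok, float(h_next))
```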
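Similarly, the `model_wrapper` snippet above implements classifier-free guidance by evaluating the model on a doubled batch (unconditional and conditional halves) and extrapolating between the two noise predictions. The sketch below isolates just that blending step; `noise_pred_fn` is a stand-in callable, not part of the repository.

```python
import torch

def cfg_noise(noise_pred_fn, x, t, cond, uncond, guidance_scale):
    """Classifier-free guidance blend (sketch).

    noise_pred_fn(x, t, cond) -> predicted noise; `cond` / `uncond` are the
    conditional and unconditional conditioning tensors.
    """
    if guidance_scale == 1.0 or uncond is None:
        return noise_pred_fn(x, t, cond)
    # Run the conditional and unconditional branches in one batched forward pass.
    x_in = torch.cat([x] * 2)
    t_in = torch.cat([t] * 2)
    c_in = torch.cat([uncond, cond])
    noise_uncond, noise_cond = noise_pred_fn(x_in, t_in, c_in).chunk(2)
    # Extrapolate away from the unconditional prediction.
    return noise_uncond + guidance_scale * (noise_cond - noise_uncond)

# Toy usage with a fake "model" that ignores conditioning (illustration only).
fake_model = lambda x, t, c: 0.1 * x
x = torch.randn(4, 3, 8, 8)
t = torch.full((4,), 0.5)
cond = torch.randn(4, 16)
uncond = torch.zeros_like(cond)
eps = cfg_noise(fake_model, x, t, cond, uncond, guidance_scale=2.5)
print(eps.shape)  # torch.Size([4, 3, 8, 8])
```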
import functools import numpy as np import torch import torch.nn.functional as F import diffuser.utils as utils from torch import nn from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper from .helpers import Losses, apply_conditioning, cosine_beta_schedule, extract
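The import statement above pulls in `DPM_Solver`, `NoiseScheduleVP` and `model_wrapper` from `diffuser.utils.dpm_solver`. The following is a hedged sketch of how these pieces are typically wired together for fast sampling, following the usage examples in the DPM-Solver docstrings quoted above; the `betas` array and `toy_model` are placeholders (not part of this repository), and `condition_func` is passed as the identity only because this repo's `sample` signature takes it as an argument.

```python
import torch
from diffuser.utils.dpm_solver import DPM_Solver, NoiseScheduleVP, model_wrapper

# Placeholder schedule and model (stand-ins, not part of this repository).
betas = torch.linspace(1e-4, 2e-2, 1000)            # toy discrete-time beta array
toy_model = lambda x, t_input: torch.zeros_like(x)  # pretends to predict noise

ns = NoiseScheduleVP("discrete", betas=betas)       # wrap the forward SDE
model_fn = model_wrapper(toy_model, ns, model_type="noise", guidance_type="uncond")
dpm_solver = DPM_Solver(model_fn, ns, algorithm_type="dpmsolver++")

x_T = torch.randn(4, 8)                             # prior sample at t = T
x_0 = dpm_solver.sample(
    x_T,
    condition_func=lambda x: x,  # identity stand-in for this repo's conditioning hook
    steps=20,
    order=2,
    skip_type="time_uniform",
    method="multistep",
)
```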
21,225
clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights) self.loss_fn = Losses[loss_type](loss_weights, self.action_dim) def get_loss_weights(self, action_weight, discount, weights_dict): """ sets loss coefficients for trajectory action_weight : float coefficient on first action loss discount : float multiplies t^th timestep of trajectory loss by discount**t weights_dict : dict { i: c } multiplies dimension i of observation loss by c """ self.action_weight = action_weight dim_weights = torch.ones(self.transition_dim, dtype=torch.float32) # set loss coefficients for dimensions of observation if weights_dict is None: weights_dict = {} for ind, w in weights_dict.items(): dim_weights[self.action_dim + ind] *= w # decay loss with trajectory timestep: discount**t discounts = discount ** torch.arange( self.horizon + self.history_horizon, dtype=torch.float ) discounts = discounts / discounts.mean() loss_weights = torch.einsum("h,t->ht", discounts, dim_weights) loss_weights = loss_weights.unsqueeze(1).expand(-1, self.n_agents, -1).clone() # manually set a0 weight loss_weights[self.history_horizon, :, : self.action_dim] = action_weight return loss_weights # ------------------------------------------ sampling 
------------------------------------------# def predict_start_from_noise(self, x_t, t, noise): """ if self.predict_epsilon, model output is (scaled) noise; otherwise, model predicts x0 directly """ if self.predict_epsilon: return (
class GaussianDiffusion(nn.Module): def __init__( self, model, n_agents, horizon, history_horizon, observation_dim, action_dim, n_timesteps=1000, loss_type="l1", clip_denoised=False, predict_epsilon=True, action_weight=1.0, loss_discount=1.0, loss_weights=None, returns_condition=False, condition_guidance_w=0.1, agent_share_noise=False, data_encoder=utils.IdentityEncoder(), **kwargs, ): super().__init__() self.n_agents = n_agents self.horizon = horizon self.history_horizon = history_horizon self.observation_dim = observation_dim self.action_dim = action_dim self.transition_dim = observation_dim + action_dim self.model = model self.returns_condition = returns_condition self.condition_guidance_w = condition_guidance_w self.agent_share_noise = agent_share_noise self.data_encoder = data_encoder betas = cosine_beta_schedule(n_timesteps) alphas = 1.0 - betas alphas_cumprod = torch.cumprod(alphas, axis=0) alphas_cumprod_prev = torch.cat([torch.ones(1), alphas_cumprod[:-1]]) self.n_timesteps = int(n_timesteps) self.clip_denoised = clip_denoised self.predict_epsilon = predict_epsilon self.register_buffer("betas", betas) self.register_buffer("alphas_cumprod", alphas_cumprod) self.register_buffer("alphas_cumprod_prev", alphas_cumprod_prev) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer("sqrt_alphas_cumprod", torch.sqrt(alphas_cumprod)) self.register_buffer( "sqrt_one_minus_alphas_cumprod", torch.sqrt(1.0 - alphas_cumprod) ) self.register_buffer( "log_one_minus_alphas_cumprod", torch.log(1.0 - alphas_cumprod) ) self.register_buffer( "sqrt_recip_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod) ) self.register_buffer( "sqrt_recipm1_alphas_cumprod", torch.sqrt(1.0 / alphas_cumprod - 1) ) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = ( betas * (1.0 - alphas_cumprod_prev) / (1.0 - alphas_cumprod) ) self.register_buffer("posterior_variance", posterior_variance) # log calculation clipped because the posterior variance # is 0 at the beginning of the diffusion chain self.register_buffer( "posterior_log_variance_clipped", torch.log(torch.clamp(posterior_variance, min=1e-20)), ) self.register_buffer( "posterior_mean_coef1", betas * np.sqrt(alphas_cumprod_prev) / (1.0 - alphas_cumprod), ) self.register_buffer( "posterior_mean_coef2", (1.0 - alphas_cumprod_prev) * np.sqrt(alphas) / (1.0 - alphas_cumprod), ) # get loss coefficients and initialize objective self.loss_type = loss_type loss_weights = self.get_loss_weights(action_weight, loss_discount, loss_weights) self.loss_fn = Losses[loss_type](loss_weights, self.action_dim) def get_loss_weights(self, action_weight, discount, weights_dict): """ sets loss coefficients for trajectory action_weight : float coefficient on first action loss discount : float multiplies t^th timestep of trajectory loss by discount**t weights_dict : dict { i: c } multiplies dimension i of observation loss by c """ self.action_weight = action_weight dim_weights = torch.ones(self.transition_dim, dtype=torch.float32) # set loss coefficients for dimensions of observation if weights_dict is None: weights_dict = {} for ind, w in weights_dict.items(): dim_weights[self.action_dim + ind] *= w # decay loss with trajectory timestep: discount**t discounts = discount ** torch.arange( self.horizon + self.history_horizon, dtype=torch.float ) discounts = discounts / discounts.mean() loss_weights = torch.einsum("h,t->ht", discounts, dim_weights) loss_weights = loss_weights.unsqueeze(1).expand(-1, self.n_agents, -1).clone() # manually set a0 weight 
loss_weights[self.history_horizon, :, : self.action_dim] = action_weight return loss_weights # ------------------------------------------ sampling ------------------------------------------# def predict_start_from_noise(self, x_t, t, noise): """ if self.predict_epsilon, model output is (scaled) noise; otherwise, model predicts x0 directly """ if self.predict_epsilon: return (
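For reference, the buffers registered in the `__init__` above (`posterior_variance`, `posterior_mean_coef1`, `posterior_mean_coef2`) are the closed-form Gaussian posterior q(x_{t-1} | x_t, x_0) of the DDPM forward process. With alpha_bar_t the cumulative product of alpha_t, the standard expressions are:

```latex
\tilde{\beta}_t = \frac{1-\bar{\alpha}_{t-1}}{1-\bar{\alpha}_t}\,\beta_t,
\qquad
\tilde{\mu}_t(x_t, x_0) =
\underbrace{\frac{\sqrt{\bar{\alpha}_{t-1}}\,\beta_t}{1-\bar{\alpha}_t}}_{\texttt{posterior\_mean\_coef1}} x_0
\;+\;
\underbrace{\frac{\sqrt{\alpha_t}\,(1-\bar{\alpha}_{t-1})}{1-\bar{\alpha}_t}}_{\texttt{posterior\_mean\_coef2}} x_t
```

Because alpha_bar_{t-1} is prepended with 1, the posterior variance is exactly 0 at the first diffusion step, which is why the code clamps it to 1e-20 before taking the log for `posterior_log_variance_clipped`.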
extract(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t
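The `next_line` field above is the first term of `predict_start_from_noise`, i.e. sqrt(1/alpha_bar_t) * x_t. The buffers `sqrt_recip_alphas_cumprod` and `sqrt_recipm1_alphas_cumprod` registered earlier support the standard DDPM inversion x_0 = sqrt(1/alpha_bar_t) * x_t - sqrt(1/alpha_bar_t - 1) * eps. Below is a small, self-contained check of that identity with toy numbers; it is independent of the repository and does not claim to reproduce the elided remainder of the method.

```python
import torch

def predict_x0_from_eps(x_t, eps, sqrt_recip_abar_t, sqrt_recipm1_abar_t):
    """x0 = sqrt(1/abar_t) * x_t - sqrt(1/abar_t - 1) * eps (standard DDPM inversion)."""
    return sqrt_recip_abar_t * x_t - sqrt_recipm1_abar_t * eps

# Toy check: x0 is recovered exactly when eps is the true forward noise.
abar_t = torch.tensor(0.7)
x0 = torch.randn(2, 5)
eps = torch.randn_like(x0)
x_t = abar_t.sqrt() * x0 + (1 - abar_t).sqrt() * eps
x0_rec = predict_x0_from_eps(x_t, eps, (1 / abar_t).sqrt(), (1 / abar_t - 1).sqrt())
print(torch.allclose(x0_rec, x0, atol=1e-6))  # True
```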
3
2023-10-13 13:03:53+00:00
24k
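This record's `GaussianDiffusion.__init__` builds its beta schedule with `cosine_beta_schedule(n_timesteps)`; only the signature `cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32)` appears in the context list, not the body. The sketch below shows the standard cosine schedule of Nichol & Dhariwal (2021), which a helper with that signature usually implements; the 0.999 clip is the conventional choice and an assumption here.

```python
import math
import torch

def cosine_beta_schedule(timesteps, s=0.008, dtype=torch.float32):
    """Betas derived from a squared-cosine alpha_bar curve (standard recipe, sketch)."""
    steps = timesteps + 1
    t = torch.linspace(0, timesteps, steps, dtype=torch.float64) / timesteps
    alphas_cumprod = torch.cos((t + s) / (1 + s) * math.pi / 2) ** 2
    alphas_cumprod = alphas_cumprod / alphas_cumprod[0]
    betas = 1 - (alphas_cumprod[1:] / alphas_cumprod[:-1])
    return torch.clip(betas, 0, 0.999).to(dtype)  # 0.999 clip: assumption, standard choice

betas = cosine_beta_schedule(1000)
print(betas.shape)  # torch.Size([1000])
```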
hellloxiaotian/KDNet
train_KDNet.py
[ { "identifier": "attempt_load", "path": "models/experimental.py", "snippet": "def attempt_load(weights, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n # print('weights', weights) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n for w in weights if isinstance(weights, list) else [weights]:\n # attempt_download(w) # /runs/train/yolov7_distillation19/weights/epoch_074.pt\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].float().fuse().eval()) # FP32 model\n \n # Compatibility updates\n for m in model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n \n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "attempt_loadv5", "path": "models/experimental.py", "snippet": "def attempt_loadv5(weights, device=None, inplace=True, fuse=True):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n from models.yolo import Detect, Model\n\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n ckpt = torch.load(attempt_download(w), map_location='cpu') # load\n ckpt = (ckpt.get('ema') or ckpt['model']).to(device).float() # FP32 model\n\n # Model compatibility updates\n if not hasattr(ckpt, 'stride'):\n ckpt.stride = torch.tensor([32.])\n if hasattr(ckpt, 'names') and isinstance(ckpt.names, (list, tuple)):\n ckpt.names = dict(enumerate(ckpt.names)) # convert to dict\n\n model.append(ckpt.fuse().eval() if fuse and hasattr(ckpt, 'fuse') else ckpt.eval()) # model in eval mode\n\n # Module compatibility updates\n for m in model.modules():\n t = type(m)\n if t in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU, Detect, Model):\n m.inplace = inplace # torch 1.7.0 compatibility\n if t is Detect and not isinstance(m.anchor_grid, list):\n delattr(m, 'anchor_grid')\n setattr(m, 'anchor_grid', [torch.zeros(1)] * m.nl)\n elif t is nn.Upsample and not hasattr(m, 'recompute_scale_factor'):\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n\n # Return model\n if len(model) == 1:\n return model[-1]\n\n # Return detection ensemble\n print(f'Ensemble created with {weights}\\n')\n for k in 'names', 'nc', 'yaml':\n setattr(model, k, getattr(model[0], k))\n model.stride = model[torch.argmax(torch.tensor([m.stride.max() for m in model])).int()].stride # max stride\n assert all(model[0].nc == m.nc for m in model), f'Models have different class counts: {[m.nc for m in model]}'\n return model" }, { "identifier": "attempt_load_zxy", "path": "models/experimental.py", "snippet": "def attempt_load_zxy(weights, device, map_location=None):\n # Loads an ensemble of models weights=[a,b,c] or a single model weights=[a] or weights=a\n model = Ensemble()\n for w in weights if isinstance(weights, list) else [weights]:\n attempt_download(w)\n ckpt = torch.load(w, map_location=map_location) # load\n model.append(ckpt['ema' if ckpt.get('ema') else 'model'].to(device).float().fuse().eval()) # FP32 model\n\n # Compatibility updates\n for m in 
model.modules():\n if type(m) in [nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU]:\n m.inplace = True # pytorch 1.7.0 compatibility\n elif type(m) is nn.Upsample:\n m.recompute_scale_factor = None # torch 1.11.0 compatibility\n elif type(m) is Conv:\n m._non_persistent_buffers_set = set() # pytorch 1.6.0 compatibility\n\n if len(model) == 1:\n return model[-1] # return model\n else:\n print('Ensemble created with %s\\n' % weights)\n for k in ['names', 'stride']:\n setattr(model, k, getattr(model[-1], k))\n return model # return ensemble" }, { "identifier": "Model", "path": "models/yolo.py", "snippet": "class Model(nn.Module):\n # def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes\n def __init__(self, cfg='yolor-csp-c.yaml', ch=3, nc=None, anchors=None): # model, input channels, number of classes\n super(Model, self).__init__()\n self.traced = False\n if isinstance(cfg, dict):\n self.yaml = cfg # model dict\n else: # is *.yaml\n import yaml # for torch hub\n self.yaml_file = Path(cfg).name\n with open(cfg) as f:\n self.yaml = yaml.load(f, Loader=yaml.SafeLoader) # model dict\n\n # Define model\n ch = self.yaml['ch'] = self.yaml.get('ch', ch) # input channels\n if nc and nc != self.yaml['nc']:\n logger.info(f\"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}\")\n self.yaml['nc'] = nc # override yaml value\n if anchors:\n logger.info(f'Overriding model.yaml anchors with anchors={anchors}')\n self.yaml['anchors'] = round(anchors) # override yaml value\n self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist\n # self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]).cuda() # model, savelist\n self.names = [str(i) for i in range(self.yaml['nc'])] # default names\n # print([x.shape for x in self.forward(torch.zeros(1, ch, 64, 64))])\n\n # Build strides, anchors\n # m = self.model[-1] # Detect()\n m = self.model[-1] # Detect()\n if isinstance(m, Detect):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IDetect):\n print('yolo.py-IDetect')\n # print('m', m) # m IDetect\n m.cuda()\n s = 256 # 2x min stride\n # m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]).cuda() # forward\n # print('m.device2', m.device)\n check_anchor_order(m)\n # print('m.device3', m.device)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IAuxDetect):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))[:4]]) # forward\n #print(m.stride)\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_aux_biases() # only run once\n # print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IBin):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases_bin() # only run once\n # 
print('Strides: %s' % m.stride.tolist())\n if isinstance(m, IKeypoint):\n s = 256 # 2x min stride\n m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))]) # forward\n check_anchor_order(m)\n m.anchors /= m.stride.view(-1, 1, 1)\n self.stride = m.stride\n self._initialize_biases_kpt() # only run once\n # print('Strides: %s' % m.stride.tolist())\n\n # Init weights, biases\n initialize_weights(self)\n self.info()\n logger.info('')\n\n def forward(self, x, augment=False, profile=False):\n # print('x', x.shape)\n if augment:\n img_size = x.shape[-2:] # height, width\n s = [1, 0.83, 0.67] # scales\n f = [None, 3, None] # flips (2-ud, 3-lr)\n y = [] # outputs\n for si, fi in zip(s, f):\n xi = scale_img(x.flip(fi) if fi else x, si, gs=int(self.stride.max()))\n yi = self.forward_once(xi)[0] # forward\n # cv2.imwrite(f'img_{si}.jpg', 255 * xi[0].cpu().numpy().transpose((1, 2, 0))[:, :, ::-1]) # save\n yi[..., :4] /= si # de-scale\n if fi == 2:\n yi[..., 1] = img_size[0] - yi[..., 1] # de-flip ud\n elif fi == 3:\n yi[..., 0] = img_size[1] - yi[..., 0] # de-flip lr\n y.append(yi)\n # print('y', y.shape)\n return torch.cat(y, 1), None # augmented inference, train\n else:\n return self.forward_once(x, profile) # single-scale inference, train\n\n def forward_once(self, x, profile=False):\n # print('x1', x.shape)\n y, dt = [], [] # outputs\n for m in self.model:\n if m.f != -1: # if not from previous layer\n x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f] # from earlier layers\n\n if not hasattr(self, 'traced'):\n self.traced=False\n\n if self.traced:\n if isinstance(m, Detect) or isinstance(m, IDetect) or isinstance(m, IAuxDetect) or isinstance(m, IKeypoint):\n break\n\n # print('profile', profile) # Flase\n if profile:\n c = isinstance(m, (Detect, IDetect, IAuxDetect, IBin))\n o = thop.profile(m, inputs=(x.copy() if c else x,), verbose=False)[0] / 1E9 * 2 if thop else 0 # FLOPS\n # print('o', o.shape)\n for _ in range(10):\n m(x.copy() if c else x)\n t = time_synchronized()\n for _ in range(10):\n m(x.copy() if c else x)\n dt.append((time_synchronized() - t) * 100)\n print('%10.1f%10.0f%10.1fms %-40s' % (o, m.np, dt[-1], m.type))\n\n # print('x3', x.shape)\n # print('m.i', m.i) # =len(y)\n x = m(x) # run\\\n \n y.append(x if m.i in self.save else None) # save output\n # print('x4', x.shape)\n\n if profile:\n print('%.1fms total' % sum(dt))\n\n # print('x', len(x)) # 3\n return x\n\n def _initialize_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _initialize_aux_biases(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, mi2, s in zip(m.m, m.m2, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) 
** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n b2 = mi2.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b2.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b2.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi2.bias = torch.nn.Parameter(b2.view(-1), requires_grad=True)\n\n def _initialize_biases_bin(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Bin() module\n bc = m.bin_count\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n old = b[:, (0,1,2,bc+3)].data\n obj_idx = 2*bc+4\n b[:, :obj_idx].data += math.log(0.6 / (bc + 1 - 0.99))\n b[:, obj_idx].data += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b[:, (obj_idx+1):].data += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n b[:, (0,1,2,bc+3)].data = old\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _initialize_biases_kpt(self, cf=None): # initialize biases into Detect(), cf is class frequency\n # https://arxiv.org/abs/1708.02002 section 3.3\n # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.\n m = self.model[-1] # Detect() module\n for mi, s in zip(m.m, m.stride): # from\n b = mi.bias.view(m.na, -1) # conv.bias(255) to (3,85)\n b.data[:, 4] += math.log(8 / (640 / s) ** 2) # obj (8 objects per 640 image)\n b.data[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum()) # cls\n mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)\n\n def _print_biases(self):\n m = self.model[-1] # Detect() module\n for mi in m.m: # from\n b = mi.bias.detach().view(m.na, -1).T # conv.bias(255) to (3,85)\n print(('%6g Conv2d.bias:' + '%10.3g' * 6) % (mi.weight.shape[1], *b[:5].mean(1).tolist(), b[5:].mean()))\n\n # def _print_weights(self):\n # for m in self.model.modules():\n # if type(m) is Bottleneck:\n # print('%10.3g' % (m.w.detach().sigmoid() * 2)) # shortcut weights\n\n def fuse(self): # fuse model Conv2d() + BatchNorm2d() layers\n print('Fusing layers... ')\n for m in self.model.modules():\n if isinstance(m, RepConv):\n #print(f\" fuse_repvgg_block\")\n m.fuse_repvgg_block()\n elif isinstance(m, RepConv_OREPA):\n #print(f\" switch_to_deploy\")\n m.switch_to_deploy()\n elif type(m) is Conv and hasattr(m, 'bn'):\n m.conv = fuse_conv_and_bn(m.conv, m.bn) # update conv\n delattr(m, 'bn') # remove batchnorm\n m.forward = m.fuseforward # update forward\n elif isinstance(m, (IDetect, IAuxDetect)):\n m.fuse()\n m.forward = m.fuseforward\n self.info()\n return self\n\n def nms(self, mode=True): # add or remove NMS module\n present = type(self.model[-1]) is NMS # last layer is NMS\n if mode and not present:\n print('Adding NMS... ')\n m = NMS() # module\n m.f = -1 # from\n m.i = self.model[-1].i + 1 # index\n self.model.add_module(name='%s' % m.i, module=m) # add\n self.eval()\n elif not mode and present:\n print('Removing NMS... ')\n self.model = self.model[:-1] # remove\n return self\n\n def autoshape(self): # add autoShape module\n print('Adding autoShape... 
')\n m = autoShape(self) # wrap model\n copy_attr(m, self, include=('yaml', 'nc', 'hyp', 'names', 'stride'), exclude=()) # copy attributes\n return m\n\n def info(self, verbose=False, img_size=640): # print model information\n model_info(self, verbose, img_size)" }, { "identifier": "check_anchors", "path": "utils/autoanchor.py", "snippet": "def check_anchors(dataset, model, thr=4.0, imgsz=640):\n # Check anchor fit to data, recompute if necessary\n prefix = colorstr('autoanchor: ')\n print(f'\\n{prefix}Analyzing anchors... ', end='')\n m = model.module.model[-1] if hasattr(model, 'module') else model.model[-1] # Detect()\n shapes = imgsz * dataset.shapes / dataset.shapes.max(1, keepdims=True)\n scale = np.random.uniform(0.9, 1.1, size=(shapes.shape[0], 1)) # augment scale\n wh = torch.tensor(np.concatenate([l[:, 3:5] * s for s, l in zip(shapes * scale, dataset.labels)])).float() # wh\n\n def metric(k): # compute metric\n r = wh[:, None] / k[None]\n x = torch.min(r, 1. / r).min(2)[0] # ratio metric\n best = x.max(1)[0] # best_x\n aat = (x > 1. / thr).float().sum(1).mean() # anchors above threshold\n bpr = (best > 1. / thr).float().mean() # best possible recall\n return bpr, aat\n\n anchors = m.anchor_grid.clone().cpu().view(-1, 2) # current anchors\n bpr, aat = metric(anchors)\n print(f'anchors/target = {aat:.2f}, Best Possible Recall (BPR) = {bpr:.4f}', end='')\n if bpr < 0.98: # threshold to recompute\n print('. Attempting to improve anchors, please wait...')\n na = m.anchor_grid.numel() // 2 # number of anchors\n try:\n anchors = kmean_anchors(dataset, n=na, img_size=imgsz, thr=thr, gen=1000, verbose=False)\n except Exception as e:\n print(f'{prefix}ERROR: {e}')\n new_bpr = metric(anchors)[0]\n if new_bpr > bpr: # replace anchors\n anchors = torch.tensor(anchors, device=m.anchors.device).type_as(m.anchors)\n m.anchor_grid[:] = anchors.clone().view_as(m.anchor_grid) # for inference\n check_anchor_order(m)\n m.anchors[:] = anchors.clone().view_as(m.anchors) / m.stride.to(m.anchors.device).view(-1, 1, 1) # loss\n print(f'{prefix}New anchors saved to model. Update model *.yaml to use these anchors in the future.')\n else:\n print(f'{prefix}Original anchors better than new anchors. 
Proceeding with original anchors.')\n print('') # newline" }, { "identifier": "create_dataloader", "path": "utils/datasets.py", "snippet": "def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,\n rank=-1, world_size=1, workers=8, image_weights=False, quad=False, prefix=''):\n # Make sure only the first process in DDP process the dataset first, and the following others can use the cache\n with torch_distributed_zero_first(rank):\n dataset = LoadImagesAndLabels(path, imgsz, batch_size,\n augment=augment, # augment images\n hyp=hyp, # augmentation hyperparameters\n rect=rect, # rectangular training\n cache_images=cache,\n single_cls=opt.single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix)\n\n batch_size = min(batch_size, len(dataset))\n nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = torch.utils.data.distributed.DistributedSampler(dataset) if rank != -1 else None\n loader = torch.utils.data.DataLoader if image_weights else InfiniteDataLoader\n # Use torch.utils.data.DataLoader() if dataset.properties will update during training else InfiniteDataLoader()\n dataloader = loader(dataset,\n batch_size=batch_size,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabels.collate_fn4 if quad else LoadImagesAndLabels.collate_fn)\n return dataloader, dataset" }, { "identifier": "labels_to_class_weights", "path": "utils/general.py", "snippet": "def set_logging(rank=-1):\ndef init_seeds(seed=0):\ndef get_latest_run(search_dir='.'):\ndef isdocker():\ndef emojis(str=''):\ndef check_online():\ndef check_git_status():\ndef check_requirements(requirements='requirements.txt', exclude=()):\ndef check_img_size(img_size, s=32):\ndef check_imshow():\ndef check_file(file):\ndef check_dataset(dict):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):\ndef clip_coords(boxes, img_shape):\ndef bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7):\ndef bbox_alpha_iou(box1, box2, x1y1x2y2=False, GIoU=False, DIoU=False, CIoU=False, alpha=2, eps=1e-9):\ndef box_iou(box1, box2):\n def box_area(box):\ndef wh_iou(wh1, wh2):\ndef box_giou(box1, box2):\n def box_area(box):\ndef box_ciou(box1, box2, eps: float = 1e-7):\n def box_area(box):\ndef box_diou(box1, box2, eps: float = 1e-7):\n def box_area(box):\ndef non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=()):\ndef non_max_suppression_kpt(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,\n labels=(), kpt_label=False, nc=None, nkpt=None):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(hyp, results, yaml_file='hyp_evolved.yaml', bucket=''):\ndef apply_classifier(x, model, img, im0):\ndef 
increment_path(path, exist_ok=True, sep=''):" }, { "identifier": "attempt_download", "path": "utils/google_utils.py", "snippet": "def attempt_download(file, repo='WongKinYiu/yolov7'):\n # Attempt file download if does not exist\n file = Path(str(file).strip().replace(\"'\", '').lower())\n\n if not file.exists():\n try:\n response = requests.get(f'https://api.github.com/repos/{repo}/releases/latest').json() # github api\n assets = [x['name'] for x in response['assets']] # release assets\n tag = response['tag_name'] # i.e. 'v1.0'\n except: # fallback plan\n assets = ['yolov7.pt', 'yolov7-tiny.pt', 'yolov7x.pt', 'yolov7-d6.pt', 'yolov7-e6.pt', \n 'yolov7-e6e.pt', 'yolov7-w6.pt']\n tag = subprocess.check_output('git tag', shell=True).decode().split()[-1]\n\n name = file.name\n if name in assets:\n msg = f'{file} missing, try downloading from https://github.com/{repo}/releases/'\n redundant = False # second download option\n try: # GitHub\n url = f'https://github.com/{repo}/releases/download/{tag}/{name}'\n print(f'Downloading {url} to {file}...')\n torch.hub.download_url_to_file(url, file)\n assert file.exists() and file.stat().st_size > 1E6 # check\n except Exception as e: # GCP\n print(f'Download error: {e}')\n assert redundant, 'No secondary mirror'\n url = f'https://storage.googleapis.com/{repo}/ckpt/{name}'\n print(f'Downloading {url} to {file}...')\n os.system(f'curl -L {url} -o {file}') # torch.hub.download_url_to_file(url, weights)\n finally:\n if not file.exists() or file.stat().st_size < 1E6: # check\n file.unlink(missing_ok=True) # remove partial downloads\n print(f'ERROR: Download failure: {msg}')\n print('')\n return" }, { "identifier": "ComputeLoss", "path": "utils/loss.py", "snippet": "class ComputeLoss:\n # Compute losses\n def __init__(self, model, autobalance=False):\n super(ComputeLoss, self).__init__()\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.1, .05]) # P3-P7\n #self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.5, 0.4, .1]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets): # predictions, targets, model\n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n tcls, tbox, indices, anchors = self.build_targets(p, targets) # targets\n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = indices[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = 
b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n pxy = ps[:, :2].sigmoid() * 2. - 0.5\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio\n\n # Classification\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), tcls[i]] = self.cp\n #t[t==self.cp] = iou.detach().clamp(0).type(t.dtype)\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n loss = lbox + lobj + lcls\n return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n def build_targets(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n tcls, tbox, indices, anch = [], [], [], []\n gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. 
< g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n tbox.append(torch.cat((gxy - gij, gwh), 1)) # box\n anch.append(anchors[a]) # anchors\n tcls.append(c) # class\n\n return tcls, tbox, indices, anch" }, { "identifier": "ComputeLossOTA", "path": "utils/loss.py", "snippet": "class ComputeLossOTA:\n # Compute losses\n def __init__(self, model, autobalance=False):\n super(ComputeLossOTA, self).__init__()\n device = next(model.parameters()).device # get model device\n h = model.hyp # hyperparameters\n\n # Define criteria\n BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device))\n BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device))\n\n # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3\n self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets\n\n # Focal loss\n g = h['fl_gamma'] # focal loss gamma\n if g > 0:\n BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g)\n\n det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module\n self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7\n self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index\n self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, model.gr, h, autobalance\n for k in 'na', 'nc', 'nl', 'anchors', 'stride':\n setattr(self, k, getattr(det, k))\n\n def __call__(self, p, targets, imgs): # predictions, targets, model \n device = targets.device\n lcls, lbox, lobj = torch.zeros(1, device=device), torch.zeros(1, device=device), torch.zeros(1, device=device)\n bs, as_, gjs, gis, targets, anchors = self.build_targets(p, targets, imgs)\n pre_gen_gains = [torch.tensor(pp.shape, device=device)[[3, 2, 3, 2]] for pp in p] \n \n\n # Losses\n for i, pi in enumerate(p): # layer index, layer predictions\n b, a, gj, gi = bs[i], as_[i], gjs[i], gis[i] # image, anchor, gridy, gridx\n tobj = torch.zeros_like(pi[..., 0], device=device) # target obj\n\n n = b.shape[0] # number of targets\n if n:\n ps = pi[b, a, gj, gi] # prediction subset corresponding to targets\n\n # Regression\n grid = torch.stack([gi, gj], dim=1)\n pxy = ps[:, :2].sigmoid() * 2. - 0.5\n #pxy = ps[:, :2].sigmoid() * 3. 
- 1.\n pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i]\n pbox = torch.cat((pxy, pwh), 1) # predicted box\n selected_tbox = targets[i][:, 2:6] * pre_gen_gains[i]\n selected_tbox[:, :2] -= grid\n iou = bbox_iou(pbox.T, selected_tbox, x1y1x2y2=False, CIoU=True) # iou(prediction, target)\n lbox += (1.0 - iou).mean() # iou loss\n\n # Objectness\n tobj[b, a, gj, gi] = (1.0 - self.gr) + self.gr * iou.detach().clamp(0).type(tobj.dtype) # iou ratio\n\n # Classification\n selected_tcls = targets[i][:, 1].long()\n if self.nc > 1: # cls loss (only if multiple classes)\n t = torch.full_like(ps[:, 5:], self.cn, device=device) # targets\n t[range(n), selected_tcls] = self.cp\n lcls += self.BCEcls(ps[:, 5:], t) # BCE\n\n # Append targets to text file\n # with open('targets.txt', 'a') as file:\n # [file.write('%11.5g ' * 4 % tuple(x) + '\\n') for x in torch.cat((txy[i], twh[i]), 1)]\n\n obji = self.BCEobj(pi[..., 4], tobj)\n lobj += obji * self.balance[i] # obj loss\n if self.autobalance:\n self.balance[i] = self.balance[i] * 0.9999 + 0.0001 / obji.detach().item()\n\n if self.autobalance:\n self.balance = [x / self.balance[self.ssi] for x in self.balance]\n lbox *= self.hyp['box']\n lobj *= self.hyp['obj']\n lcls *= self.hyp['cls']\n bs = tobj.shape[0] # batch size\n\n loss = lbox + lobj + lcls\n return loss * bs, torch.cat((lbox, lobj, lcls, loss)).detach()\n\n def build_targets(self, p, targets, imgs):\n \n #indices, anch = self.find_positive(p, targets)\n indices, anch = self.find_3_positive(p, targets)\n #indices, anch = self.find_4_positive(p, targets)\n #indices, anch = self.find_5_positive(p, targets)\n #indices, anch = self.find_9_positive(p, targets)\n device = torch.device(targets.device)\n matching_bs = [[] for pp in p]\n matching_as = [[] for pp in p]\n matching_gjs = [[] for pp in p]\n matching_gis = [[] for pp in p]\n matching_targets = [[] for pp in p]\n matching_anchs = [[] for pp in p]\n \n nl = len(p) \n \n for batch_idx in range(p[0].shape[0]):\n \n b_idx = targets[:, 0]==batch_idx\n this_target = targets[b_idx]\n if this_target.shape[0] == 0:\n continue\n \n txywh = this_target[:, 2:6] * imgs[batch_idx].shape[1]\n txyxy = xywh2xyxy(txywh)\n\n pxyxys = []\n p_cls = []\n p_obj = []\n from_which_layer = []\n all_b = []\n all_a = []\n all_gj = []\n all_gi = []\n all_anch = []\n \n for i, pi in enumerate(p):\n \n b, a, gj, gi = indices[i]\n idx = (b == batch_idx)\n b, a, gj, gi = b[idx], a[idx], gj[idx], gi[idx] \n all_b.append(b)\n all_a.append(a)\n all_gj.append(gj)\n all_gi.append(gi)\n all_anch.append(anch[i][idx])\n from_which_layer.append((torch.ones(size=(len(b),)) * i).to(device))\n \n fg_pred = pi[b, a, gj, gi] \n p_obj.append(fg_pred[:, 4:5])\n p_cls.append(fg_pred[:, 5:])\n \n grid = torch.stack([gi, gj], dim=1)\n pxy = (fg_pred[:, :2].sigmoid() * 2. - 0.5 + grid) * self.stride[i] #/ 8.\n #pxy = (fg_pred[:, :2].sigmoid() * 3. - 1. 
+ grid) * self.stride[i]\n pwh = (fg_pred[:, 2:4].sigmoid() * 2) ** 2 * anch[i][idx] * self.stride[i] #/ 8.\n pxywh = torch.cat([pxy, pwh], dim=-1)\n pxyxy = xywh2xyxy(pxywh)\n pxyxys.append(pxyxy)\n \n pxyxys = torch.cat(pxyxys, dim=0)\n if pxyxys.shape[0] == 0:\n continue\n p_obj = torch.cat(p_obj, dim=0)\n p_cls = torch.cat(p_cls, dim=0)\n from_which_layer = torch.cat(from_which_layer, dim=0)\n all_b = torch.cat(all_b, dim=0)\n all_a = torch.cat(all_a, dim=0)\n all_gj = torch.cat(all_gj, dim=0)\n all_gi = torch.cat(all_gi, dim=0)\n all_anch = torch.cat(all_anch, dim=0)\n \n pair_wise_iou = box_iou(txyxy, pxyxys)\n\n pair_wise_iou_loss = -torch.log(pair_wise_iou + 1e-8)\n\n top_k, _ = torch.topk(pair_wise_iou, min(10, pair_wise_iou.shape[1]), dim=1)\n dynamic_ks = torch.clamp(top_k.sum(1).int(), min=1)\n\n gt_cls_per_image = (\n F.one_hot(this_target[:, 1].to(torch.int64), self.nc)\n .float()\n .unsqueeze(1)\n .repeat(1, pxyxys.shape[0], 1)\n )\n\n num_gt = this_target.shape[0]\n cls_preds_ = (\n p_cls.float().unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n * p_obj.unsqueeze(0).repeat(num_gt, 1, 1).sigmoid_()\n )\n\n y = cls_preds_.sqrt_()\n pair_wise_cls_loss = F.binary_cross_entropy_with_logits(\n torch.log(y/(1-y)) , gt_cls_per_image, reduction=\"none\"\n ).sum(-1)\n del cls_preds_\n \n cost = (\n pair_wise_cls_loss\n + 3.0 * pair_wise_iou_loss\n )\n\n matching_matrix = torch.zeros_like(cost, device=device)\n\n for gt_idx in range(num_gt):\n _, pos_idx = torch.topk(\n cost[gt_idx], k=dynamic_ks[gt_idx].item(), largest=False\n )\n matching_matrix[gt_idx][pos_idx] = 1.0\n\n del top_k, dynamic_ks\n anchor_matching_gt = matching_matrix.sum(0)\n if (anchor_matching_gt > 1).sum() > 0:\n _, cost_argmin = torch.min(cost[:, anchor_matching_gt > 1], dim=0)\n matching_matrix[:, anchor_matching_gt > 1] *= 0.0\n matching_matrix[cost_argmin, anchor_matching_gt > 1] = 1.0\n fg_mask_inboxes = (matching_matrix.sum(0) > 0.0).to(device)\n matched_gt_inds = matching_matrix[:, fg_mask_inboxes].argmax(0)\n \n from_which_layer = from_which_layer[fg_mask_inboxes]\n all_b = all_b[fg_mask_inboxes]\n all_a = all_a[fg_mask_inboxes]\n all_gj = all_gj[fg_mask_inboxes]\n all_gi = all_gi[fg_mask_inboxes]\n all_anch = all_anch[fg_mask_inboxes]\n \n this_target = this_target[matched_gt_inds]\n \n for i in range(nl):\n layer_idx = from_which_layer == i\n matching_bs[i].append(all_b[layer_idx])\n matching_as[i].append(all_a[layer_idx])\n matching_gjs[i].append(all_gj[layer_idx])\n matching_gis[i].append(all_gi[layer_idx])\n matching_targets[i].append(this_target[layer_idx])\n matching_anchs[i].append(all_anch[layer_idx])\n\n for i in range(nl):\n if matching_targets[i] != []:\n matching_bs[i] = torch.cat(matching_bs[i], dim=0)\n matching_as[i] = torch.cat(matching_as[i], dim=0)\n matching_gjs[i] = torch.cat(matching_gjs[i], dim=0)\n matching_gis[i] = torch.cat(matching_gis[i], dim=0)\n matching_targets[i] = torch.cat(matching_targets[i], dim=0)\n matching_anchs[i] = torch.cat(matching_anchs[i], dim=0)\n else:\n matching_bs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_as[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_gjs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_gis[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_targets[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n matching_anchs[i] = torch.tensor([], device='cuda:0', dtype=torch.int64)\n\n return matching_bs, matching_as, matching_gjs, matching_gis, 
matching_targets, matching_anchs \n\n def find_3_positive(self, p, targets):\n # Build targets for compute_loss(), input targets(image,class,x,y,w,h)\n na, nt = self.na, targets.shape[0] # number of anchors, targets\n indices, anch = [], []\n gain = torch.ones(7, device=targets.device).long() # normalized to gridspace gain\n ai = torch.arange(na, device=targets.device).float().view(na, 1).repeat(1, nt) # same as .repeat_interleave(nt)\n targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices\n\n g = 0.5 # bias\n off = torch.tensor([[0, 0],\n [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m\n # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm\n ], device=targets.device).float() * g # offsets\n\n for i in range(self.nl):\n anchors = self.anchors[i]\n gain[2:6] = torch.tensor(p[i].shape)[[3, 2, 3, 2]] # xyxy gain\n\n # Match targets to anchors\n t = targets * gain\n if nt:\n # Matches\n r = t[:, :, 4:6] / anchors[:, None] # wh ratio\n j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare\n # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2))\n t = t[j] # filter\n\n # Offsets\n gxy = t[:, 2:4] # grid xy\n gxi = gain[[2, 3]] - gxy # inverse\n j, k = ((gxy % 1. < g) & (gxy > 1.)).T\n l, m = ((gxi % 1. < g) & (gxi > 1.)).T\n j = torch.stack((torch.ones_like(j), j, k, l, m))\n t = t.repeat((5, 1, 1))[j]\n offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j]\n else:\n t = targets[0]\n offsets = 0\n\n # Define\n b, c = t[:, :2].long().T # image, class\n gxy = t[:, 2:4] # grid xy\n gwh = t[:, 4:6] # grid wh\n gij = (gxy - offsets).long()\n gi, gj = gij.T # grid xy indices\n\n # Append\n a = t[:, 6].long() # anchor indices\n indices.append((b, a, gj.clamp_(0, gain[3] - 1), gi.clamp_(0, gain[2] - 1))) # image, anchor, grid indices\n anch.append(anchors[a]) # anchors\n\n return indices, anch" }, { "identifier": "plot_images", "path": "utils/plots.py", "snippet": "def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=640, max_subplots=16):\n # Plot image grid with labels\n\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n\n # un-normalise\n if np.max(images[0]) <= 1:\n images *= 255\n\n tl = 3 # line thickness\n tf = max(tl - 1, 1) # font thickness\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n\n # Check if we should resize\n scale_factor = max_size / max(h, w)\n if scale_factor < 1:\n h = math.ceil(scale_factor * h)\n w = math.ceil(scale_factor * w)\n\n colors = color_list() # list of colors\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, img in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n\n block_x = int(w * (i // ns))\n block_y = int(h * (i % ns))\n\n img = img.transpose(1, 2, 0)\n if scale_factor < 1:\n img = cv2.resize(img, (w, h))\n\n mosaic[block_y:block_y + h, block_x:block_x + w, :] = img\n if len(targets) > 0:\n image_targets = targets[targets[:, 0] == i]\n boxes = xywh2xyxy(image_targets[:, 2:6]).T\n classes = image_targets[:, 1].astype('int')\n labels = image_targets.shape[1] == 6 # labels if no conf column\n conf = None if labels else image_targets[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if 
normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale_factor < 1: # absolute coords need scale if image scales\n boxes *= scale_factor\n boxes[[0, 2]] += block_x\n boxes[[1, 3]] += block_y\n for j, box in enumerate(boxes.T):\n cls = int(classes[j])\n color = colors[cls % len(colors)]\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = '%s' % cls if labels else '%s %.1f' % (cls, conf[j])\n plot_one_box(box, mosaic, label=label, color=color, line_thickness=tl)\n\n # Draw image filename labels\n if paths:\n label = Path(paths[i]).name[:40] # trim to 40 char\n t_size = cv2.getTextSize(label, 0, fontScale=tl / 3, thickness=tf)[0]\n cv2.putText(mosaic, label, (block_x + 5, block_y + t_size[1] + 5), 0, tl / 3, [220, 220, 220], thickness=tf,\n lineType=cv2.LINE_AA)\n\n # Image border\n cv2.rectangle(mosaic, (block_x, block_y), (block_x + w, block_y + h), (255, 255, 255), thickness=3)\n\n if fname:\n r = min(1280. / max(h, w) / ns, 1.0) # ratio to limit image size\n mosaic = cv2.resize(mosaic, (int(ns * w * r), int(ns * h * r)), interpolation=cv2.INTER_AREA)\n # cv2.imwrite(fname, cv2.cvtColor(mosaic, cv2.COLOR_BGR2RGB)) # cv2 save\n Image.fromarray(mosaic).save(fname) # PIL save\n return mosaic" }, { "identifier": "plot_labels", "path": "utils/plots.py", "snippet": "def plot_labels(labels, names=(), save_dir=Path(''), loggers=None):\n # plot dataset labels\n print('Plotting labels... ')\n c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes\n nc = int(c.max() + 1) # number of classes\n colors = color_list()\n x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height'])\n\n # seaborn correlogram\n sns.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9))\n plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200)\n plt.close()\n\n # matplotlib labels\n matplotlib.use('svg') # faster\n ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel()\n ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8)\n ax[0].set_ylabel('instances')\n if 0 < len(names) < 30:\n ax[0].set_xticks(range(len(names)))\n ax[0].set_xticklabels(names, rotation=90, fontsize=10)\n else:\n ax[0].set_xlabel('classes')\n sns.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9)\n sns.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9)\n\n # rectangles\n labels[:, 1:3] = 0.5 # center\n labels[:, 1:] = xywh2xyxy(labels[:, 1:]) * 2000\n img = Image.fromarray(np.ones((2000, 2000, 3), dtype=np.uint8) * 255)\n for cls, *box in labels[:1000]:\n ImageDraw.Draw(img).rectangle(box, width=1, outline=colors[int(cls) % 10]) # plot\n ax[1].imshow(img)\n ax[1].axis('off')\n\n for a in [0, 1, 2, 3]:\n for s in ['top', 'right', 'left', 'bottom']:\n ax[a].spines[s].set_visible(False)\n\n plt.savefig(save_dir / 'labels.jpg', dpi=200)\n matplotlib.use('Agg')\n plt.close()\n\n # loggers\n for k, v in loggers.items() or {}:\n if k == 'wandb' and v:\n v.log({\"Labels\": [v.Image(str(x), caption=x.name) for x in save_dir.glob('*labels*.jpg')]}, commit=False)" }, { "identifier": "plot_results", "path": "utils/plots.py", "snippet": "def plot_results(start=0, stop=0, bucket='', id=(), labels=(), save_dir=''):\n # Plot training 'results*.txt'. 
from utils.plots import *; plot_results(save_dir='runs/train/exp')\n fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True)\n ax = ax.ravel()\n s = ['Box', 'Objectness', 'Classification', 'Precision', 'Recall',\n 'val Box', 'val Objectness', 'val Classification', '[email protected]', '[email protected]:0.95']\n if bucket:\n # files = ['https://storage.googleapis.com/%s/results%g.txt' % (bucket, x) for x in id]\n files = ['results%g.txt' % x for x in id]\n c = ('gsutil cp ' + '%s ' * len(files) + '.') % tuple('gs://%s/results%g.txt' % (bucket, x) for x in id)\n os.system(c)\n else:\n files = list(Path(save_dir).glob('results*.txt'))\n assert len(files), 'No results.txt files found in %s, nothing to plot.' % os.path.abspath(save_dir)\n for fi, f in enumerate(files):\n try:\n results = np.loadtxt(f, usecols=[2, 3, 4, 8, 9, 12, 13, 14, 10, 11], ndmin=2).T\n n = results.shape[1] # number of rows\n x = range(start, min(stop, n) if stop else n)\n for i in range(10):\n y = results[i, x]\n if i in [0, 1, 2, 5, 6, 7]:\n y[y == 0] = np.nan # don't show zero loss values\n # y /= y[0] # normalize\n label = labels[fi] if len(labels) else f.stem\n ax[i].plot(x, y, marker='.', label=label, linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n # if i in [5, 6, 7]: # share train and val loss y axes\n # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5])\n except Exception as e:\n print('Warning: Plotting error for %s; %s' % (f, e))\n\n ax[1].legend()\n fig.savefig(Path(save_dir) / 'results.png', dpi=200)" }, { "identifier": "plot_evolution", "path": "utils/plots.py", "snippet": "def plot_evolution(yaml_file='data/hyp.finetune.yaml'): # from utils.plots import *; plot_evolution()\n # Plot hyperparameter evolution results in evolve.txt\n with open(yaml_file) as f:\n hyp = yaml.load(f, Loader=yaml.SafeLoader)\n x = np.loadtxt('evolve.txt', ndmin=2)\n f = fitness(x)\n # weights = (f - f.min()) ** 2 # for weighted results\n plt.figure(figsize=(10, 12), tight_layout=True)\n matplotlib.rc('font', **{'size': 8})\n for i, (k, v) in enumerate(hyp.items()):\n y = x[:, i + 7]\n # mu = (y * weights).sum() / weights.sum() # best weighted result\n mu = y[f.argmax()] # best single result\n plt.subplot(6, 5, i + 1)\n plt.scatter(y, f, c=hist2d(y, f, 20), cmap='viridis', alpha=.8, edgecolors='none')\n plt.plot(mu, f.max(), 'k+', markersize=15)\n plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters\n if i % 5 != 0:\n plt.yticks([])\n print('%15s: %.3g' % (k, mu))\n plt.savefig('evolve.png', dpi=200)\n print('\\nPlot saved as evolve.png')" }, { "identifier": "ModelEMA", "path": "utils/torch_utils.py", "snippet": "class ModelEMA:\n \"\"\" Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models\n Keep a moving average of everything in the model state_dict (parameters and buffers).\n This is intended to allow functionality like\n https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage\n A smoothed version of the weights is necessary for some training schemes to perform well.\n This class is sensitive where it is initialized in the sequence of model init,\n GPU assignment and distributed training wrappers.\n \"\"\"\n\n def __init__(self, model, decay=0.9999, updates=0):\n # Create EMA\n self.ema = deepcopy(model.module if is_parallel(model) else model).eval() # FP32 EMA\n # if next(model.parameters()).device.type != 'cpu':\n # self.ema.half() # FP16 EMA\n self.updates = updates # number of EMA updates\n self.decay = lambda x: decay * (1 - 
math.exp(-x / 2000)) # decay exponential ramp (to help early epochs)\n for p in self.ema.parameters():\n p.requires_grad_(False)\n\n def update(self, model):\n # Update EMA parameters\n with torch.no_grad():\n self.updates += 1\n d = self.decay(self.updates)\n\n msd = model.module.state_dict() if is_parallel(model) else model.state_dict() # model state_dict\n for k, v in self.ema.state_dict().items():\n if v.dtype.is_floating_point:\n v *= d\n v += (1. - d) * msd[k].detach()\n\n def update_attr(self, model, include=(), exclude=('process_group', 'reducer')):\n # Update EMA attributes\n copy_attr(self.ema, model, include, exclude)" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=None):\n # device = 'cpu' or '0' or '0,1,2,3'\n s = f'YOLOR 🚀 {git_describe() or date_modified()} torch {torch.__version__} ' # string\n cpu = device.lower() == 'cpu'\n if cpu:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable\n assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability\n\n cuda = not cpu and torch.cuda.is_available()\n if cuda:\n n = torch.cuda.device_count()\n if n > 1 and batch_size: # check that batch_size is compatible with device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * len(s)\n for i, d in enumerate(device.split(',') if device else range(n)):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\\n\" # bytes to MB\n else:\n s += 'CPU\\n'\n\n logger.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe\n return torch.device('cuda:0' if cuda else 'cpu')" }, { "identifier": "intersect_dicts", "path": "utils/torch_utils.py", "snippet": "def intersect_dicts(da, db, exclude=()):\n # Dictionary intersection of matching keys and shapes, omitting 'exclude' keys, using da values\n return {k: v for k, v in da.items() if k in db and not any(x in k for x in exclude) and v.shape == db[k].shape}" }, { "identifier": "torch_distributed_zero_first", "path": "utils/torch_utils.py", "snippet": "@contextmanager\ndef torch_distributed_zero_first(local_rank: int):\n \"\"\"\n Decorator to make all processes in distributed training wait for each local_master to do something.\n \"\"\"\n if local_rank not in [-1, 0]:\n torch.distributed.barrier()\n yield\n if local_rank == 0:\n torch.distributed.barrier()" }, { "identifier": "is_parallel", "path": "utils/torch_utils.py", "snippet": "def is_parallel(model):\n return type(model) in (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)" }, { "identifier": "getMask", "path": "utils/distill_utils.py", "snippet": "def getMask(batch_size, gt_boxes, img_size, feat, anchors, max_num_box, device):\r\n # [b, K, 4]\r\n gt_boxes = make_gt_boxes(gt_boxes, max_num_box, batch_size, img_size)\r\n feat_stride = img_size[0] / feat.size(2)\r\n anchors = torch.from_numpy(generate_anchors(feat_stride, anchors))\r\n feat = feat.cpu()\r\n height, width = feat.size(2), feat.size(3)\r\n feat_height, feat_width = feat.size(2), feat.size(3)\r\n shift_x = np.arange(0, feat_width) * feat_stride\r\n shift_y = np.arange(0, feat_height) * feat_stride\r\n shift_x, shift_y = np.meshgrid(shift_x, shift_y)\r\n shifts = 
torch.from_numpy(np.vstack((shift_x.ravel(), shift_y.ravel(),\r\n shift_x.ravel(), shift_y.ravel())).transpose())\r\n shifts = shifts.contiguous().type_as(feat).float()\r\n\r\n # num of anchors [3]\r\n A = anchors.size(0)\r\n K = shifts.size(0)\r\n\r\n anchors = anchors.type_as(gt_boxes)\r\n # all_anchors [K, A, 4]\r\n all_anchors = anchors.view(1, A, 4) + shifts.view(K, 1, 4)\r\n all_anchors = all_anchors.view(K * A, 4)\r\n # compute iou [all_anchors, gt_boxes]\r\n IOU_map = bbox_overlaps_batch(all_anchors, gt_boxes, img_size).view(batch_size, height, width, A, gt_boxes.shape[1])\r\n\r\n mask_batch = []\r\n for i in range(batch_size):\r\n max_iou, _ = torch.max(IOU_map[i].view(height * width * A, gt_boxes.shape[1]), dim=0)\r\n mask_per_im = torch.zeros([height, width], dtype=torch.int64).to(device)\r\n for k in range(gt_boxes.shape[1]):\r\n if torch.sum(gt_boxes[i][k]) == 0:\r\n break\r\n max_iou_per_gt = max_iou[k] * 0.5\r\n mask_per_gt = torch.sum(IOU_map[i][:, :, :, k] > max_iou_per_gt, dim=2)\r\n mask_per_im += mask_per_gt.to(device)\r\n mask_batch.append(mask_per_im)\r\n return mask_batch\r" }, { "identifier": "compute_mask_loss", "path": "utils/distill_utils.py", "snippet": "def compute_mask_loss(mask_batch, student_feature, teacher_feature, imitation_loss_weight):\r\n mask_list = []\r\n for mask in mask_batch:\r\n mask = (mask > 0).float().unsqueeze(0)\r\n mask_list.append(mask)\r\n mask_batch = torch.stack(mask_list, dim=0)\r\n norms = mask_batch.sum() * 2\r\n mask_batch_s = mask_batch.unsqueeze(4)\r\n no = student_feature.size(-1)\r\n bs, na, height, width, _ = mask_batch_s.shape\r\n mask_batch_no = mask_batch_s.expand((bs, na, height, width, no))\r\n sup_loss = (torch.pow(teacher_feature - student_feature, 2) * mask_batch_no).sum() / norms\r\n sup_loss = sup_loss * imitation_loss_weight\r\n return sup_loss\r" } ]
import argparse import logging import math import os import random import time import numpy as np import torch.distributed as dist import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import torch.optim.lr_scheduler as lr_scheduler import torch.utils.data import yaml import test # import test.py to get mAP after each epoch from copy import deepcopy from pathlib import Path from threading import Thread from torch.cuda import amp from torch.nn.parallel import DistributedDataParallel as DDP from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from models.experimental import attempt_load from models.experimental import attempt_loadv5 from models.experimental import attempt_load_zxy from models.yolo import Model from utils.autoanchor import check_anchors from utils.datasets import create_dataloader from utils.general import labels_to_class_weights, increment_path, labels_to_image_weights, init_seeds, \ fitness, strip_optimizer, get_latest_run, check_dataset, check_file, check_git_status, check_img_size, \ check_requirements, print_mutation, set_logging, one_cycle, colorstr from utils.google_utils import attempt_download from utils.loss import ComputeLoss, ComputeLossOTA from utils.plots import plot_images, plot_labels, plot_results, plot_evolution from utils.torch_utils import ModelEMA, select_device, intersect_dicts, torch_distributed_zero_first, is_parallel from utils.wandb_logging.wandb_utils import WandbLogger, check_wandb_resume from utils.distill_utils import getMask, compute_mask_loss
20,866
pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA ema = ModelEMA(model) if rank in [-1, 0] else None # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # EMA if ema and ckpt.get('ema'): ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] # Results if ckpt.get('training_results') is not None: results_file.write_text(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
% (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Trainloader
logger = logging.getLogger(__name__) def train(hyp, opt, device, tb_writer=None): logger.info(colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, total_batch_size, weights, rank, freeze = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, opt.freeze # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict is_coco = opt.data.endswith('coco.yaml') # Logging- Doing this before checking the dataset. Might update data_dict loggers = {'wandb': None} # loggers dict if rank in [-1, 0]: opt.hyp = hyp # add hyperparameters run_id = torch.load(weights, map_location=device).get('wandb_id') if weights.endswith('.pt') and os.path.isfile( weights) else None wandb_logger = WandbLogger(opt, Path(opt.save_dir).stem, run_id, data_dict) loggers['wandb'] = wandb_logger.wandb data_dict = wandb_logger.data_dict if wandb_logger.wandb: weights, epochs, hyp = opt.weights, opt.epochs, opt.hyp # WandbLogger might update weights, epochs if resuming nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len(data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % (len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') # load teacher model teacher = attempt_load_zxy(opt.teacher_weights, device=device) if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create exclude = ['anchor'] if (opt.cfg or hyp.get('anchors')) and not opt.resume else [] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device) # create with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] # Freeze freeze = [f'model.{x}.' 
for x in (freeze if len(freeze) > 1 else range(freeze[0]))] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if hasattr(v, 'im'): if hasattr(v.im, 'implicit'): pg0.append(v.im.implicit) else: for iv in v.im: pg0.append(iv.implicit) if hasattr(v, 'imc'): if hasattr(v.imc, 'implicit'): pg0.append(v.imc.implicit) else: for iv in v.imc: pg0.append(iv.implicit) if hasattr(v, 'imb'): if hasattr(v.imb, 'implicit'): pg0.append(v.imb.implicit) else: for iv in v.imb: pg0.append(iv.implicit) if hasattr(v, 'imo'): if hasattr(v.imo, 'implicit'): pg0.append(v.imo.implicit) else: for iv in v.imo: pg0.append(iv.implicit) if hasattr(v, 'ia'): if hasattr(v.ia, 'implicit'): pg0.append(v.ia.implicit) else: for iv in v.ia: pg0.append(iv.implicit) if hasattr(v, 'attn'): if hasattr(v.attn, 'logit_scale'): pg0.append(v.attn.logit_scale) if hasattr(v.attn, 'q_bias'): pg0.append(v.attn.q_bias) if hasattr(v.attn, 'v_bias'): pg0.append(v.attn.v_bias) if hasattr(v.attn, 'relative_position_bias_table'): pg0.append(v.attn.relative_position_bias_table) if hasattr(v, 'rbr_dense'): if hasattr(v.rbr_dense, 'weight_rbr_origin'): pg0.append(v.rbr_dense.weight_rbr_origin) if hasattr(v.rbr_dense, 'weight_rbr_avg_conv'): pg0.append(v.rbr_dense.weight_rbr_avg_conv) if hasattr(v.rbr_dense, 'weight_rbr_pfir_conv'): pg0.append(v.rbr_dense.weight_rbr_pfir_conv) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_idconv1'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_idconv1) if hasattr(v.rbr_dense, 'weight_rbr_1x1_kxk_conv2'): pg0.append(v.rbr_dense.weight_rbr_1x1_kxk_conv2) if hasattr(v.rbr_dense, 'weight_rbr_gconv_dw'): pg0.append(v.rbr_dense.weight_rbr_gconv_dw) if hasattr(v.rbr_dense, 'weight_rbr_gconv_pw'): pg0.append(v.rbr_dense.weight_rbr_gconv_pw) if hasattr(v.rbr_dense, 'vector'): pg0.append(v.rbr_dense.vector) if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']}) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR if opt.linear_lr: lf = lambda x: (1 - x / (epochs - 1)) * (1.0 - hyp['lrf']) + hyp['lrf'] # linear else: lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # EMA ema = ModelEMA(model) if rank in [-1, 0] else None # Resume start_epoch, 
best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # EMA if ema and ckpt.get('ema'): ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] # Results if ckpt.get('training_results') is not None: results_file.write_text(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % (weights, epochs) if epochs < start_epoch: logger.info('%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # Image sizes gs = max(int(model.stride.max()), 32) # grid size (max stride) nl = model.model[-1].nl # number of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size] # verify imgsz are gs-multiples # DP mode if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Trainloader
dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt,
5
2023-10-08 13:05:58+00:00
24k
falesiani/torch_ga
tests/test_keras.py
[ { "identifier": "GeometricProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricProductDense(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric multiplication instead of standard\n multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n units: int,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n activation='None',\n use_bias=True,\n **kwargs\n ):\n super().__init__(algebra=algebra, **kwargs)\n\n self.units = units\n self.blade_indices_kernel = torch.tensor(blade_indices_kernel, dtype=torch.int64)\n if use_bias: self.blade_indices_bias = torch.tensor(blade_indices_bias, dtype=torch.int64)\n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias: self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64) \n\n self.activation = activation\n self.use_bias = use_bias\n self.activation_fn = activations.get(activation)\n self.built = False\n\n def build(self, input_shape: list):\n if False: print(f\"input_shape={input_shape}\")\n self.num_input_units = input_shape[-2]\n shape_kernel = [\n self.units,\n self.num_input_units,\n int(self.blade_indices_kernel.shape[0])\n ]\n if False: print(f\"shape_kernel={shape_kernel}\")\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(size=shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.units, self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(size=shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-2], self.units, self.algebra.num_blades]\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel)\n\n # Perform a matrix-multiply, but using geometric product instead of\n # standard multiplication. 
To do this we do the geometric product\n # elementwise and then sum over the common axis.\n # [..., 1, I, X] * [..., O, I, X] -> [..., O, I, X] -> [..., O, X]\n # inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2)\n # result = tf.reduce_sum(self.algebra.geom_prod(\n # inputs_expanded, w_geom), axis=-2)\n\n inputs_expanded = inputs.unsqueeze(len(inputs.shape) - 2)\n result = self.algebra.geom_prod(inputs_expanded, w_geom).sum(dim=-2)\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias)\n result += b_geom\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices_kernel\":\n self.blade_indices_kernel.cpu().detach().numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.cpu().detach().numpy(),\n \"units\":\n self.units,\n # \"activation\":\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n })\n return config" }, { "identifier": "GeometricSandwichProductDense", "path": "torch_ga/layers.py", "snippet": "class GeometricSandwichProductDense(GeometricProductDense):\n \"\"\"Analagous to Keras' Dense layer but using multivector-valued matrices\n instead of scalar ones and geometric sandwich multiplication instead of\n standard multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self, algebra, units, blade_indices_kernel, blade_indices_bias=None,\n activation=None, use_bias=True, \n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\", kernel_regularizer=None,\n # bias_regularizer=None, activity_regularizer=None,\n # kernel_constraint=None, bias_constraint=None, \n **kwargs\n ):\n super().__init__(\n algebra, units,\n blade_indices_kernel,\n blade_indices_bias=blade_indices_bias,\n activation=activation,\n use_bias=use_bias,\n # kernel_initializer=kernel_initializer,\n # bias_initializer=bias_initializer,\n # kernel_regularizer=kernel_regularizer,\n # bias_regularizer=bias_regularizer,\n # activity_regularizer=activity_regularizer,\n # kernel_constraint=kernel_constraint,\n # bias_constraint=bias_constraint, \n **kwargs\n )\n self.built = False\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(self.kernel, self.blade_indices_kernel)\n\n # Same as GeometricProductDense but using R*x*~R instead of just R*x\n # inputs_expanded = tf.expand_dims(inputs, axis=inputs.shape.ndims - 2)\n # result = tf.reduce_sum(\n # self.algebra.geom_prod(\n # w_geom,\n # self.algebra.geom_prod(\n # inputs_expanded,\n # self.algebra.reversion(w_geom)\n # )\n # ),\n # axis=-2\n # )\n # if self.bias is not None:\n # b_geom = self.algebra.from_tensor(\n # self.bias, self.blade_indices_bias)\n # result += b_geom\n\n # return self.activation(result)\n\n inputs_expanded = inputs.unsqueeze(len(inputs.shape) - 2)\n result = self.algebra.geom_prod( w_geom, self.algebra.geom_prod(inputs_expanded, self.algebra.reversion(w_geom))).sum(dim=-2)\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(self.bias, self.blade_indices_bias)\n result += b_geom\n if self.activation_fn:\n result = self.activation_fn(result)\n return result" }, { "identifier": "GeometricProductElementwise", "path": "torch_ga/layers.py", "snippet": "class 
GeometricProductElementwise(GeometricAlgebraLayer):\n \"\"\"Performs the elementwise geometric product with a list of multivectors\n with as many elements as there are input units.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n activation=None,\n use_bias=True,\n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\",\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n **kwargs\n ):\n # super().__init__(algebra=algebra, activity_regularizer=activity_regularizer, **kwargs)\n super().__init__(algebra=algebra, **kwargs)\n\n self.blade_indices_kernel = torch.tensor(blade_indices_kernel, dtype=torch.int64)\n if use_bias:\n self.blade_indices_bias = torch.tensor(blade_indices_bias, dtype=torch.int64)\n \n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias:\n # self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64)\n\n self.activation_fn = activations.get(activation)\n self.use_bias = use_bias\n # self.kernel_initializer = initializers.get(kernel_initializer)\n # self.bias_initializer = initializers.get(bias_initializer)\n # self.kernel_regularizer = regularizers.get(kernel_regularizer)\n # self.bias_regularizer = regularizers.get(bias_regularizer)\n # self.kernel_constraint = constraints.get(kernel_constraint)\n # self.bias_constraint = constraints.get(bias_constraint)\n self.built = False\n\n def build(self, input_shape: torch.Size):\n self.num_input_units = input_shape[-2]\n shape_kernel = [\n self.num_input_units,\n self.blade_indices_kernel.shape[0]\n ]\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.num_input_units,self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n\n # self.kernel = self.add_weight(\n # \"kernel\",\n # shape=shape_kernel,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # if self.use_bias:\n # shape_bias = [self.num_input_units,\n # self.blade_indices_bias.shape[0]]\n # self.bias = self.add_weight(\n # \"bias\",\n # shape=shape_bias,\n # initializer=self.bias_initializer,\n # regularizer=self.bias_regularizer,\n # constraint=self.bias_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # else:\n # self.bias = None\n self.built = True\n\n def compute_output_shape(self, input_shape):\n return torch.Size([*input_shape[:-1], self.algebra.num_blades])\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor(\n self.kernel, self.blade_indices_kernel)\n\n # Elementwise multiplication for each unit with a multivector.\n # [..., U, X] * [U, X] -> [..., U, X]\n result = self.algebra.geom_prod(inputs, w_geom)\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def 
get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices_kernel\":\n self.blade_indices_kernel.cpu().detach().numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.cpu().detach().numpy(),\n # \"activation\":\n # self.activation,\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n # \"kernel_initializer\":\n # initializers.serialize(self.kernel_initializer),\n # \"bias_initializer\":\n # initializers.serialize(self.bias_initializer),\n # \"kernel_regularizer\":\n # regularizers.serialize(self.kernel_regularizer),\n # \"bias_regularizer\":\n # regularizers.serialize(self.bias_regularizer),\n # \"activity_regularizer\":\n # regularizers.serialize(self.activity_regularizer),\n # \"kernel_constraint\":\n # constraints.serialize(self.kernel_constraint),\n # \"bias_constraint\":\n # constraints.serialize(self.bias_constraint)\n })\n return config" }, { "identifier": "GeometricSandwichProductElementwise", "path": "torch_ga/layers.py", "snippet": "class GeometricSandwichProductElementwise(GeometricProductElementwise):\n \"\"\"Performs the elementwise geometric sandwich product with a list of\n multivectors with as many elements as there are input units.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n blade_indices_kernel: Blade indices to use for the kernel parameter\n blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self, algebra, blade_indices_kernel, blade_indices_bias=None,\n activation=None, use_bias=True, \n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\", kernel_regularizer=None,\n # bias_regularizer=None, activity_regularizer=None,\n # kernel_constraint=None, bias_constraint=None, \n **kwargs\n ):\n super().__init__(\n algebra,\n blade_indices_kernel,\n blade_indices_bias=blade_indices_bias,\n activation=activation,\n use_bias=use_bias,\n # kernel_initializer=kernel_initializer,\n # bias_initializer=bias_initializer,\n # kernel_regularizer=kernel_regularizer,\n # bias_regularizer=bias_regularizer,\n # activity_regularizer=activity_regularizer,\n # kernel_constraint=kernel_constraint,\n # bias_constraint=bias_constraint, \n **kwargs\n )\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n w_geom = self.algebra.from_tensor( self.kernel, self.blade_indices_kernel)\n\n # Elementwise multiplication Rx~R for each unit with a multivector.\n # [..., U, X] * [U, X] -> [..., U, X]\n result = self.algebra.geom_prod(\n w_geom,\n self.algebra.geom_prod(\n inputs,\n self.algebra.reversion(w_geom)\n )\n )\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result" }, { "identifier": "GeometricProductConv1D", "path": "torch_ga/layers.py", "snippet": "class GeometricProductConv1D(GeometricAlgebraLayer):\n \"\"\"Analagous to Keras' Conv1D layer but using multivector-valued kernels\n instead of scalar ones and geometric product instead of\n standard multiplication.\n\n Args:\n algebra: GeometricAlgebra instance to use for the parameters\n filters: How many channels the output will have\n kernel_size: Size for the convolution kernel\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n blade_indices_kernel: Blade indices to use for the kernel parameter\n 
blade_indices_bias: Blade indices to use for the bias parameter (if used)\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n filters: int,\n kernel_size: int,\n stride: int,\n padding: str,\n blade_indices_kernel: List[int],\n blade_indices_bias: Union[None, List[int]] = None,\n dilations: Union[None, int] = None,\n activation=None,\n use_bias=True,\n # kernel_initializer=\"glorot_uniform\",\n # bias_initializer=\"zeros\",\n # kernel_regularizer=None,\n # bias_regularizer=None,\n # activity_regularizer=None,\n # kernel_constraint=None,\n # bias_constraint=None,\n **kwargs\n ):\n super().__init__(\n algebra=algebra,\n # activity_regularizer=activity_regularizer,\n **kwargs\n )\n\n self.filters = filters\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.dilations = dilations\n\n self.blade_indices_kernel = torch.tensor( blade_indices_kernel, dtype=torch.int64)\n if use_bias:\n self.blade_indices_bias = torch.tensor( blade_indices_bias, dtype=torch.int64)\n # self.blade_indices_kernel = blade_indices_kernel.to(dtype=torch.int64)\n # if use_bias:\n # self.blade_indices_bias = blade_indices_bias.to(dtype=torch.int64)\n\n self.activation_fn = activations.get(activation)\n self.use_bias = use_bias\n # self.kernel_initializer = initializers.get(kernel_initializer)\n # self.bias_initializer = initializers.get(bias_initializer)\n # self.kernel_regularizer = regularizers.get(kernel_regularizer)\n # self.bias_regularizer = regularizers.get(bias_regularizer)\n # self.kernel_constraint = constraints.get(kernel_constraint)\n # self.bias_constraint = constraints.get(bias_constraint)\n self.built = False\n\n def build(self, input_shape: torch.Size):\n # I: [..., S, C, B]\n self.num_input_filters = input_shape[-2]\n\n # K: [K, IC, OC, B]\n shape_kernel = [\n self.kernel_size,\n self.num_input_filters,\n self.filters,\n self.blade_indices_kernel.shape[0]\n ]\n self.kernel = nn.Parameter(1./np.prod(shape_kernel)*torch.randn(size=shape_kernel)).to(dtype=torch.float)\n if self.use_bias:\n shape_bias = [self.filters, self.blade_indices_bias.shape[0]]\n self.bias = nn.Parameter(1./np.prod(shape_bias)*torch.randn(size=shape_bias)).to(dtype=torch.float)\n else:\n self.bias = None\n\n # self.kernel = self.add_weight(\n # \"kernel\",\n # shape=shape_kernel,\n # initializer=self.kernel_initializer,\n # regularizer=self.kernel_regularizer,\n # constraint=self.kernel_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # if self.use_bias:\n # shape_bias = [self.filters, self.blade_indices_bias.shape[0]]\n # self.bias = self.add_weight(\n # \"bias\",\n # shape=shape_bias,\n # initializer=self.bias_initializer,\n # regularizer=self.bias_regularizer,\n # constraint=self.bias_constraint,\n # dtype=self.dtype,\n # trainable=True\n # )\n # else:\n # self.bias = None\n self.built = True\n\n def forward(self, inputs):\n if not self.built: \n self.build(inputs.shape)\n k_geom = self.algebra.from_tensor(\n self.kernel, self.blade_indices_kernel)\n\n result = self.algebra.geom_conv1d(\n inputs, k_geom,\n stride=self.stride, padding=self.padding,\n dilations=self.dilations\n )\n\n if self.bias is not None:\n b_geom = self.algebra.from_tensor(\n self.bias, self.blade_indices_bias)\n result += b_geom\n\n if self.activation_fn:\n result = self.activation_fn(result)\n return result\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"filters\":\n self.filters,\n \"kernel_size\":\n self.kernel_size,\n \"stride\":\n self.stride,\n \"padding\":\n 
self.padding,\n \"dilations\":\n self.dilations,\n \"blade_indices_kernel\":\n self.blade_indices_kernel.numpy(),\n \"blade_indices_bias\":\n self.blade_indices_bias.numpy(),\n # \"activation\":\n # activations.serialize(self.activation),\n \"use_bias\":\n self.use_bias,\n # \"kernel_initializer\":\n # initializers.serialize(self.kernel_initializer),\n # \"bias_initializer\":\n # initializers.serialize(self.bias_initializer),\n # \"kernel_regularizer\":\n # regularizers.serialize(self.kernel_regularizer),\n # \"bias_regularizer\":\n # regularizers.serialize(self.bias_regularizer),\n # \"activity_regularizer\":\n # regularizers.serialize(self.activity_regularizer),\n # \"kernel_constraint\":\n # constraints.serialize(self.kernel_constraint),\n # \"bias_constraint\":\n # constraints.serialize(self.bias_constraint)\n\n })\n\n return config" }, { "identifier": "GeometricAlgebraExp", "path": "torch_ga/layers.py", "snippet": "class GeometricAlgebraExp(GeometricAlgebraLayer):\n \"\"\"Calculates the exponential function of the input. Input must square to\n a scalar.\n\n Args:\n algebra: GeometricAlgebra instance to use\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n \"\"\"\n\n def __init__(\n self,\n algebra: GeometricAlgebra,\n square_scalar_tolerance: Union[float, None] = 1e-4,\n **kwargs\n ):\n super().__init__(algebra=algebra, **kwargs)\n self.square_scalar_tolerance = square_scalar_tolerance\n self.built = False\n\n def compute_output_shape(self, input_shape):\n return torch.Size([*input_shape[:-1], self.algebra.num_blades])\n\n def build(self,inputs_shape): self.built = True\n\n def forward(self, inputs):\n if not self.built: self.build(inputs.shape)\n return self.algebra.exp(\n inputs, square_scalar_tolerance=self.square_scalar_tolerance\n )\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"square_scalar_tolerance\": self.square_scalar_tolerance\n })\n return config" }, { "identifier": "GeometricToTensor", "path": "torch_ga/layers.py", "snippet": "class GeometricToTensor(GeometricAlgebraLayer):\n \"\"\"Layer for extracting given blades from geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n blade_indices: blade indices to extract\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int],\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n self.blade_indices = torch.tensor(blade_indices).to(dtype=torch.int64)\n # self.blade_indices = blade_indices.to(dtype=torch.int64) \n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.blade_indices.shape[0]]\n def build(self,input_shape): self.built = True\n\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n # return torch.select(inputs, self.blade_indices, axis=-1)\n x = inputs[...,self.blade_indices]\n return x\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices\": self.blade_indices.numpy()\n })\n return config" }, { "identifier": "GeometricToTensorWithKind", "path": "torch_ga/layers.py", "snippet": "class GeometricToTensorWithKind(GeometricToTensor):\n \"\"\"Layer for extracting blades of a kind from geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n kind: blade indices of kind to extract\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,\n **kwargs):\n blade_indices = algebra.get_kind_blade_indices(kind)\n 
super().__init__(algebra=algebra, blade_indices=blade_indices,\n **kwargs)" }, { "identifier": "TensorToGeometric", "path": "torch_ga/layers.py", "snippet": "class TensorToGeometric(GeometricAlgebraLayer):\n \"\"\"Layer for converting tensors with given blade indices to\n geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n blade_indices: blade indices to interpret the last axis of the\n input tensor as\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, blade_indices: List[int],\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n\n self.blade_indices = torch.tensor(blade_indices, dtype=torch.int64)\n # self.blade_indices = blade_indices.to(dtype=torch.int64) \n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.algebra.num_blades]\n\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n return self.algebra.from_tensor(inputs, blade_indices=self.blade_indices)\n def build(self,input_shape): self.built = True\n def get_config(self):\n config = super().get_config()\n config.update({\n \"blade_indices\": self.blade_indices.numpy()\n })\n return config" }, { "identifier": "TensorWithKindToGeometric", "path": "torch_ga/layers.py", "snippet": "class TensorWithKindToGeometric(GeometricAlgebraLayer):\n \"\"\"Layer for converting tensors with given blade kind to\n geometric algebra tensors.\n\n Args:\n algebra: GeometricAlgebra instance to use\n kind: blade kind indices to interpret the last axis of the\n input tensor as\n \"\"\"\n\n def __init__(self, algebra: GeometricAlgebra, kind: BladeKind,\n **kwargs):\n super().__init__(algebra=algebra, **kwargs)\n self.kind = kind\n self.built = False\n\n def compute_output_shape(self, input_shape):\n return [*input_shape[:-1], self.algebra.get_kind_blade_indices(self.kind).shape[0]]\n\n def build(self,input_shape): self.built = True\n def forward(self, inputs):\n if not self.build: self.build(inputs.shape)\n\n return self.algebra.from_tensor_with_kind(inputs, kind=self.kind)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n \"kind\": self.kind\n })\n return config" }, { "identifier": "BladeKind", "path": "torch_ga/blades.py", "snippet": "class BladeKind(Enum):\n \"\"\"Kind of blade depending on its degree.\"\"\"\n MV = \"mv\"\n EVEN = \"even\"\n ODD = \"odd\"\n SCALAR = \"scalar\"\n VECTOR = \"vector\"\n BIVECTOR = \"bivector\"\n TRIVECTOR = \"trivector\"\n PSEUDOSCALAR = \"pseudoscalar\"\n PSEUDOVECTOR = \"pseudovector\"\n PSEUDOBIVECTOR = \"pseudobivector\"\n PSEUDOTRIVECTOR = \"pseudotrivector\"" }, { "identifier": "GeometricAlgebra", "path": "torch_ga/torch_ga.py", "snippet": "class GeometricAlgebra:\n \"\"\"Class used for performing geometric algebra operations on `torch.Tensor` instances.\n Exposes methods for operating on `torch.Tensor` instances where their last\n axis is interpreted as blades of the algebra.\n Holds the metric and other quantities derived from it.\n \"\"\"\n\n def __init__(self, metric: List[float]):\n \"\"\"Creates a GeometricAlgebra object given a metric.\n The algebra will have as many basis vectors as there are\n elements in the metric.\n\n Args:\n metric: Metric as a list. 
Specifies what basis vectors square to\n \"\"\"\n self._metric = torch.tensor(metric, dtype=torch.float32)\n\n self._num_bases = len(metric)\n self._bases = list(map(str, range(self._num_bases)))\n\n self._blades, self._blade_degrees = blades_from_bases(self._bases)\n self._blade_degrees = torch.tensor(self._blade_degrees)\n self._num_blades = len(self._blades)\n self._max_degree = self._blade_degrees.max()\n\n # [Blades, Blades, Blades]\n _list = get_cayley_tensor(self.metric, self._bases, self._blades)\n # print(_list)\n if type(_list) in [list,tuple]:\n _list = np.array(_list)\n self._cayley, self._cayley_inner, self._cayley_outer = torch.tensor(\n _list,\n dtype=torch.float32\n )\n\n self._blade_mvs = torch.eye(self._num_blades)\n self._basis_mvs = self._blade_mvs[1:1+self._num_bases]\n\n # Find the dual by looking at the anti-diagonal in the Cayley tensor.\n self._dual_blade_indices = []\n self._dual_blade_signs = []\n\n for blade_index in range(self._num_blades):\n dual_index = self.num_blades - blade_index - 1\n anti_diag = self._cayley[blade_index, dual_index]\n # dual_sign = tf.gather(anti_diag, tf.where(\n # anti_diag != 0.0)[..., 0])[..., 0]\n dual_sign = anti_diag[torch.where(anti_diag != 0.0)]\n\n self._dual_blade_indices.append(dual_index)\n self._dual_blade_signs.append(dual_sign)\n\n self._dual_blade_indices = torch.tensor(\n self._dual_blade_indices, dtype=torch.int64)\n self._dual_blade_signs = torch.tensor(\n self._dual_blade_signs, dtype=torch.float32)\n\n def print(self, *args, **kwargs):\n \"\"\"Same as the default `print` function but formats `torch.Tensor`\n instances that have as many elements on their last axis\n as the algebra has blades using `mv_repr()`.\n \"\"\"\n def _is_mv(arg):\n return isinstance(arg, torch.Tensor) and len(arg.shape) > 0 and arg.shape[-1] == self.num_blades\n new_args = [self.mv_repr(arg) if _is_mv(arg) else arg for arg in args]\n\n print(*new_args, **kwargs)\n\n @property\n def metric(self) -> torch.Tensor:\n \"\"\"Metric list which contains the number that each\n basis vector in the algebra squares to\n (ie. the diagonal of the metric tensor).\n \"\"\"\n return self._metric\n\n @property\n def cayley(self) -> torch.Tensor:\n \"\"\"`MxMxM` tensor where `M` is the number of basis\n blades in the algebra. Used for calculating the\n geometric product:\n\n `a_i, b_j, cayley_ijk -> c_k`\n \"\"\"\n return self._cayley\n\n @property\n def cayley_inner(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for inner product.\"\"\"\n return self._cayley_inner\n\n @property\n def cayley_outer(self) -> torch.Tensor:\n \"\"\"Analagous to cayley but for outer product.\"\"\"\n return self._cayley_outer\n\n @property\n def blades(self) -> List[str]:\n \"\"\"List of all blade names.\n\n Blades are all possible independent combinations of\n basis vectors. Basis vectors are named starting\n from `\"0\"` and counting up. 
The scalar blade is the\n empty string `\"\"`.\n\n Example\n - Bases: `[\"0\", \"1\", \"2\"]`\n - Blades: `[\"\", \"0\", \"1\", \"2\", \"01\", \"02\", \"12\", \"012\"]`\n \"\"\"\n return self._blades\n\n @property\n def blade_mvs(self) -> torch.Tensor:\n \"\"\"List of all blade tensors in the algebra.\"\"\"\n return self._blade_mvs\n\n @property\n def dual_blade_indices(self) -> torch.Tensor:\n \"\"\"Indices of the dual blades for each blade.\"\"\"\n return self._dual_blade_indices\n\n @property\n def dual_blade_signs(self) -> torch.Tensor:\n \"\"\"Signs of the dual blades for each blade.\"\"\"\n return self._dual_blade_signs\n\n @property\n def num_blades(self) -> int:\n \"\"\"Total number of blades in the algebra.\"\"\"\n return self._num_blades\n\n @property\n def blade_degrees(self) -> torch.Tensor:\n \"\"\"List of blade-degree for each blade in the algebra.\"\"\"\n return self._blade_degrees\n\n @property\n def max_degree(self) -> int:\n \"\"\"Highest blade degree in the algebra.\"\"\"\n return self._max_degree\n\n @property\n def basis_mvs(self) -> torch.Tensor:\n \"\"\"List of basis vectors as torch.Tensor.\"\"\"\n return self._basis_mvs\n\n def get_kind_blade_indices(self, kind: BladeKind, invert: bool = False) -> torch.Tensor:\n \"\"\"Find all indices of blades of a given kind in the algebra.\n\n Args:\n kind: kind of blade to give indices for\n invert: whether to return all blades not of the kind\n\n Returns:\n indices of blades of a given kind in the algebra\n \"\"\"\n return get_blade_of_kind_indices(self.blade_degrees, kind, self.max_degree, invert=invert)\n\n def get_blade_indices_of_degree(self, degree: int) -> torch.Tensor:\n \"\"\"Find all indices of blades of the given degree.\n\n Args:\n degree: degree to return blades for\n\n Returns:\n indices of blades with the given degree in the algebra\n \"\"\"\n # return tf.gather(tf.range(self.num_blades), tf.where(self.blade_degrees == degree)[..., 0])\n return torch.range(self.num_blades)[torch.where(self.blade_degrees == degree)[..., 0]]\n\n def is_pure(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> bool:\n \"\"\"Returns whether the given tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades.\n\n Args:\n tensor: tensor to check purity for\n blade_indices: blade indices to check purity for\n\n Returns:\n Whether the tensor is purely of the given blades\n and has no non-zero values for blades not in the given blades\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n if not type(blade_indices) in [torch.Tensor]:\n blade_indices = torch.tensor(blade_indices)\n \n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_indices = torch.tensor(\n # blade_indices, dtype=torch.int64)\n\n inverted_blade_indices = invert_blade_indices(\n self.num_blades, blade_indices)\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_blade_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_blade_indices]==0).sum(dim=-1)\n\n def is_pure_kind(self, tensor: torch.Tensor, kind: BladeKind) -> bool:\n \"\"\"Returns whether the given tensor is purely of a given kind\n and has no non-zero values for blades not of the kind.\n\n Args:\n tensor: tensor to check purity for\n kind: kind of blade to check purity for\n\n Returns:\n Whether the tensor is purely of a given kind\n and has no non-zero values for blades not of the kind\n \"\"\"\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = 
tensor.to(dtype=torch.float32)\n inverted_kind_indices = self.get_kind_blade_indices(kind, invert=True)\n # print(f\"tensor={tensor}\")\n # print(f\"kind={kind}\")\n # print(f\"inverted_kind_indices={inverted_kind_indices.T}\")\n # print(f\"inverted_kind_indices.shape={inverted_kind_indices.shape}\")\n # print(f\"tensor[inverted_kind_indices]={tensor[inverted_kind_indices].T}\")\n # print(f\"tensor[inverted_kind_indices].shape={tensor[inverted_kind_indices].shape}\")\n # print(f\"tensor[inverted_kind_indices]==0={tensor[inverted_kind_indices].T==0}\")\n\n # return tf.reduce_all(tf.gather(\n # tensor,\n # inverted_kind_indices,\n # axis=-1\n # ) == 0)\n return (tensor[inverted_kind_indices]==0).sum(dim=-1)\n\n # def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n # \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n # indices. The blade indices have to align with the last axis of the\n # tensor.\n\n # Args:\n # tensor: torch.Tensor to take as values for the geometric algebra tensor\n # blade_indices: Blade indices corresponding to the tensor. Can\n # be obtained from blade names eg. using get_kind_blade_indices()\n # or as indices from the blades list property.\n\n # Returns:\n # Geometric algebra torch.Tensor from tensor and blade indices\n # \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n # # print(f\"blade_indices={blade_indices}\")\n # # print(f\"tensor={tensor}\")\n \n # _shape = tensor.shape\n # is_scalar = False\n # if len(_shape)==1 :\n # _shape_final = [1]+ [self.num_blades] \n # is_scalar = True\n # else:\n # _shape_final = list(_shape[:-1]) + [self.num_blades] \n # b = torch.zeros(_shape_final)\n \n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # # raise \"whatever\"\n # b = b.reshape(_shape_final)\n\n # # _shape_tmp = list(v.shape) + [self.num_blades] \n # # print(f\"i,v,_shape_tmp,_shape_final={i},{v},{_shape_tmp},{_shape_final},i.shape={i.shape}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp)\n # # print(f\"b={b}\")\n # # b = torch.sparse_coo_tensor(i, v, size=_shape_tmp).to_dense()\n # # b = b.reshape(_shape_final)\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n # # # Put last axis on first axis so scatter_nd becomes easier.\n # # # Later undo the transposition again.\n # # # t = tf.concat([[tensor.shape.ndims - 1],\n # # # tf.range(0, tensor.shape.ndims - 1)], axis=0)\n # # # t_inv = tf.concat([tf.range(1, tensor.shape.ndims), [0]], axis=0)\n\n # # # tensor = tf.transpose(tensor, t)\n\n # # # shape = tf.concat([\n # # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # # tf.shape(tensor, torch.int64)[1:]\n # # # ], axis=0)\n\n # # # tensor = tf.scatter_nd(\n # # # tf.expand_dims(blade_indices, axis=-1),\n # # # tensor,\n # # # shape\n # # # )\n\n # # # return tf.transpose(tensor, t_inv)\n # # # t = torch.concat([torch.tensor([len(tensor.shape) - 1]), torch.range(0, len(tensor.shape)- 1)], axis=0)\n # # # t_inv = torch.concat([torch.range(1, len(tensor.shape)), torch.tensor([0])], axis=0)\n # # t = 
[len(tensor.shape) - 1] + list(range(0, len(tensor.shape)- 1))\n # # t_inv = list(range(1, len(tensor.shape))) + [0]\n\n # # tensor = torch.permute(tensor, t)\n\n # # a= torch.tensor([self.num_blades], dtype=torch.int64)\n # # b = torch.tensor(tensor, dtype=torch.int64)[1:]\n # # print(\"a,b:\", a,b, tensor)\n\n\n # # shape = torch.concat([\n # # torch.tensor([self.num_blades], dtype=torch.int64),\n # # torch.tensor(tensor, dtype=torch.int64)[1:]\n # # ], axis=0)\n\n\n # # # tensor = torch.scatter_nd(\n # # # blade_indices.unsqueeze(-1),\n # # # tensor,\n # # # shape\n # # # )\n # # a = torch.zeros(shape)\n # # a[blade_indices] = tensor\n # # tensor = a\n\n # # return torch.permute(tensor, t_inv) \n \n\n def from_tensor(self, tensor: torch.Tensor, blade_indices: torch.Tensor) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and blade\n indices. The blade indices have to align with the last axis of the\n tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n blade_indices: Blade indices corresponding to the tensor. Can\n be obtained from blade names eg. using get_kind_blade_indices()\n or as indices from the blades list property.\n\n Returns:\n Geometric algebra torch.Tensor from tensor and blade indices\n \"\"\"\n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n tensor = tensor.to(dtype=torch.float32)\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"tensor={tensor}\")\n \n _shape = tensor.shape\n is_scalar = False\n if len(_shape)==1 :\n _shape_final = [1]+ [self.num_blades] \n is_scalar = True\n else:\n _shape_final = list(_shape[:-1]) + [self.num_blades] \n b = torch.zeros(_shape_final)\n\n if False:\n print(f\"blade_indices.shape={blade_indices.shape}\")\n print(f\"tensor.shape={tensor.shape}\")\n print(f\"_shape_final={_shape_final}\")\n \n\n\n # i = blade_indices.view([-1,1])\n # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n i = blade_indices.flatten()\n # v = tensor.flatten().unsqueeze(1)\n v = tensor.view([-1,_shape[-1]])\n b = b.view([-1,self.num_blades])\n if False:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n\n # b[:,i] = v\n try:\n b[:,i] = v\n except:\n print(f\"_shape={_shape},_shape_final={_shape_final}\")\n print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n print(f\"i={i},v={v},b={b}\")\n raise\n b = b.reshape(_shape_final)\n\n if False:\n print(f\"b.shape={b.shape}\")\n\n if is_scalar:\n # b=b.unsqueeze(0)\n b=b.squeeze(0)\n return b\n\n\n # # i = blade_indices.view([-1,1])\n # # v = tensor.flatten().view([-1,1])\n # i = blade_indices.nonzero().flatten()\n # v = tensor.flatten().unsqueeze(1)\n # b = b.view([-1,self.num_blades])\n # # b[:,i] = v\n # try:\n # b[:,i] = v\n # except:\n # print(f\"_shape={_shape},_shape_final={_shape_final}\")\n # print(f\"i.shape={i.shape},v.shape={v.shape},b.shape={b.shape}\")\n # print(f\"i={i},v={v},b={b}\")\n # raise\n # b = b.reshape(_shape_final)\n\n # if is_scalar:\n # b=b.unsqueeze(0)\n # return b\n\n \n\n def from_tensor_with_kind(self, tensor: torch.Tensor, kind: BladeKind) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor from a torch.Tensor and a kind.\n The kind's blade indices have to align with the last axis of the\n 
tensor.\n\n Args:\n tensor: torch.Tensor to take as values for the geometric algebra tensor\n kind: Kind corresponding to the tensor\n\n Returns:\n Geometric algebra torch.Tensor from tensor and kind\n \"\"\"\n # Put last axis on first axis so scatter_nd becomes easier.\n # Later undo the transposition again.\n # tensor = torch.tensor(tensor, dtype=torch.float32)\n tensor = tensor.to(dtype=torch.float32)\n kind_indices = self.get_kind_blade_indices(kind)\n if False:\n print(f\"tensor={tensor}\")\n print(f\"kind_indices={kind_indices}\")\n return self.from_tensor(tensor, kind_indices)\n\n def from_scalar(self, scalar: numbers.Number) -> torch.Tensor:\n \"\"\"Creates a geometric algebra torch.Tensor with scalar elements.\n\n Args:\n scalar: Elements to be used as scalars\n\n Returns:\n Geometric algebra torch.Tensor from scalars\n \"\"\"\n # return self.from_tensor_with_kind(tf.expand_dims(scalar, axis=-1), BladeKind.SCALAR)\n # print(\"torch.tensor([scalar]).unsqueeze(-1).shape\",torch.tensor([scalar]).unsqueeze(-1).shape)\n return self.from_tensor_with_kind(torch.tensor([scalar]).unsqueeze(-1), BladeKind.SCALAR).squeeze(0)\n\n def e(self, *blades: List[str]) -> torch.Tensor:\n \"\"\"Returns a geometric algebra torch.Tensor with the given blades set\n to 1.\n\n Args:\n blades: list of blade names, can be unnormalized\n\n Returns:\n torch.Tensor with blades set to 1\n \"\"\"\n blade_signs, blade_indices = get_blade_indices_from_names(\n blades, self.blades)\n\n assert type(blade_indices) in [torch.Tensor], \"should be a tensor\"\n if False: blade_indices = torch.tensor(blade_indices)\n\n # # Don't allow duplicate indices\n # tf.Assert(\n # blade_indices.shape[0] == tf.unique(blade_indices)[0].shape[0],\n # [blades]\n # )\n\n # x = (\n # tf.expand_dims(blade_signs, axis=-1) *\n # tf.gather(self.blade_mvs, blade_indices)\n # )\n\n # # a, b -> b\n # return tf.reduce_sum(x, axis=-2)\n\n # print(f\"blade_indices={blade_indices}\")\n # print(f\"torch.unique(blade_indices)={torch.unique(blade_indices)}\")\n # print(f\"torch.unique(blade_indices)[0]={torch.unique(blade_indices)[0]}\")\n # Don't allow duplicate indices\n # assert(\n # blade_indices.shape[0] == torch.unique(blade_indices).shape[0],\n # [blades]\n # )\n assert blade_indices.shape[0] == torch.unique(blade_indices).shape[0], \"indexes not unique\"\n\n x = blade_signs.unsqueeze(-1) * self.blade_mvs[blade_indices]\n\n # a, b -> b\n return x.sum(dim=-2) \n\n def __getattr__(self, name: str) -> torch.Tensor:\n \"\"\"Returns basis blade tensors if name was a basis.\"\"\"\n if name.startswith(\"e\") and (name[1:] == \"\" or int(name[1:]) >= 0):\n return self.e(name[1:])\n raise AttributeError\n\n def dual(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the dual of the geometric algebra tensor.\n\n Args:\n tensor: Geometric algebra tensor to return dual for\n\n Returns:\n Dual of the geometric algebra tensor\n \"\"\"\n tensor = torch.tensor(tensor, dtype=torch.float32)\n # return self.dual_blade_signs * tf.gather(tensor, self.dual_blade_indices, axis=-1)\n return self.dual_blade_signs * tensor[...,self.dual_blade_indices]\n\n def grade_automorphism(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor with odd grades negated.\n See https://en.wikipedia.org/wiki/Paravector#Grade_automorphism.\n\n Args:\n tensor: Geometric algebra tensor to return grade automorphism for\n\n Returns:\n Geometric algebra tensor with odd grades negated\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return 
mv_grade_automorphism(tensor, self.blade_degrees)\n\n def reversion(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the grade-reversed geometric algebra tensor.\n See https://en.wikipedia.org/wiki/Paravector#Reversion_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return grade-reversion for\n\n Returns:\n Grade-reversed geometric algebra tensor\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n\n return mv_reversion(tensor, self.blade_degrees)\n\n def conjugation(self, tensor: torch.Tensor) -> torch.Tensor:\n \"\"\"Combines reversion and grade automorphism.\n See https://en.wikipedia.org/wiki/Paravector#Clifford_conjugation.\n\n Args:\n tensor: Geometric algebra tensor to return conjugate for\n\n Returns:\n Geometric algebra tensor after `reversion()` and `grade_automorphism()`\n \"\"\"\n tensor = tensor.to(dtype=torch.float32)\n return self.grade_automorphism(self.reversion(tensor))\n\n def simple_inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`. Only works for elements that\n square to scalars. Faster than the general inverse.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n rev_a = self.reversion(a)\n divisor = self.geom_prod(a, rev_a)\n # print(f\"divisor={divisor}\")\n # print(f\"self.is_pure_kind(divisor, BladeKind.SCALAR)={self.is_pure_kind(divisor, BladeKind.SCALAR)}\")\n if not self.is_pure_kind(divisor, BladeKind.SCALAR):\n raise Exception(\n \"Can't invert multi-vector (inversion divisor V ~V not scalar: %s).\" % divisor)\n\n # Divide by scalar part\n return rev_a / divisor[..., :1]\n\n def reg_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the regressive product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the regressive product\n b: Geometric algebra tensor on the right hand side of\n the regressive product\n\n Returns:\n regressive product of a and b\n \"\"\"\n a = torch.tensor(a, dtype=torch.float32)\n b = torch.tensor(b, dtype=torch.float32)\n\n return self.dual(self.ext_prod(self.dual(a), self.dual(b)))\n\n def ext_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the exterior product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the exterior product\n b: Geometric algebra tensor on the right hand side of\n the exterior product\n\n Returns:\n exterior product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_outer)\n\n def geom_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the geometric product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply(a, b, self._cayley)\n\n \n def element_wise_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the element-wise product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric 
algebra tensor on the left hand side of\n the geometric product\n b: Geometric algebra tensor on the right hand side of\n the geometric product\n\n Returns:\n geometric product of a and b\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n # b = torch.tensor(b, dtype=torch.float32)\n\n # a = torch.tensor(a)\n # b = torch.tensor(b)\n\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n return mv_multiply_element_wise(a, b, self._cayley)\n\n\n def inner_prod(self, a: torch.Tensor, b: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inner product of two geometric\n algebra tensors.\n\n Args:\n a: Geometric algebra tensor on the left hand side of\n the inner product\n b: Geometric algebra tensor on the right hand side of\n the inner product\n\n Returns:\n inner product of a and b\n \"\"\"\n a = a.to(dtype=torch.float32)\n b = b.to(dtype=torch.float32)\n\n return mv_multiply(a, b, self._cayley_inner)\n\n def geom_conv1d(self, a: torch.Tensor, k: torch.Tensor,\n stride: int, padding: str,\n dilations: Union[int, None] = None) -> torch.Tensor:\n \"\"\"Returns the 1D convolution of a sequence with a geometric algebra\n tensor kernel. The convolution is performed using the geometric\n product.\n\n Args:\n a: Input geometric algebra tensor of shape\n [..., Length, ChannelsIn, Blades]\n k: Geometric algebra tensor for the convolution kernel of shape\n [KernelSize, ChannelsIn, ChannelsOut, Blades]\n stride: Stride to use for the convolution\n padding: \"SAME\" (zero-pad input length so output\n length == input length / stride) or \"VALID\" (no padding)\n Returns:\n Geometric algbra tensor of shape\n [..., OutputLength, ChannelsOut, Blades]\n representing `a` convolved with `k`\n \"\"\"\n a = a.to(dtype=torch.float32)\n k = k.to(dtype=torch.float32)\n\n # return mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n return f_mv_conv1d(a, k, self._cayley, stride=stride, padding=padding)\n\n def mv_repr(self, a: torch.Tensor) -> str:\n \"\"\"Returns a string representation for the given\n geometric algebra tensor.\n\n Args:\n a: Geometric algebra tensor to return the representation for\n\n Returns:\n string representation for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if len(a.shape) == 1:\n return \"MultiVector[%s]\" % \" + \".join(\n \"%.2f*%s\" % (value, get_blade_repr(blade_name))\n for value, blade_name\n in zip(a, self.blades)\n if value != 0\n )\n else:\n return f\"MultiVector[batch_shape={a.shape[:-1]}]\"\n\n def approx_exp(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the exponential using a centered taylor series.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n order: order of the approximation\n\n Returns:\n Approximation of `exp(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n v = self.from_scalar(1.0)\n result = self.from_scalar(1.0)\n for i in range(1, order + 1):\n v = self.geom_prod(a, v)\n # i_factorial = tf.exp(tf.math.lgamma(i + 1.0))\n i_factorial = torch.exp(torch.lgamma(torch.tensor([i + 1.0])))\n result += v / i_factorial\n return result\n\n def exp(self, a: torch.Tensor, square_scalar_tolerance: Union[float, None] = 1e-4) -> torch.Tensor:\n \"\"\"Returns the exponential of the passed geometric algebra tensor.\n Only works for multivectors that square to scalars.\n\n Args:\n a: Geometric algebra tensor to return exponential for\n square_scalar_tolerance: Tolerance to use for the square scalar check\n or None if the check should be skipped\n\n Returns:\n `exp(a)`\n \"\"\"\n # See 
https://www.euclideanspace.com/maths/algebra/clifford/algebra/functions/exponent/index.htm\n # for an explanation of how to exponentiate multivectors.\n\n self_sq = self.geom_prod(a, a)\n\n if square_scalar_tolerance is not None:\n # tf.Assert(tf.reduce_all(\n # tf.abs(self_sq[..., 1:]) < square_scalar_tolerance\n # ), [self_sq])\n \n # assert torch.equal(torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance),[self_sq]), \"not sure what\"\n assert torch.all(self_sq[..., 1:].abs() < square_scalar_tolerance), \"square_scalar_tolerance not met\"\n\n scalar_self_sq = self_sq[..., :1]\n\n # \"Complex\" square root (argument can be negative)\n s_sqrt = torch.sign(scalar_self_sq) * torch.sqrt(torch.abs(scalar_self_sq))\n\n # Square to +1: cosh(sqrt(||a||)) + a / sqrt(||a||) sinh(sqrt(||a||))\n # Square to -1: cos(sqrt(||a||)) + a / sqrt(||a||) sin(sqrt(||a||))\n # TODO: Does this work for values other than 1 too? eg. square to +0.5?\n # TODO: Find a solution that doesnt require calculating all possibilities\n # first.\n non_zero_result = torch.where(\n scalar_self_sq < 0,\n (self.from_tensor(torch.cos(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sin(s_sqrt)),\n (self.from_tensor(torch.cosh(s_sqrt), torch.tensor([0])) + a / s_sqrt * torch.sinh(s_sqrt))\n )\n\n return torch.where(scalar_self_sq == 0, self.from_scalar(1.0) + a, non_zero_result)\n\n def approx_log(self, a: torch.Tensor, order: int = 50) -> torch.Tensor:\n \"\"\"Returns an approximation of the natural logarithm using a centered\n taylor series. Only converges for multivectors where `||mv - 1|| < 1`.\n\n Args:\n a: Geometric algebra tensor to return logarithm for\n order: order of the approximation\n\n Returns:\n Approximation of `log(a)`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n result = self.from_scalar(0.0)\n\n a_minus_one = a - self.from_scalar(1.0)\n v = None\n\n for i in range(1, order + 1):\n v = a_minus_one if v is None else v * a_minus_one\n result += (((-1.0) ** i) / i) * v\n\n return -result\n\n def int_pow(self, a: torch.Tensor, n: int) -> torch.Tensor:\n \"\"\"Returns the geometric algebra tensor to the power of an integer\n using repeated multiplication.\n\n Args:\n a: Geometric algebra tensor to raise\n n: integer power to raise the multivector to\n\n Returns:\n `a` to the power of `n`\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n\n if not isinstance(n, int):\n raise Exception(\"n must be an integer.\")\n if n < 0:\n raise Exception(\"Can't raise to negative powers.\")\n\n if n == 0:\n # TODO: more efficient (ones only in scalar)\n return torch.ones_like(a) * self.e(\"\")\n\n result = a\n for i in range(n - 1):\n result = self.geom_prod(result, a)\n return result\n\n def keep_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blade_indices as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to keep\n\n Returns:\n `a` with only `blade_indices` components as non-zeros\n \"\"\"\n a = a.to(dtype=torch.float32)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # blade_values = tf.gather(a, blade_indices, axis=-1)\n blade_values = a[...,blade_indices]\n if True: \n b = self.from_tensor(blade_values, blade_indices)\n else:\n blade_mask = torch.zeros(self.num_blades)\n blade_mask[blade_indices] = 1\n b = self.from_tensor(blade_values, blade_mask)\n # print(f\"blade_values, blade_indices, b={blade_values}, {blade_indices}, {b}\")\n # 
print(f\"blade_mask={blade_mask}\")\n return b\n\n # return self.from_tensor(blade_values, blade_indices)\n\n def keep_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns it with only the given\n blades as non-zeros.\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `a` with only `blade_names` components as non-zeros\n \"\"\"\n if isinstance(blade_names, str):\n blade_names = [blade_names]\n\n _, blade_indices = get_blade_indices_from_names(blade_names, self.blades)\n\n if False:\n print(f\"self.blades={self.blades}\")\n print(f\"blade_names={blade_names}\")\n print(f\"blade_indices={blade_indices}\")\n\n return self.keep_blades(a, blade_indices)\n\n def select_blades(self, a: torch.Tensor, blade_indices: List[int]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_indices on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_indices: Indices for blades to select\n\n Returns:\n `torch.Tensor` based on `a` with `blade_indices` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32) \n # blade_indices = torch.tensor(blade_indices, dtype=torch.int64).to(dtype=torch.int64)\n blade_indices = blade_indices.to(dtype=torch.int64)\n\n # result = tf.gather(a, blade_indices, axis=-1)\n try:\n if len(a.shape)==1 or a.shape[-1]==a.size().numel():\n result = a.squeeze()[blade_indices]\n else:\n result = a[...,blade_indices]\n except:\n print(f\"a={a},blade_indices={blade_indices}\")\n print(f\"a.shape={a.shape},blade_indices.shape={blade_indices.shape},a.size().numel()={a.size().numel()}\")\n raise\n \n return result\n\n def select_blades_with_name(self, a: torch.Tensor, blade_names: Union[List[str], str]) -> torch.Tensor:\n \"\"\"Takes a geometric algebra tensor and returns a `torch.Tensor` with the\n blades in blade_names on the last axis.\n\n\n Args:\n a: Geometric algebra tensor to copy\n blade_names: Blades to keep\n\n Returns:\n `torch.Tensor` based on `a` with `blade_names` on last axis.\n \"\"\"\n a = a.to(dtype=torch.float32)\n\n is_single_blade = isinstance(blade_names, str)\n if is_single_blade:\n blade_names = [blade_names]\n\n blade_signs, blade_indices = get_blade_indices_from_names(\n blade_names, self.blades)\n\n result = blade_signs * self.select_blades(a, blade_indices)\n # if True:\n # print(f\"\")\n\n if is_single_blade:\n return result[..., 0]\n\n return result\n\n def inverse(self, a: torch.Tensor) -> torch.Tensor:\n \"\"\"Returns the inverted geometric algebra tensor\n `X^-1` such that `X * X^-1 = 1`.\n\n Using Shirokov's inverse algorithm that works in arbitrary dimensions,\n see https://arxiv.org/abs/2005.04015 Theorem 4.\n\n Args:\n a: Geometric algebra tensor to return inverse for\n\n Returns:\n inverted geometric algebra tensor\n \"\"\"\n # a = torch.tensor(a, dtype=torch.float32)\n a = a.to(dtype=torch.float32)\n if False:\n print(f\"a={a}\")\n\n n = 2 ** ((len(self.metric) + 1) // 2)\n\n # u = a.clone()\n u = a\n for k in range(1, n):\n # c = n / k * self.keep_blades_with_name(u, \"\")\n d = self.keep_blades_with_name(u, \"\")\n c = n / k * d\n u_minus_c = u - c\n if False:\n print(f\"a,d,c,u_minus_c, u = {a},{d},{c},{u_minus_c}, {u}\")\n u = self.geom_prod(a, u_minus_c)\n if False:\n print(f\"u={u}\")\n \n if False:\n print(f\"n={n}\")\n print(f\"a={a}\")\n print(f\"u={u}\")\n if not torch.all(self.is_pure_kind(u, BladeKind.SCALAR)):\n raise Exception(\n 
\"Can't invert multi-vector (det U not scalar: %s).\" % u)\n\n # adj / det\n return u_minus_c / u[..., :1]\n\n def __call__(self, a: torch.Tensor) -> MultiVector:\n \"\"\"Creates a `MultiVector` from a geometric algebra tensor.\n Mainly used as a wrapper for the algebra's functions for convenience.\n\n Args:\n a: Geometric algebra tensor to return `MultiVector` for\n\n Returns:\n `MultiVector` for `a`\n \"\"\"\n a = a.to(dtype=torch.float32)\n return MultiVector(a, self)\n # return MultiVector(torch.tensor(a), self)" } ]
import unittest as ut import h5py import torch import torch.nn as nn import torch.nn.functional as F import torch from io import BytesIO from torch_ga.layers import ( GeometricProductDense, GeometricSandwichProductDense, GeometricProductElementwise, GeometricSandwichProductElementwise, GeometricProductConv1D, GeometricAlgebraExp, GeometricToTensor, GeometricToTensorWithKind, TensorToGeometric, TensorWithKindToGeometric, ) from torch_ga.blades import BladeKind from torch_ga import GeometricAlgebra
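The context snippets above document the core GeometricAlgebra API (blade construction with e(), geom_prod, reversion, and the print/mv_repr helpers), and the import block here makes that package available to the tests. A minimal sketch of that API, assuming only that torch_ga is importable; the metric matches the [1, -1, -1, -1] algebra used throughout the snippets, while the particular vector is illustrative:

import torch
from torch_ga import GeometricAlgebra

# One basis vector squares to +1 and three square to -1, the same
# [1, -1, -1, -1] algebra the snippets and tests use throughout.
sta = GeometricAlgebra([1, -1, -1, -1])

e0, e1 = sta.e("0"), sta.e("1")            # basis blades as 16-component tensors
v = 2.0 * e0 + 3.0 * e1                    # a purely vector-valued multivector
vv = sta.geom_prod(v, sta.reversion(v))    # v ~v collapses onto the scalar blade
sta.print("v ~v =", vv)                    # scalar part is 2**2 - 3**2 = -5 under this metric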
18,943
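GeometricAlgebra.exp, documented in the snippets above, only accepts multivectors that square to a scalar, and the imported GeometricAlgebraExp layer is a thin wrapper around it. A brief hedged sketch, with the Euclidean metric and the rotation angle chosen purely for illustration:

import math
from torch_ga import GeometricAlgebra

ga = GeometricAlgebra([1, 1, 1])        # 3D Euclidean algebra with 8 blades
b = ga.e("01")                          # unit bivector, squares to -1
rotor = ga.exp(math.pi / 4 * b)         # cos(pi/4) + sin(pi/4) * e01
v = ga.e("0")
rotated = ga.geom_prod(rotor, ga.geom_prod(v, ga.reversion(rotor)))  # sandwich R v ~R
ga.print("rotated:", rotated)           # e0 turned by pi/2 within the e0-e1 plane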
self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0) geom_prod_layer = GeometricSandwichProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=result_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_sandwich_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector * ~vector + vector -> vector + trivector self.assertTrue(torch.all(sta.is_pure(result, result_indices))) class TestKerasLayersSerializable(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) assert torch.all(a == b), "%s not equal to %s" % (a, b) def _test_layer_serializable(self, layer, inputs): # Create algebra algebra = layer.algebra # Create model model = nn.Sequential(*[layer]) # Predict on inputs to compare later layer.build(inputs.shape) model_output = model(inputs) # Serialize model to virtual file # model_file = h5py.File(BytesIO(), mode="w") # model.save(model_file) model_file = "./test_model.ph" torch.save(model.state_dict(), model_file) # Load model from stream # loaded_model = tf.keras.models.load_model(model_file) device = torch.device('cpu') loaded_model = nn.Sequential(*[layer]) loaded_model.load_state_dict(torch.load(model_file, map_location=device)) # Predict on same inputs as before loaded_output = loaded_model(inputs) # Check same output for original and loaded model self.assertTensorsEqual(model_output, loaded_output) # Check same recreated algebra self.assertTensorsEqual( # algebra.metric, loaded_model.layers[0].algebra.metric algebra.metric, loaded_model[0].algebra.metric ) self.assertTensorsEqual( # algebra.cayley, loaded_model.layers[0].algebra.cayley algebra.cayley, loaded_model[0].algebra.cayley ) def test_geom_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) torch.manual_seed(0) # Create model self._test_layer_serializable(GeometricProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn(*[3, 6, sta.num_blades])) def test_sandwich_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0) self._test_layer_serializable(GeometricSandwichProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn([3, 6, sta.num_blades])) def test_geom_elementwise_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0)
torch.manual_seed(0) class TestKerasLayers(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) print(f"assertTensorsEqual(a={a},b={b})") assert torch.all(a.squeeze() == b.squeeze()), "%s not equal to %s" % (a, b) def test_tensor_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_to_geom_layer = TensorToGeometric(sta, vector_blade_indices) self.assertTensorsEqual(tensor_to_geom_layer(tensor), gt_geom_tensor) def test_tensor_with_kind_to_geometric(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([32, 4]) gt_geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] tensor_kind_to_geom_layer = TensorWithKindToGeometric( sta, BladeKind.VECTOR) self.assertTensorsEqual( tensor_kind_to_geom_layer(tensor), gt_geom_tensor) def test_geometric_to_tensor(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_layer = GeometricToTensor(sta, vector_blade_indices) self.assertTensorsEqual(geom_to_tensor_layer(geom_tensor), gt_tensor) def test_geometric_to_tensor_with_kind(self): sta = GeometricAlgebra([1, -1, -1, -1]) gt_tensor = torch.ones([32, 4]) geom_tensor = torch.concat( [torch.zeros([32, 1]), torch.ones([32, 4]), torch.zeros([32, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_to_tensor_kind_layer = GeometricToTensorWithKind( sta, BladeKind.VECTOR) self.assertTensorsEqual( geom_to_tensor_kind_layer(geom_tensor), gt_tensor) def test_geometric_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector + vector -> scalar + bivector + vector expected_result_indices = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) self.assertTrue(torch.all(sta.is_pure(result, expected_result_indices))) def test_geometric_product_dense_s_mv(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.ones([20, 6, 1]), torch.zeros([20, 6, 15])], axis=-1 ) mv_blade_indices = list(range(16)) geom_prod_layer = GeometricProductDense( sta, 8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=mv_blade_indices ) geom_prod_layer.build(geom_tensor.shape) result = geom_prod_layer(geom_tensor) print(f"test_geometric_product_dense_s_mv:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # scalar * multivector + multivector -> multivector # Check that nothing is zero (it would be extremely unlikely # but not impossible to randomly get a zero here). 
assert torch.all(result != 0.0) # self.assertTrue(tf.reduce_all(result != 0.0)) def test_geometric_product_dense_sequence(self): sta = GeometricAlgebra([1, -1, -1, -1]) tensor = torch.ones([20, 6, 4]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # vector * vector + vector -> scalar + bivector + vector scalar_bivector_blade_indices = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] sequence = nn.Sequential(*[ TensorToGeometric(sta, blade_indices=vector_blade_indices), GeometricProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=vector_blade_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ), GeometricToTensor(sta, blade_indices=scalar_bivector_blade_indices) ]) for e in sequence: e.build(tensor.shape) result = sequence(tensor) print(f"test_geometric_product_dense_sequence:") print(f"tensor={tensor}") print(f"result={result}") self.assertEqual(result.shape[-1], len(scalar_bivector_blade_indices)) def test_geometric_sandwich_product_dense_v_v(self): sta = GeometricAlgebra([1, -1, -1, -1]) geom_tensor = torch.concat( [torch.zeros([32, 6, 1]), torch.ones([32, 6, 4]), torch.zeros([32, 6, 11])], axis=-1 ) vector_blade_indices = [1, 2, 3, 4] result_indices = torch.concat([ sta.get_kind_blade_indices(BladeKind.VECTOR), sta.get_kind_blade_indices(BladeKind.TRIVECTOR) ], axis=0) geom_prod_layer = GeometricSandwichProductDense( sta, 8, blade_indices_kernel=vector_blade_indices, blade_indices_bias=result_indices, # bias_initializer=tf.keras.initializers.RandomNormal() ) result = geom_prod_layer(geom_tensor) print(f"test_geometric_sandwich_product_dense_v_v:") print(f"geom_tensor={geom_tensor}") print(f"result={result}") # vector * vector * ~vector + vector -> vector + trivector self.assertTrue(torch.all(sta.is_pure(result, result_indices))) class TestKerasLayersSerializable(ut.TestCase): def assertTensorsEqual(self, a, b): # self.assertTrue(tf.reduce_all(a == b), "%s not equal to %s" % (a, b)) assert torch.all(a == b), "%s not equal to %s" % (a, b) def _test_layer_serializable(self, layer, inputs): # Create algebra algebra = layer.algebra # Create model model = nn.Sequential(*[layer]) # Predict on inputs to compare later layer.build(inputs.shape) model_output = model(inputs) # Serialize model to virtual file # model_file = h5py.File(BytesIO(), mode="w") # model.save(model_file) model_file = "./test_model.ph" torch.save(model.state_dict(), model_file) # Load model from stream # loaded_model = tf.keras.models.load_model(model_file) device = torch.device('cpu') loaded_model = nn.Sequential(*[layer]) loaded_model.load_state_dict(torch.load(model_file, map_location=device)) # Predict on same inputs as before loaded_output = loaded_model(inputs) # Check same output for original and loaded model self.assertTensorsEqual(model_output, loaded_output) # Check same recreated algebra self.assertTensorsEqual( # algebra.metric, loaded_model.layers[0].algebra.metric algebra.metric, loaded_model[0].algebra.metric ) self.assertTensorsEqual( # algebra.cayley, loaded_model.layers[0].algebra.cayley algebra.cayley, loaded_model[0].algebra.cayley ) def test_geom_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) torch.manual_seed(0) # Create model self._test_layer_serializable(GeometricProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn(*[3, 6, sta.num_blades])) def 
test_sandwich_dense_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0) self._test_layer_serializable(GeometricSandwichProductDense( sta, units=8, blade_indices_kernel=mv_blade_indices, blade_indices_bias=vector_blade_indices ), torch.randn([3, 6, sta.num_blades])) def test_geom_elementwise_serializable(self): # Create algebra sta = GeometricAlgebra([1, -1, -1, -1]) vector_blade_indices = [1, 2, 3, 4] mv_blade_indices = list(range(16)) # Create model torch.manual_seed(0)
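Several of the tests above compose the conversion and product layers into a small stack (see test_geometric_product_dense_sequence). A hedged sketch of that pattern, reusing the blade-index lists and the unit count from the test rather than anything canonical:

import torch
import torch.nn as nn
from torch_ga import GeometricAlgebra
from torch_ga.layers import TensorToGeometric, GeometricProductDense, GeometricToTensor

sta = GeometricAlgebra([1, -1, -1, -1])
vector_blades = [1, 2, 3, 4]               # the four grade-1 blade indices
scalar_bivector_blades = list(range(11))   # grades 0, 1 and 2 of the 16 blades

pipeline = nn.Sequential(
    TensorToGeometric(sta, blade_indices=vector_blades),
    GeometricProductDense(
        sta, 8,
        blade_indices_kernel=vector_blades,
        blade_indices_bias=vector_blades),
    GeometricToTensor(sta, blade_indices=scalar_bivector_blades),
)

x = torch.ones([20, 6, 4])                 # same input shape as the sequence test
for layer in pipeline:                     # layers build lazily, as in the tests
    layer.build(x.shape)
y = pipeline(x)                            # last axis holds the 11 selected blade components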
self._test_layer_serializable(GeometricProductElementwise(
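The _test_layer_serializable helper in the code above round-trips a layer through torch.save and load_state_dict on a plain state-dict file. Condensed into a standalone sketch that mirrors the helper (the ./test_model.ph path is the one it uses, and reusing the same layer instance for the reloaded Sequential follows it as well):

import torch
import torch.nn as nn
from torch_ga import GeometricAlgebra
from torch_ga.layers import GeometricProductDense

sta = GeometricAlgebra([1, -1, -1, -1])
torch.manual_seed(0)
layer = GeometricProductDense(
    sta, units=8,
    blade_indices_kernel=list(range(16)),   # full multivector kernel
    blade_indices_bias=[1, 2, 3, 4])        # vector-valued bias

x = torch.randn(3, 6, sta.num_blades)
layer.build(x.shape)                        # creates the kernel/bias parameters
model = nn.Sequential(layer)
y_before = model(x)

torch.save(model.state_dict(), "./test_model.ph")
restored = nn.Sequential(layer)             # same layer object, as in the test
restored.load_state_dict(torch.load("./test_model.ph", map_location=torch.device("cpu")))
assert torch.all(y_before == restored(x))   # identical output after the reload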
2
2023-10-07 13:34:07+00:00
24k
Significant-Gravitas/autostandup
bot.py
[ { "identifier": "StreaksDB", "path": "streaks/streaks_db.py", "snippet": "class StreaksDB(BaseDB):\n \"\"\"\n StreaksDB class handles all operations related to the 'streaks' table.\n Inherits from the BaseDB class.\n \"\"\"\n\n def __init__(self, host, user, password, database, port):\n \"\"\"\n Initializes the StreaksDB class and creates the 'streaks' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_streaks_table()\n\n def _create_streaks_table(self):\n \"\"\"\n Creates the 'streaks' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS streaks (\n discord_id BIGINT PRIMARY KEY,\n current_streak INT DEFAULT 0,\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :param new_streak: The new streak count.\n \"\"\"\n query = \"\"\"\n INSERT INTO streaks (discord_id, current_streak)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE current_streak = %s\n \"\"\"\n params = (discord_id, new_streak, new_streak)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n :param discord_id: The Discord ID of the user.\n :return: The current streak count.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n query = \"SELECT current_streak FROM streaks WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()" }, { "identifier": "TeamMemberDB", "path": "team_members/team_member_db.py", "snippet": "class TeamMemberDB(BaseDB):\n \"\"\"\n TeamMemberDB class handles operations related to the 'team_members' table.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the TeamMemberDB class and creates the 'team_members' table if it doesn't exist.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_team_members_table()\n\n def _create_team_members_table(self):\n \"\"\"\n Creates the 'team_members' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS team_members (\n discord_id BIGINT PRIMARY KEY,\n name VARCHAR(255) NOT NULL,\n time_zone VARCHAR(50) NOT NULL,\n github_username VARCHAR(255),\n on_vacation BOOLEAN DEFAULT FALSE\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_new_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Inserts a new team member into the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param name: The name of the team member.\n :param time_zone: The time zone of the team member.\n :param github_username: The GitHub username of 
the team member.\n \"\"\"\n query = \"\"\"\n INSERT INTO team_members (discord_id, name, time_zone, github_username)\n VALUES (%s, %s, %s, %s)\n ON DUPLICATE KEY UPDATE name = %s, time_zone = %s, github_username = %s\n \"\"\"\n params = (discord_id, name, time_zone, github_username, name, time_zone, github_username)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Removes a team member from the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member to remove.\n \"\"\"\n query = \"DELETE FROM team_members WHERE discord_id = %s\"\n params = (discord_id,)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def list_all_members(self) -> List[Tuple[int, str, str, str, bool]]:\n \"\"\"\n Fetches all team members from the 'team_members' table.\n\n :return: A list of tuples, each containing the Discord ID, name, time zone, GitHub username, and vacation status of a team member.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n c = self.conn.cursor()\n try:\n c.execute(\"SELECT discord_id, name, time_zone, github_username, on_vacation FROM team_members\")\n return c.fetchall()\n finally:\n c.close()\n self.close()\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Updates the timezone of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param new_time_zone: The new timezone to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET time_zone = %s WHERE discord_id = %s\"\n params = (new_time_zone, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def set_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member in the 'team_members' table.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n query = \"UPDATE team_members SET on_vacation = %s WHERE discord_id = %s\"\n params = (on_vacation, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "UpdatesDB", "path": "updates/updates_db.py", "snippet": "class UpdatesDB(BaseDB):\n \"\"\"\n Database class for handling operations related to the 'updates' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the UpdatesDB class and creates the 'updates' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_updates_table()\n\n def _create_updates_table(self):\n \"\"\"\n Creates the 'updates' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS updates (\n id INT AUTO_INCREMENT PRIMARY KEY,\n discord_id BIGINT,\n status TEXT NOT NULL,\n summarized_status TEXT,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP,\n time_zone VARCHAR(255),\n FOREIGN KEY (discord_id) REFERENCES team_members(discord_id) ON DELETE CASCADE\n )\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update into the 'updates' 
table.\n\n :param discord_id: The Discord ID of the team member.\n :param status: The status update.\n :param time_zone: The time zone of the user.\n \"\"\"\n # Convert current UTC time to user's local time zone\n utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)\n local_now = utc_now.astimezone(pytz.timezone(time_zone))\n\n query = \"INSERT INTO updates (discord_id, status, timestamp, time_zone) VALUES (%s, %s, %s, %s)\"\n params = (discord_id, status, local_now, time_zone)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized_status for the most recent update for a given user.\n\n :param discord_id: The Discord ID of the team member.\n :param summarized_status: The summarized status update.\n \"\"\"\n query = \"\"\"\n UPDATE updates\n SET summarized_status = %s\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (summarized_status, discord_id)\n try:\n self.execute_query(query, params)\n finally:\n self.close()\n \n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n :param discord_id: The Discord ID of the user.\n :param time_zone: The time zone of the user.\n :return: The count of check-ins in the current week.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Adjusting the current time to the user's time zone\n local_tz = pytz.timezone(time_zone)\n local_now = datetime.now(local_tz)\n \n # Getting the Monday of the current week in the user's time zone\n monday = local_now - timedelta(days=local_now.weekday())\n monday = monday.replace(hour=0, minute=0, second=0, microsecond=0)\n\n query = \"\"\"\n SELECT COUNT(*) FROM updates\n WHERE discord_id = %s AND timestamp >= %s\n \"\"\"\n params = (discord_id, monday)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return row[0] if row else 0\n finally:\n c.close()\n self.close()\n\n def get_statuses_in_date_range(self, discord_id: int, start_date: datetime, end_date: datetime) -> List[str]:\n \"\"\"\n Fetches all raw status updates for a given user within a specified date range.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n A list of raw status updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT summarized_status FROM updates\n WHERE discord_id = %s AND timestamp >= %s AND timestamp <= %s\n \"\"\"\n params = (discord_id, start_date, end_date)\n try:\n c.execute(query, params)\n \n statuses = [row[0] for row in c.fetchall()]\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor(dictionary=True) # Set dictionary=True to return results as dictionaries\n \n query = \"\"\"\n SELECT id, discord_id, status, summarized_status, timestamp \n FROM updates\n 
WHERE discord_id = %s\n ORDER BY timestamp DESC\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n statuses = c.fetchall()\n return statuses\n finally:\n c.close()\n self.close()\n \n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n query = \"\"\"\n SELECT timestamp, time_zone FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n params = (discord_id,)\n try:\n c.execute(query, params)\n \n row = c.fetchone()\n return (row[0], row[1]) if row else (None, None)\n finally:\n c.close()\n self.close()\n \n def delete_newest_status(self, discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n \n # Fetch the ID of the newest status update for the given user\n query_get_id = \"\"\"\n SELECT id FROM updates\n WHERE discord_id = %s\n ORDER BY timestamp DESC\n LIMIT 1\n \"\"\"\n try:\n c.execute(query_get_id, (discord_id,))\n \n row = c.fetchone()\n if row:\n status_id = row[0]\n \n # Now, delete the status update using its ID\n query_delete = \"\"\"\n DELETE FROM updates WHERE id = %s\n \"\"\"\n c.execute(query_delete, (status_id,))\n \n self.conn.commit()\n finally:\n c.close()\n self.close()" }, { "identifier": "WeeklyPostsDB", "path": "weekly_posts/weekly_posts_db.py", "snippet": "class WeeklyPostsDB(BaseDB):\n \"\"\"\n Database class that handles operations related to the 'weekly_posts' table.\n \"\"\"\n\n def __init__(self, host: str, user: str, password: str, database: str, port: str):\n \"\"\"\n Initializes the WeeklyPostsDB class, connects to the MySQL database,\n and creates the 'weekly_posts' table if it doesn't exist.\n\n :param host: The MySQL host address.\n :param user: The MySQL user.\n :param password: The MySQL password.\n :param database: The MySQL database name.\n :param port: The MySQL port number.\n \"\"\"\n super().__init__(host, user, password, database, port)\n self._create_weekly_posts_table()\n\n def _create_weekly_posts_table(self):\n \"\"\"\n Creates the 'weekly_posts' table if it doesn't already exist.\n \"\"\"\n query = '''\n CREATE TABLE IF NOT EXISTS weekly_posts (\n post_id BIGINT PRIMARY KEY,\n timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP\n );\n '''\n try:\n self.execute_query(query)\n finally:\n self.close()\n\n def get_weekly_post_data(self) -> Optional[Dict[str, datetime.datetime]]:\n \"\"\"\n Fetches the most recent weekly post data from the 'weekly_posts' table.\n\n :return: A dictionary containing the post ID and timestamp, or None if no data exists.\n \"\"\"\n query = \"SELECT post_id, timestamp FROM weekly_posts ORDER BY timestamp DESC LIMIT 1\"\n \n if not self.conn.is_connected():\n print(\"Reconnecting to MySQL\")\n self.connect()\n\n c = self.conn.cursor()\n try:\n c.execute(query)\n row = c.fetchone()\n\n if row:\n return {'post_id': row[0], 'timestamp': row[1]}\n return None\n finally:\n c.close()\n self.close()\n\n def save_weekly_post_data(self, post_id: int, timestamp: 
datetime.datetime):\n \"\"\"\n Inserts or updates the weekly post data in the 'weekly_posts' table.\n\n :param post_id: The ID of the weekly post.\n :param timestamp: The timestamp of the weekly post.\n \"\"\"\n query = \"\"\"\n INSERT INTO weekly_posts (post_id, timestamp)\n VALUES (%s, %s)\n ON DUPLICATE KEY UPDATE timestamp = %s\n \"\"\"\n params = (post_id, timestamp, timestamp)\n try:\n self.execute_query(query, params)\n finally:\n self.close()" }, { "identifier": "StreaksManager", "path": "streaks/streaks_manager.py", "snippet": "class StreaksManager:\n \"\"\"\n Manages the streaks for team members.\n \"\"\"\n \n def __init__(self, streaks_db: StreaksDB):\n \"\"\"\n Initializes a new StreaksManager instance.\n\n Args:\n streaks_db: The StreaksDB object that handles database operations.\n \"\"\"\n self.streaks_db = streaks_db\n \n def get_streak(self, discord_id: int) -> int:\n \"\"\"\n Fetches the current streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n The current streak count.\n \"\"\"\n return self.streaks_db.get_streak(discord_id)\n\n def update_streak(self, discord_id: int, new_streak: int):\n \"\"\"\n Updates the streak for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n new_streak: The new streak count.\n \"\"\"\n self.streaks_db.update_streak(discord_id, new_streak)\n \n def reset_streak(self, discord_id: int):\n \"\"\"\n Resets the streak for a given user to zero.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.streaks_db.update_streak(discord_id, 0)" }, { "identifier": "TeamMemberManager", "path": "team_members/team_member_manager.py", "snippet": "class TeamMemberManager:\n \"\"\"\n Manages operations related to team members.\n \"\"\"\n\n def __init__(self, db: TeamMemberDB):\n \"\"\"\n Initialize a TeamMemberManager object.\n\n :param db: TeamMemberDB object for interacting with the database.\n \"\"\"\n self.db = db\n self.team_members = self.load_team_members()\n\n def load_team_members(self) -> List[TeamMember]:\n \"\"\"\n Load team members from the MySQL database into a list of TeamMember objects.\n\n :return: List of TeamMember objects.\n \"\"\"\n team_members = []\n members_data = self.db.list_all_members()\n\n for member_data in members_data:\n member = TeamMember(\n discord_id=member_data[0],\n time_zone=member_data[2],\n name=member_data[1],\n github_username=member_data[3],\n on_vacation=member_data[4]\n )\n team_members.append(member)\n\n return team_members\n\n def find_member(self, discord_id: int) -> TeamMember:\n \"\"\"\n Find and return a team member by their Discord ID.\n\n :param discord_id: The Discord ID of the team member.\n :return: A TeamMember object if found, otherwise None.\n \"\"\"\n for member in self.team_members:\n if member.discord_id == discord_id:\n return member\n return None\n\n def add_member(self, discord_id: int, name: str, time_zone: str, github_username: str):\n \"\"\"\n Add a new team member to the list and the database.\n\n :param discord_id: The Discord ID of the new member.\n :param name: The name of the new member.\n :param time_zone: The time zone of the new member.\n :param github_username: The GitHub username of the new member.\n \"\"\"\n new_member = TeamMember(discord_id, time_zone, name, github_username)\n self.db.insert_new_member(discord_id, name, time_zone, github_username)\n self.team_members.append(new_member)\n\n def remove_member(self, discord_id: int):\n \"\"\"\n Remove a team member from the list and the database.\n\n :param 
discord_id: The Discord ID of the member to remove.\n \"\"\"\n self.db.remove_member(discord_id)\n self.team_members = [member for member in self.team_members if member.discord_id != discord_id]\n\n def update_member_timezone(self, discord_id: int, new_time_zone: str):\n \"\"\"\n Update the timezone of a team member in the database and the list.\n\n :param discord_id: The Discord ID of the member to update.\n :param new_time_zone: The new timezone string to set for the member.\n \"\"\"\n # Update the timezone in the database\n self.db.update_member_timezone(discord_id, new_time_zone)\n\n # Find the member in the team_members list and update their timezone\n member = self.find_member(discord_id)\n if member:\n member.time_zone = new_time_zone\n\n def set_member_vacation_status(self, discord_id: int, on_vacation: bool):\n \"\"\"\n Sets the vacation status of a team member.\n\n :param discord_id: The Discord ID of the team member.\n :param on_vacation: The vacation status to be set for the team member.\n \"\"\"\n # Update the vacation status in the database\n self.db.set_vacation_status(discord_id, on_vacation)\n\n # Find the member in the team_members list and update their vacation status\n member = self.find_member(discord_id)\n if member:\n member.on_vacation = on_vacation" }, { "identifier": "UpdatesManager", "path": "updates/updates_manager.py", "snippet": "class UpdatesManager:\n \"\"\"\n Manages status updates for team members.\n \"\"\"\n\n def __init__(self, updates_db: UpdatesDB):\n \"\"\"\n Initializes a new UpdatesManager instance.\n\n Args:\n updates_db: The UpdatesDB object that handles database operations.\n \"\"\"\n self.updates_db = updates_db\n\n def insert_status(self, discord_id: int, status: str, time_zone: str):\n \"\"\"\n Inserts a new status update.\n\n Args:\n discord_id: The Discord ID of the team member.\n status: The status update.\n \"\"\"\n self.updates_db.insert_status(discord_id, status, time_zone)\n\n def update_summarized_status(self, discord_id: int, summarized_status: str):\n \"\"\"\n Updates the summarized status for the most recent update for a given user.\n\n Args:\n discord_id: The Discord ID of the team member.\n summarized_status: The summarized status update.\n \"\"\"\n self.updates_db.update_summarized_status(discord_id, summarized_status)\n\n def get_weekly_checkins_count(self, discord_id: int, time_zone: str) -> int:\n \"\"\"\n Fetches the number of check-ins for a given user in the current week.\n\n Args:\n discord_id: The Discord ID of the user.\n time_zone: The time zone of the user.\n\n Returns:\n The count of check-ins in the current week.\n \"\"\"\n return self.updates_db.get_weekly_checkins_count(discord_id, time_zone)\n \n def get_all_statuses_for_user(self, discord_id: int) -> List[dict]:\n \"\"\"\n Fetches all status updates (both raw and summarized) for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A list of dictionaries, each containing the status update details for a given record.\n \"\"\"\n return self.updates_db.get_all_statuses_for_user(discord_id)\n\n def get_last_update_timestamp(self, discord_id: int) -> Tuple[datetime, str]:\n \"\"\"\n Fetches the timestamp and time zone of the last status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n\n Returns:\n A tuple containing the timestamp of the last update and its time zone, or (None, None) if there are no updates.\n \"\"\"\n return self.updates_db.get_last_update_timestamp(discord_id)\n\n def delete_newest_status(self, 
discord_id: int) -> None:\n \"\"\"\n Deletes the most recent status update for a given user.\n\n Args:\n discord_id: The Discord ID of the user.\n \"\"\"\n self.updates_db.delete_newest_status(discord_id)\n\n async def generate_daily_summary(self, user_message: str) -> str:\n \"\"\"\n Generates a daily summary of the user's message using a large language model.\n\n Args:\n user_message: The user's message that needs to be summarized.\n\n Returns:\n The summarized message.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Please summarize the user's update into two sections: 'Did' for tasks completed yesterday and 'Do' for tasks planned for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n \n except Exception as e:\n print(f\"An error occurred while generating the summary: {e}\")\n return \"Error in generating summary\"\n\n async def generate_weekly_summary(self, discord_id: int, start_date: datetime, end_date: datetime) -> str:\n \"\"\"\n Generates a weekly summary of the user's status updates using a large language model.\n\n Args:\n discord_id: The Discord ID of the user.\n start_date: The start date of the date range.\n end_date: The end date of the date range.\n\n Returns:\n The summarized weekly status update.\n \"\"\"\n # Fetch all raw status updates for the specified date range using the new method in UpdatesDB\n weekly_statuses = self.updates_db.get_statuses_in_date_range(discord_id, start_date, end_date)\n\n if not weekly_statuses:\n return \"There are no status updates for this week.\"\n \n # Combine all raw statuses into a single string\n combined_statuses = \"\\n\".join(weekly_statuses)\n \n # Prepare a system message to guide OpenAI's model for weekly summary\n system_message = \"Please generate a comprehensive weekly summary based on the provided daily status updates, including only tasks that have been accomplished. 
Ignore tasks that are not in the 'Did' section.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_statuses}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-4-0613\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n weekly_summary = response['choices'][0]['message']['content'].strip()\n\n return weekly_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the weekly summary: {e}\")\n return \"Error in generating weekly summary\"\n \n async def summarize_technical_updates(self, commit_messages: List[str]) -> str:\n \"\"\"\n Summarizes the technical updates based on commit messages.\n\n Args:\n commit_messages: List of commit messages for the day.\n\n Returns:\n A summarized version of the technical updates.\n \"\"\"\n\n # Combine commit messages into a single string for the LLM\n combined_commits = \"\\n\".join(commit_messages)\n\n # If there are no commit messages, return a default message\n if not combined_commits:\n return \"No technical updates found based on commit messages.\"\n\n # Summarization using LLM\n system_message = \"Please provide a concise summary of the technical updates based on the provided commit messages.\"\n\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": combined_commits}\n ]\n\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the technical summary: {e}\")\n return \"Error in generating technical summary.\"\n\n async def summarize_feedback_and_revisions(self, original_report: str, feedback: str) -> str:\n \"\"\"\n Takes the original report and user feedback and generates a revised summary.\n\n Args:\n original_report: The original summarized report.\n feedback: The user's feedback or suggested edits.\n\n Returns:\n The revised summary.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"Revise the original report based on the user's feedback.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": f\"Original Report: {original_report}\"},\n {\"role\": \"user\", \"content\": f\"Feedback: {feedback}\"}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n revised_summary = response['choices'][0]['message']['content'].strip()\n\n return revised_summary\n \n except Exception as e:\n print(f\"An error occurred while generating the revised summary: {e}\")\n return \"Error in generating revised summary\"\n\n async def summarize_non_technical_updates(self, update: str) -> str:\n \"\"\"\n Summarizes a non-technical update using a large language model.\n\n Args:\n update: The raw non-technical update provided by the user.\n\n Returns:\n The summarized non-technical 
update.\n \"\"\"\n\n # System message to guide the LLM for a concise summary\n system_message = \"Please provide a concise summary of the non-technical update shared by the user.\"\n\n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": update}\n ]\n\n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n\n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n\n # Extract the generated summary\n summarized_message = response['choices'][0]['message']['content'].strip()\n\n return summarized_message\n\n except Exception as e:\n print(f\"An error occurred while generating the non-technical summary: {e}\")\n return \"Error in generating summary\"\n\n async def summarize_goals_for_the_day(self, goals: str) -> str:\n \"\"\"\n Summarizes the user's goals for the day using a large language model.\n\n Args:\n goals: The user's raw input on their goals for the day.\n\n Returns:\n The summarized goals for the day.\n \"\"\"\n # Initiate the conversation with the model\n system_message = \"Please provide a concise summary of the user's goals for today.\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": goals}\n ]\n \n # Specify the model engine you want to use (this is an example and can be adjusted based on your needs)\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Provide user's input and retrieve model's response\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n summarized_goals = response['choices'][0]['message']['content'].strip()\n\n # Return the summary\n return summarized_goals\n \n except Exception as e:\n print(f\"An error occurred while generating the goals summary: {e}\")\n return \"Error in generating goals summary\"\n \n async def evaluate_performance(self, user_message: str) -> str:\n \"\"\"\n Evaluates the performance of the user based on their update.\n\n Args:\n user_message: The user's message that needs to be evaluated.\n\n Returns:\n The evaluation of the user's performance.\n \"\"\"\n # Prepare a system message to guide OpenAI's model\n system_message = \"\"\"\n You are a project manager at a fast-paced tech startup, recognized for providing clear and actionable feedback during stand-up meetings. 
Your role is to evaluate the quality of team members' daily stand-up reports, with a focus on clear communication, comprehensive planning, and problem-solving abilities.\n It is essential to note that team members should neither be penalized nor rewarded for merely mentioning issues; instead, the emphasis should be on the clarity of the report and the quality of strategies proposed to address these issues.\n Your feedback is candid and aimed at encouraging high-quality reporting and effective planning within the startup environment.\n Please provide a two-sentence summary of the stand-up and assign a grade (A, B, C, D, or F) based on the following criteria:\n\n - A: Excellent - The report is exceptionally clear and detailed, with well-defined tasks and a thorough approach to tackling issues, exemplifying the proactive and problem-solving ethos of our startup.\n - B: Good - The report is clear and adequately detailed, outlining tasks and addressing issues with a reasonable approach, indicating a commitment to momentum and resolution.\n - C: Fair - The report is understandable but lacks detail in some areas, with a basic approach to resolving issues, suggesting a need for further strategy development.\n - D: Poor - The report is vague or missing details, with a limited or unclear approach to issues, necessitating better communication and planning skills.\n - F: Fail - The report is missing, overly vague, or lacks a coherent structure, with no apparent approach to issues, reflecting a need for significant improvement in reporting and strategizing.\n\n A comprehensive stand-up report effectively communicates what was done and what is planned, clearly identifies any issues, and connects daily tasks with broader business objectives.\n\n Provide clear and constructive feedback, aiming to foster a culture of excellence and continuous improvement in how we plan and communicate our daily activities.\n \"\"\"\n \n # Prepare the messages input for ChatCompletion\n messages = [\n {\"role\": \"system\", \"content\": system_message},\n {\"role\": \"user\", \"content\": user_message}\n ]\n \n # Specify the model engine you want to use\n model_engine = \"gpt-3.5-turbo-1106\"\n \n try:\n # Make an API call to OpenAI's ChatCompletion\n response = openai.ChatCompletion.create(\n model=model_engine,\n messages=messages\n )\n \n # Extract the generated text\n performance_evaluation = response['choices'][0]['message']['content'].strip()\n\n return performance_evaluation\n \n except Exception as e:\n print(f\"An error occurred while evaluating the performance: {e}\")\n return \"Error in evaluating performance\"" }, { "identifier": "WeeklyPostManager", "path": "weekly_posts/weekly_post_manager.py", "snippet": "class WeeklyPostManager:\n \"\"\"Manages the status post in a Discord channel.\"\"\"\n \n def __init__(self, channel, weekly_posts_db: WeeklyPostsDB):\n \"\"\"\n Initializes a new WeeklyPostManager instance.\n \"\"\"\n self.channel = channel\n self.weekly_posts_db = weekly_posts_db\n self.editable_weekly_post = None\n self.load_weekly_post_data()\n\n def load_weekly_post_data(self):\n \"\"\"\n Load the weekly post data from the database.\n \n This method queries the 'weekly_posts' table to get the ID and timestamp of \n the last weekly post. 
If no data exists, it sets the ID and timestamp to None.\n \"\"\"\n data = self.weekly_posts_db.get_weekly_post_data()\n self.editable_weekly_post_id = data.get('post_id', None)\n self.weekly_post_timestamp = data.get('timestamp', None)\n\n def save_weekly_post_data(self):\n \"\"\"\n Save the weekly post data to the database.\n \n This method inserts or updates the ID and timestamp of the current weekly post \n in the 'weekly_posts' table.\n \"\"\"\n self.weekly_posts_db.save_weekly_post_data(self.editable_weekly_post.id, datetime.now())\n\n async def initialize_post(self, team_members: List[TeamMember]):\n \"\"\"\n Initializes or retrieves the weekly status post on Discord.\n\n This function checks if a valid weekly post already exists for the current week.\n If it does, it retrieves that post. Otherwise, it sends a new message in the Discord\n channel with the list of team members and their statuses.\n\n Args:\n team_members: A list of TeamMember objects to be displayed in the post.\n \"\"\"\n current_week_number = datetime.now().isocalendar()[1]\n saved_week_number = self.weekly_post_timestamp.isocalendar()[1] if self.weekly_post_timestamp else None\n\n # Skip initialization if the post already exists and is for the current week\n if self.editable_weekly_post_id and current_week_number == saved_week_number:\n self.editable_weekly_post = await self.channel.fetch_message(self.editable_weekly_post_id)\n return\n\n utc_now = pytz.utc.localize(datetime.utcnow())\n today_weekday = utc_now.weekday()\n last_monday = utc_now - timedelta(days=today_weekday)\n next_sunday = last_monday + timedelta(days=6)\n\n start_date = self.format_date(last_monday)\n end_date = self.format_date(next_sunday)\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {m.current_streak}🔥\" if m.current_streak > 0 else \"\"\n\n # Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {'❓' * 5} {streak_str}`\"\n member_list.append(new_line)\n\n member_list_str = '\\n'.join(member_list)\n\n await self.channel.send(f\"# Weekly Status Updates\")\n await self.channel.send(f\"## {start_date} to {end_date}\")\n if member_list_str:\n self.editable_weekly_post = await self.channel.send(f\"{member_list_str}\")\n self.save_weekly_post_data() # Save the ID and timestamp after creating the post\n\n async def rebuild_post(self, team_members: List[TeamMember]):\n \"\"\"\n Rebuilds the entire weekly status post from the team members' data.\n\n Args:\n team_members: A list of TeamMember objects with updated statuses and streaks.\n \"\"\"\n # If there are no team members, delete the post and return\n if not team_members:\n if self.editable_weekly_post:\n await self.editable_weekly_post.delete()\n self.editable_weekly_post = None\n return\n\n # Calculate the max name length for alignment purposes\n max_name_length = max([len(m.name) for m in team_members])\n\n member_list = []\n for m in team_members:\n # Get the streak and number of weekly check-ins for the member\n streak = m.current_streak\n check_ins = m.weekly_checkins\n\n # Generate the marks based on the number of check-ins\n marks = \"✅\" * check_ins + \"❓\" * (5 - check_ins)\n\n # Include the streak with the fire emoji if the streak is greater than 0\n streak_str = f\" {streak}🔥\" if streak > 0 else \"\"\n\n # 
Construct the new line for the member with the updated information\n new_line = f\"# `{m.name.ljust(max_name_length)} {marks} {streak_str}`\"\n member_list.append(new_line)\n\n new_content = '\\n'.join(member_list)\n\n # Update the existing post or create a new one if it doesn't exist\n if self.editable_weekly_post:\n self.editable_weekly_post = await self.editable_weekly_post.edit(content=new_content)\n else:\n self.editable_weekly_post = await self.channel.send(new_content)\n\n # Save the ID and timestamp of the post\n self.save_weekly_post_data()\n\n def format_date(self, dt: datetime) -> str:\n \"\"\"\n Formats a datetime object into a human-readable string.\n\n Args:\n dt: The datetime object to format.\n\n Returns:\n A human-readable date string.\n \"\"\"\n suffix = ['th', 'st', 'nd', 'rd']\n day = int(dt.strftime('%d'))\n if 4 <= day <= 20 or 24 <= day <= 30:\n suffix_index = 0 # use 'th'\n else:\n suffix_index = day % 10 # use 'st', 'nd', 'rd' as appropriate\n\n return dt.strftime(f\"%B {day}{suffix[suffix_index]}\")" }, { "identifier": "Scheduler", "path": "scheduler.py", "snippet": "class Scheduler:\n \"\"\"Scheduler class to manage timed jobs for sending status requests.\n\n Attributes:\n scheduler: The APScheduler object.\n job_ids: A dictionary to store lists of job IDs for each member.\n \"\"\"\n \n def __init__(self) -> None:\n \"\"\"Initialize the Scheduler object and start the APScheduler.\"\"\"\n self.scheduler: AsyncIOScheduler = AsyncIOScheduler()\n self.job_ids: Dict[int, List[str]] = {} # Store job IDs indexed by member's Discord ID\n self.weekly_post_job_id = None # To store the ID of the scheduled weekly post job\n self.scheduler.start()\n\n def add_job(self, func: callable, member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager) -> None:\n \"\"\"Add a new job to the scheduler for a specific team member.\n \n Args:\n func: The function to call when the job is run.\n member: The TeamMember object for whom the job is added.\n \"\"\"\n time_zone = pytz.timezone(member.time_zone)\n \n weekday_trigger = CronTrigger(day_of_week='mon,tue,wed,thu,fri', hour=10, timezone=time_zone)\n weekend_trigger = CronTrigger(day_of_week='sat,sun', hour=11, timezone=time_zone)\n\n weekday_job = self.scheduler.add_job(func, weekday_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n weekend_job = self.scheduler.add_job(func, weekend_trigger, args=[member, weekly_post_manager, streaks_manager, updates_manager])\n\n self.job_ids.setdefault(member.discord_id, []).extend([weekday_job.id, weekend_job.id])\n\n def remove_job(self, discord_id: int) -> None:\n \"\"\"Remove jobs for a specific team member.\n \n Args:\n discord_id: The Discord ID of the member for whom the job should be removed.\n \"\"\"\n job_ids = self.job_ids.get(discord_id, [])\n for job_id in job_ids:\n self.scheduler.remove_job(job_id)\n\n if discord_id in self.job_ids:\n del self.job_ids[discord_id] # Remove the job IDs from the dictionary\n\n def schedule_weekly_post(self, func: callable, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]) -> None:\n \"\"\"Schedules the weekly post based on the latest time zone among the team members.\"\"\"\n \n # Determine the latest time zone\n latest_time_zone = max([member.time_zone for member in team_members], key=lambda tz: pytz.timezone(tz).utcoffset(datetime.utcnow()))\n\n # Set the trigger for 9:10 AM in the earliest time zone on 
Monday\n trigger = CronTrigger(day_of_week='mon', hour=9, minute=10, timezone=latest_time_zone)\n\n # Schedule the function with the trigger\n job = self.scheduler.add_job(func, trigger, args=[weekly_post_manager, streaks_manager, team_members])\n self.weekly_post_job_id = job.id\n\n def unschedule_weekly_post(self) -> None:\n \"\"\"Removes the weekly post job from the scheduler.\"\"\"\n if self.weekly_post_job_id:\n self.scheduler.remove_job(self.weekly_post_job_id)\n self.weekly_post_job_id = None\n\n def get_all_scheduled_jobs(self, team_member_manager) -> List[str]:\n \"\"\"Retrieve all scheduled jobs as a list of strings.\"\"\"\n job_descriptions = []\n\n for job in self.scheduler.get_jobs():\n # Determine the associated team member by looking up the job ID in the job_ids dictionary\n member_discord_id = next((discord_id for discord_id, job_ids in self.job_ids.items() if job.id in job_ids), None)\n member_name = team_member_manager.find_member(member_discord_id).name if member_discord_id else \"Unknown\"\n\n # Calculate the remaining time until the next run\n now = datetime.now(job.next_run_time.tzinfo) # Get the current time with the same timezone as the job's next_run_time\n remaining_time = job.next_run_time - now\n remaining_time_str = str(remaining_time).split('.')[0] # Remove the microseconds part\n\n # If this job is the weekly post job\n if job.id == self.weekly_post_job_id:\n job_descriptions.append(f\"ID: {job.id}, Type: Weekly Post, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n else:\n job_descriptions.append(f\"ID: {job.id}, Member: {member_name}, Next Run: {job.next_run_time}, Remaining Time: {remaining_time_str}, Func: {job.func.__name__}\")\n\n return job_descriptions" }, { "identifier": "TeamMember", "path": "team_members/team_member.py", "snippet": "class TeamMember:\n \"\"\"TeamMember class to store individual team member details.\n \n Attributes:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone in which the team member resides.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins of the team member.\n weekly_checkins: The number of check-ins for the current week.\n \"\"\"\n \n def __init__(self, discord_id: int, time_zone: str, name: str, github_username: str,\n current_streak: int = 0, weekly_checkins: int = 0, on_vacation: bool = False) -> None:\n \"\"\"Initialize a new TeamMember object.\n \n Args:\n discord_id: The Discord ID of the team member.\n time_zone: The time zone of the team member.\n name: The name of the team member.\n github_username: The GitHub username of the team member.\n current_streak: The current streak of daily updates/check-ins. Defaults to 0.\n weekly_checkins: The number of check-ins for the current week. 
Defaults to 0.\n \"\"\"\n self.discord_id: int = discord_id\n self.time_zone: str = time_zone\n self.name: str = name\n self.github_username: str = github_username\n self.current_streak: int = current_streak\n self.weekly_checkins: int = weekly_checkins\n self.on_vacation: bool = on_vacation\n \n def update_streak(self, streak: int) -> None:\n \"\"\"Update the current streak of the team member.\n \n Args:\n streak: The new streak count.\n \"\"\"\n self.current_streak = streak\n \n def reset_streak(self) -> None:\n \"\"\"Reset the current streak of the team member to 0.\"\"\"\n self.current_streak = 0\n\n def update_weekly_checkins(self, count: int):\n \"\"\"\n Update the weekly check-ins count.\n\n Args:\n count: The new count of weekly check-ins.\n \"\"\"\n self.weekly_checkins = count\n \n def increment_weekly_checkins(self) -> None:\n \"\"\"Increment the number of check-ins for the current week by 1.\"\"\"\n self.weekly_checkins += 1\n \n def reset_weekly_checkins(self) -> None:\n \"\"\"Reset the number of check-ins for the current week to 0.\"\"\"\n self.weekly_checkins = 0" } ]
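All of the database snippets in the context above (StreaksDB, TeamMemberDB, UpdatesDB, WeeklyPostsDB) inherit from a BaseDB class that is not included in this context list. The sketch below is only a reconstruction of the interface those subclasses rely on (connect, close, execute_query, and a conn attribute exposing is_connected() and cursor()), assuming mysql-connector-python; the real BaseDB may differ.

```python
# Hypothetical reconstruction of the BaseDB interface used by StreaksDB,
# TeamMemberDB, UpdatesDB, and WeeklyPostsDB above. Only the methods those
# subclasses actually call are sketched here; this is not the original class.
import mysql.connector


class BaseDB:
    def __init__(self, host, user, password, database, port):
        self.host = host
        self.user = user
        self.password = password
        self.database = database
        self.port = port
        self.conn = None
        self.connect()

    def connect(self):
        """Open (or reopen) the MySQL connection."""
        self.conn = mysql.connector.connect(
            host=self.host,
            user=self.user,
            password=self.password,
            database=self.database,
            port=self.port,
        )

    def execute_query(self, query, params=None):
        """Run a statement and commit; reconnect first if the link dropped."""
        if not self.conn or not self.conn.is_connected():
            self.connect()
        cursor = self.conn.cursor()
        try:
            cursor.execute(query, params or ())
            self.conn.commit()
        finally:
            cursor.close()

    def close(self):
        """Close the connection if it is currently open."""
        if self.conn and self.conn.is_connected():
            self.conn.close()
```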
import os import pytz import asyncio import openai import requests from typing import List from dotenv import load_dotenv from datetime import datetime, timedelta from multiprocessing import Process from streaks.streaks_db import StreaksDB from team_members.team_member_db import TeamMemberDB from updates.updates_db import UpdatesDB from weekly_posts.weekly_posts_db import WeeklyPostsDB from streaks.streaks_manager import StreaksManager from team_members.team_member_manager import TeamMemberManager from updates.updates_manager import UpdatesManager from weekly_posts.weekly_post_manager import WeeklyPostManager from scheduler import Scheduler from team_members.team_member import TeamMember from discord.ext import commands, tasks from discord import Intents, DMChannel from flask import Flask from asyncio import Task, ensure_future, CancelledError
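The import block above pulls in pytz, and the code repeats one time-zone pattern throughout: UpdatesDB.insert_status converts the current UTC time into the member's local zone before storing it, and the commit-fetching helper later converts a stored member-local timestamp back to UTC for the GitHub API. A small, self-contained illustration of that localize/astimezone pattern follows; the zone name is only an example, not taken from the code.

```python
# Standalone illustration of the pytz pattern used by the bot:
# take a naive UTC timestamp, make it timezone-aware, then convert it
# to a team member's local zone. "America/New_York" is only an example.
from datetime import datetime

import pytz

utc_now = datetime.utcnow().replace(tzinfo=pytz.utc)   # aware UTC time
member_tz = pytz.timezone("America/New_York")          # example member zone
local_now = utc_now.astimezone(member_tz)              # member's local time

# Going the other way: interpret a naive local timestamp as member-local,
# then express it in UTC (the direction used when building the `since`
# parameter for the GitHub API, which expects UTC timestamps).
naive_local = datetime(2023, 11, 6, 9, 30)
aware_local = member_tz.localize(naive_local)
as_utc = aware_local.astimezone(pytz.utc)

print(local_now.isoformat(), as_utc.isoformat())
```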
15324
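Taken together, the streak components in the context form a short chain: StreaksManager is a thin wrapper over StreaksDB, whose update_streak is a MySQL upsert (INSERT ... ON DUPLICATE KEY UPDATE), so a single call covers both creating and updating a member's streak row; the foreign key on streaks.discord_id also means the member must already exist in team_members. A minimal wiring sketch, with placeholder connection values rather than real settings:

```python
# Minimal wiring sketch for the streak components shown in the context above.
# Host/user/password/database/port values are placeholders, not real settings.
from streaks.streaks_db import StreaksDB
from streaks.streaks_manager import StreaksManager

streaks_db = StreaksDB("localhost", "bot", "secret", "standup", "3306")
streaks_manager = StreaksManager(streaks_db)

member_id = 123456789012345678                # example Discord ID
current = streaks_manager.get_streak(member_id)        # 0 if no row exists yet
streaks_manager.update_streak(member_id, current + 1)  # upsert handles both cases
```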
@bot.command(name='forcepostrebuild') async def force_post_rebuild(ctx): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to force a post rebuild.") return # Rebuild the post await weekly_post_manager.rebuild_post(team_member_manager.team_members) await ctx.send("Post rebuilt successfully.") @bot.command(name='deletelateststatus') async def delete_latest_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to delete status updates.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Delete the newest status using the UpdatesManager's method updates_manager.delete_newest_status(discord_id) await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.") @bot.command(name='viewuser') async def view_user(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to view user data.") return # Get the member's statuses using the UpdatesManager's method statuses = updates_manager.get_all_statuses_for_user(discord_id) if not statuses: await ctx.send(f"No status updates found for user with Discord ID {discord_id}.") return # Loop through the statuses and send individual messages for status in statuses: await ctx.send(f"### **Timestamp:** {status['timestamp']}") await ctx.send(f"### **Raw Status:** {status['status']}") await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}") @bot.command(name='setvacationstatus') async def set_vacation_status(ctx, discord_id: int): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to set vacation status.") return member = team_member_manager.find_member(discord_id) if member: new_status = not member.on_vacation team_member_manager.set_member_vacation_status(discord_id, new_status) await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.") else: await ctx.send(f"No user with Discord ID {discord_id} found.") @bot.command(name='weeklysummary') async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str): if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel): await ctx.send("You're not authorized to generate weekly summaries.") return # Find the member object using the Discord ID member = team_member_manager.find_member(discord_id) if not member: await ctx.send(f"No user with Discord ID {discord_id} found.") return # Convert the start_date and end_date strings to datetime objects # Adjusting the date format to MM-DD-YYYY and setting the time try: start_date = datetime.strptime(start_date, '%m-%d-%Y') end_date = datetime.strptime(end_date, '%m-%d-%Y') # Setting the time to ensure the whole week is captured start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0) end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999) except ValueError: await ctx.send("Invalid date format. 
Please use MM-DD-YYYY.") return # Generate the weekly summary weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date) # Send the weekly summary to the admin user admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}") else: await ctx.send("Unable to find the admin user.") @bot.event async def on_ready(): print("Bot is online!") # Log that the bot is online streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) updates_db = UpdatesDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT) guild = bot.get_guild(GUILD_TOKEN) channel = guild.get_channel(CHANNEL_TOKEN) global updates_manager updates_manager = UpdatesManager(updates_db) global streaks_manager streaks_manager = StreaksManager(streaks_db) global team_member_manager
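Every admin command in the bot code repeats the same inline guard, ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel). One possible refactor, not part of the original code, is to express that guard once as a discord.py check:

```python
# Possible refactor (not in the original code): the repeated
# "admin, via DM only" guard expressed as a reusable discord.py check.
from discord import DMChannel
from discord.ext import commands


def admin_dm_only(admin_id: int):
    """Build a check that passes only for the given admin in a DM channel."""
    async def predicate(ctx: commands.Context) -> bool:
        return ctx.author.id == admin_id and isinstance(ctx.channel, DMChannel)
    return commands.check(predicate)


# Hypothetical usage, reusing names from the surrounding bot code:
# @bot.command(name="forcepostrebuild")
# @admin_dm_only(ADMIN_DISCORD_ID)
# async def force_post_rebuild(ctx):
#     await weekly_post_manager.rebuild_post(team_member_manager.team_members)
#     await ctx.send("Post rebuilt successfully.")
```

With a check in place, the "You're not authorized..." reply would move into a command error handler, since discord.py raises commands.CheckFailure when a check returns False instead of running the command body.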
# Import required modules app = Flask(__name__) # Load environment variables from the .env file load_dotenv() # Retrieve bot, guild, and channel tokens from environment variables BOT_TOKEN = os.getenv('DISCORD_BOT_TOKEN') GUILD_TOKEN = int(os.getenv('DISCORD_GUILD_TOKEN')) CHANNEL_TOKEN = int(os.getenv('DISCORD_CHANNEL_TOKEN')) ADMIN_DISCORD_ID = int(os.getenv('ADMIN_DISCORD_ID')) # Retrieve database credentials from environment variables MYSQL_HOST = os.getenv('MYSQL_HOST') MYSQL_USER = os.getenv('MYSQL_USER') MYSQL_PASSWORD = os.getenv('MYSQL_PASSWORD') MYSQL_DB = os.getenv('MYSQL_DB') MYSQL_PORT = os.getenv('MYSQL_PORT') ORG_NAME = os.getenv('GITHUB_ORG_NAME') ORG_TOKEN = os.getenv('GITHUB_ORG_TOKEN') OPENAI_API_KEY = os.getenv('OPENAI_API_KEY') # Initialize bot with default intents intents = Intents.default() intents.members = True intents.message_content = True bot = commands.Bot(command_prefix='!', intents=intents) openai.api_key = OPENAI_API_KEY # TODO: Remove these globals streaks_manager = None weekly_post_manager = None team_member_manager = None updates_manager = None scheduler = None ongoing_status_requests = {} THUMBS_UP_EMOJI = "👍" PENCIL_EMOJI = "✏️" REPORT_SUBMISSION_EMOJI = '📝' async def weekly_state_reset(weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, team_members: List[TeamMember]): # Reset streaks for the previous week for member in team_members: if not member.on_vacation and member.weekly_checkins < 5: streaks_manager.reset_streak(member.discord_id) member.reset_streak() member.reset_weekly_checkins() # Initialize new weekly post await weekly_post_manager.initialize_post(team_members) def get_all_commit_messages_for_user(org_name: str, token: str, member: TeamMember) -> list: """Retrieve all commit messages for a user across all repos in an organization from the last 24 hours.""" headers = { "Authorization": f"token {token}", "Accept": "application/vnd.github.v3+json" } last_update_timestamp, user_time_zone = updates_manager.get_last_update_timestamp(member.discord_id) if last_update_timestamp: # Convert the timestamp to UTC local_tz = pytz.timezone(user_time_zone) localized_timestamp = local_tz.localize(last_update_timestamp) utc_timestamp = localized_timestamp.astimezone(pytz.utc) # Format the timestamp for the GitHub API and append 'Z' since_date = utc_timestamp.isoformat() if not since_date.endswith('Z'): since_date = utc_timestamp.isoformat().replace('+00:00', '') + 'Z' else: # If no updates found, default to last 24 hours since_date = (datetime.utcnow() - timedelta(days=1)).isoformat() + 'Z' all_commit_messages = [] # Paginate through all repositories in the organization repos_url = f"https://api.github.com/orgs/{org_name}/repos?type=all&per_page=100" while repos_url: response = requests.get(repos_url, headers=headers) if response.status_code != 200: # Log error and break loop print(f"Failed to fetch repos: {response.status_code} {response.text}") break repos = response.json() # Iterate over each repository for repo in repos: repo_name = repo["name"] commits_url = f"https://api.github.com/repos/{org_name}/{repo_name}/commits?author={member.github_username}&since={since_date}&per_page=100" # Paginate through commits for the repository while commits_url: response = requests.get(commits_url, headers=headers) if response.status_code != 200: # Log error and continue to the next repository print(f"Failed to fetch commits for {repo_name}: {response.status_code} {response.text}") break commits = response.json() repo_commit_messages = 
[commit["commit"]["message"] for commit in commits] all_commit_messages.extend(repo_commit_messages) # Check for the 'next' link for commits pagination commits_url = get_pagination_link(response.headers, 'next') # Check for the 'next' link for repositories pagination repos_url = get_pagination_link(response.headers, 'next') return all_commit_messages def get_pagination_link(headers, rel): """Extract pagination link for the 'rel' type from the Link header.""" link = headers.get('Link', None) if link: links = link.split(', ') for link in links: if 'rel="{}"'.format(rel) in link: return link.split('; ')[0].strip('<>') return None async def send_status_request(member: TeamMember, weekly_post_manager: WeeklyPostManager, streaks_manager: StreaksManager, updates_manager: UpdatesManager): if member.weekly_checkins == 5: return # If already completed 5 check-ins, do nothing user = bot.get_user(member.discord_id) if user: # Notify the admin that a status request is being sent admin_user = bot.get_user(ADMIN_DISCORD_ID) if admin_user: await admin_user.send(f"Status request sent to {member.name}.") # Cancel the previous task if it exists ongoing_task: Task = ongoing_status_requests.get(member.discord_id) if ongoing_task: ongoing_task.cancel() # Retrieve all commit messages for the member commit_messages = get_all_commit_messages_for_user(ORG_NAME, ORG_TOKEN, member) if not commit_messages: summarized_report = "You have no commits for the previous working day." msg = f"{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." else: summarized_report = await updates_manager.summarize_technical_updates(commit_messages) msg = f"Here's your summarized report based on your commits:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." raw_updates = summarized_report # Send initial message and wait for reaction await user.send( f"# Good morning {member.name}, time for your daily status update!\n" f"### I'm first going to check your commit messages and try to build a technical report for you.\n" f"### Next I will ask you for any non-technical updates from your previous work day.\n" f"### Finally I will ask you what you plan to work on today." 
) sent_message = await user.send(msg) await sent_message.add_reaction(THUMBS_UP_EMOJI) await sent_message.add_reaction(PENCIL_EMOJI) await sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) def check(m) -> bool: return m.author == user and isinstance(m.channel, DMChannel) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, reactor = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await sent_message.remove_reaction(emoji, bot.user) while str(reaction.emoji) in [PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: if str(reaction.emoji) == PENCIL_EMOJI: await user.send("What would you like me to change?") # Store the new wait_for message (feedback) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task feedback = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the feedback # Send original + feedback to LLM for reformatting summarized_report = await updates_manager.summarize_feedback_and_revisions(summarized_report, feedback.content) elif str(reaction.emoji) == REPORT_SUBMISSION_EMOJI: await user.send("Please submit your technical report directly.") # Store the new wait_for message (report submission) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task direct_report = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the report summarized_report = direct_report.content break # Exit the while loop as the user has submitted their report directly msg = f"Here's the revised report:\n{summarized_report}\nReact with {THUMBS_UP_EMOJI} to confirm, {PENCIL_EMOJI} to iterate with AI, or {REPORT_SUBMISSION_EMOJI} to submit your own report." last_sent_message = await send_long_message(user, msg) if last_sent_message: await last_sent_message.add_reaction(THUMBS_UP_EMOJI) await last_sent_message.add_reaction(PENCIL_EMOJI) await last_sent_message.add_reaction(REPORT_SUBMISSION_EMOJI) # Store the new wait_for reaction task in the global dictionary ongoing_task = ensure_future(bot.wait_for('reaction_add', check=lambda r, u: u == user and r.message.id == last_sent_message.id and isinstance(r.message.channel, DMChannel) and str(r.emoji) in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI])) ongoing_status_requests[member.discord_id] = ongoing_task reaction, user = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the reaction for emoji in [THUMBS_UP_EMOJI, PENCIL_EMOJI, REPORT_SUBMISSION_EMOJI]: await last_sent_message.remove_reaction(emoji, bot.user) # Prompt user for non-technical updates from the previous day non_technical_msg_prompt = "Please provide any non-technical updates from your previous working day, e.g., important meetings, interviews, etc." 
await user.send(non_technical_msg_prompt) # Store the new wait_for message (non-technical update) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task non_technical_update_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the non-technical update raw_updates += f"\n\n{non_technical_update_raw.content}" # Summarize non-technical update with LLM non_technical_update = await updates_manager.summarize_non_technical_updates(non_technical_update_raw.content) # Prompt user for their goals for the day goals_msg_prompt = "What do you plan to work on or accomplish today?" await user.send(goals_msg_prompt) # Store the new wait_for message (goals for the day) task in the global dictionary ongoing_task = ensure_future(bot.wait_for('message', check=check)) ongoing_status_requests[member.discord_id] = ongoing_task goals_for_today_raw = await ongoing_task ongoing_status_requests.pop(member.discord_id, None) # Remove the task once we get the goals # Summarize goals for the day with LLM goals_for_today = await updates_manager.summarize_goals_for_the_day(goals_for_today_raw.content) # Update the streak for this member streak = streaks_manager.get_streak(member.discord_id) streaks_manager.update_streak(member.discord_id, streak + 1) member.update_streak(streaks_manager.get_streak(member.discord_id)) member.increment_weekly_checkins() raw_updates += f"\n\n{goals_for_today_raw.content}" final_updates = f"{summarized_report}\n\n{non_technical_update}\n\n{goals_for_today}" updates_manager.insert_status(member.discord_id, raw_updates, member.time_zone) updates_manager.update_summarized_status(member.discord_id, final_updates) # Update the Discord post using WeeklyPostManager await weekly_post_manager.rebuild_post(team_member_manager.team_members) # Member name update as a header member_update_header = f"## {member.name}'s Update:" # Compile the final report with Markdown formatting final_report = ( f"\n### Technical Update:\n" f"{summarized_report}\n" f"### Non-Technical Update:\n" f"{non_technical_update}\n" f"### Goals for Today:\n" f"{goals_for_today}" ) stand_up_feedback = await updates_manager.evaluate_performance(final_report) # Concatenate the member name update with the final report and send to the designated Discord channel complete_message = f"{member_update_header}{final_report}" guild = bot.get_guild(GUILD_TOKEN) channel_to_post_in = guild.get_channel(CHANNEL_TOKEN) await user.send(stand_up_feedback) await send_long_message(channel_to_post_in, complete_message) async def send_long_message(destination, msg): max_length = 2000 # Discord's max character limit for a message sent_messages = [] # Keep track of all messages sent while len(msg) > 0: # If the message is shorter than the max length, send it as is if len(msg) <= max_length: sent_message = await destination.send(msg) sent_messages.append(sent_message) break # The message is sent, so break out of the loop # Find the nearest newline character before the max_length split_index = msg.rfind('\n', 0, max_length) # If no newline is found, just split at max_length if split_index == -1: split_index = max_length # Split the message at the found index and send the first part part_to_send = msg[:split_index].strip() sent_message = await destination.send(part_to_send) sent_messages.append(sent_message) # Wait a bit to respect Discord's rate limits await asyncio.sleep(1) # Remove the part that was sent 
from the message
    msg = msg[split_index:].strip()

    # Return the last message sent for reaction addition
    return sent_messages[-1] if sent_messages else None

@bot.command(name='viewscheduledjobs')
async def view_scheduled_jobs(ctx):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to view scheduled jobs.")
        return

    # Get all scheduled jobs using the Scheduler's method
    scheduled_jobs = scheduler.get_all_scheduled_jobs(team_member_manager)

    # Send the scheduled jobs to the admin user
    for job in scheduled_jobs:
        await ctx.send(job)

@bot.command(name='statusrequest')
async def status_request(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to request status.")
        return

    # Find the member object using the Discord ID
    member_to_request = team_member_manager.find_member(discord_id)

    if member_to_request:
        for member in team_member_manager.team_members:
            scheduler.remove_job(member.discord_id)
        scheduler.unschedule_weekly_post()

        # Send the status request to the member
        await ctx.send(f"Status request sent to user with Discord ID {discord_id}.")

        for member in team_member_manager.team_members:
            scheduler.add_job(send_status_request, member, weekly_post_manager, streaks_manager, updates_manager)
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

        await send_status_request(member_to_request, weekly_post_manager, streaks_manager, updates_manager)
        await ctx.send(f"Status request received from user with Discord ID {discord_id}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='adduser')
async def add_user(ctx, discord_id: int, time_zone: str, name: str, github_username: str):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to add users.")
        return

    # Add the new member using team_member_manager
    team_member_manager.add_member(discord_id, name, time_zone, github_username)

    # Update the weekly post to include the new member
    new_member = team_member_manager.find_member(discord_id)
    if new_member:
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)
        scheduler.add_job(send_status_request, new_member, weekly_post_manager, streaks_manager, updates_manager)
        scheduler.unschedule_weekly_post()
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

    await ctx.send(f"User {name} added successfully.")

@bot.command(name='removeuser')
async def remove_user(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to remove users.")
        return

    # Find the member object
    member_to_remove = team_member_manager.find_member(discord_id)
    if member_to_remove:
        # Remove the member from the database
        team_member_manager.remove_member(discord_id)

        # Update the weekly post to remove the member
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)
        scheduler.remove_job(discord_id)
        scheduler.unschedule_weekly_post()
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

        await ctx.send(f"User with Discord ID {discord_id} removed successfully.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='listusers')
async def list_users(ctx):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to list users.")
        return

    # List users using team_member_manager
    users = [(member.discord_id, member.name, member.time_zone, member.github_username, member.current_streak) for member in team_member_manager.team_members]
    user_list = '\n'.join([f"Name: {user[1]}, Discord ID: {user[0]}, Time Zone: {user[2]}, GitHub Username: {user[3]}, Current Streak: {user[4]}" for user in users])
    await ctx.send(f"List of users:\n{user_list}")

@bot.command(name='updatetimezone')
async def update_timezone(ctx, discord_id: int, new_time_zone: str):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to update timezones.")
        return

    # Find the member object using the Discord ID
    member_to_update = team_member_manager.find_member(discord_id)
    if member_to_update:
        # Update the timezone in the database
        team_member_manager.update_member_timezone(discord_id, new_time_zone)

        scheduler.remove_job(discord_id)
        scheduler.add_job(send_status_request, member_to_update, weekly_post_manager, streaks_manager, updates_manager)
        scheduler.unschedule_weekly_post()
        scheduler.schedule_weekly_post(weekly_state_reset, weekly_post_manager, streaks_manager, team_member_manager.team_members)

        await ctx.send(f"Timezone for user with Discord ID {discord_id} updated to {new_time_zone}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='updatestreak')
async def update_streak(ctx, discord_id: int, new_streak: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to update streaks.")
        return

    # Find the member object using the Discord ID
    member_to_update = team_member_manager.find_member(discord_id)
    if member_to_update:
        # Update the streak in the database
        streaks_manager.update_streak(discord_id, new_streak)
        member_to_update.update_streak(new_streak)

        # Update the Discord post using WeeklyPostManager
        await weekly_post_manager.rebuild_post(team_member_manager.team_members)

        await ctx.send(f"Streak for user with Discord ID {discord_id} updated to {new_streak}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='forcepostrebuild')
async def force_post_rebuild(ctx):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to force a post rebuild.")
        return

    # Rebuild the post
    await weekly_post_manager.rebuild_post(team_member_manager.team_members)

    await ctx.send("Post rebuilt successfully.")

@bot.command(name='deletelateststatus')
async def delete_latest_status(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to delete status updates.")
        return

    # Find the member object using the Discord ID
    member = team_member_manager.find_member(discord_id)
    if not member:
        await ctx.send(f"No user with Discord ID {discord_id} found.")
        return

    # Delete the newest status using the UpdatesManager's method
    updates_manager.delete_newest_status(discord_id)

    await ctx.send(f"Latest status update for user with Discord ID {discord_id} deleted successfully.")

@bot.command(name='viewuser')
async def view_user(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to view user data.")
        return

    # Get the member's statuses using the UpdatesManager's method
    statuses = updates_manager.get_all_statuses_for_user(discord_id)
    if not statuses:
        await ctx.send(f"No status updates found for user with Discord ID {discord_id}.")
        return

    # Loop through the statuses and send individual messages
    for status in statuses:
        await ctx.send(f"### **Timestamp:** {status['timestamp']}")
        await ctx.send(f"### **Raw Status:** {status['status']}")
        await ctx.send(f"### **Summarized Status:** \n{status['summarized_status']}")

@bot.command(name='setvacationstatus')
async def set_vacation_status(ctx, discord_id: int):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to set vacation status.")
        return

    member = team_member_manager.find_member(discord_id)
    if member:
        new_status = not member.on_vacation
        team_member_manager.set_member_vacation_status(discord_id, new_status)
        await ctx.send(f"Vacation status for user with Discord ID {discord_id} set to {'on vacation' if new_status else 'not on vacation'}.")
    else:
        await ctx.send(f"No user with Discord ID {discord_id} found.")

@bot.command(name='weeklysummary')
async def weekly_summary(ctx, discord_id: int, start_date: str, end_date: str):
    if ctx.message.author.id != ADMIN_DISCORD_ID or not isinstance(ctx.channel, DMChannel):
        await ctx.send("You're not authorized to generate weekly summaries.")
        return

    # Find the member object using the Discord ID
    member = team_member_manager.find_member(discord_id)
    if not member:
        await ctx.send(f"No user with Discord ID {discord_id} found.")
        return

    # Convert the start_date and end_date strings to datetime objects
    # Adjusting the date format to MM-DD-YYYY and setting the time
    try:
        start_date = datetime.strptime(start_date, '%m-%d-%Y')
        end_date = datetime.strptime(end_date, '%m-%d-%Y')

        # Setting the time to ensure the whole week is captured
        start_date = start_date.replace(hour=0, minute=0, second=0, microsecond=0)
        end_date = end_date.replace(hour=23, minute=59, second=59, microsecond=999999)
    except ValueError:
        await ctx.send("Invalid date format. Please use MM-DD-YYYY.")
        return

    # Generate the weekly summary
    weekly_summary = await updates_manager.generate_weekly_summary(discord_id, start_date, end_date)

    # Send the weekly summary to the admin user
    admin_user = bot.get_user(ADMIN_DISCORD_ID)
    if admin_user:
        await admin_user.send(f"**{member.name}'s Weekly Summary for {start_date.strftime('%m-%d-%Y')} to {end_date.strftime('%m-%d-%Y')}:**\n{weekly_summary}")
    else:
        await ctx.send("Unable to find the admin user.")

@bot.event
async def on_ready():
    print("Bot is online!")  # Log that the bot is online

    streaks_db = StreaksDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
    team_member_db = TeamMemberDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
    weekly_posts_db = WeeklyPostsDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)
    updates_db = UpdatesDB(MYSQL_HOST, MYSQL_USER, MYSQL_PASSWORD, MYSQL_DB, MYSQL_PORT)

    guild = bot.get_guild(GUILD_TOKEN)
    channel = guild.get_channel(CHANNEL_TOKEN)

    global updates_manager
    updates_manager = UpdatesManager(updates_db)

    global streaks_manager
    streaks_manager = StreaksManager(streaks_db)

    global team_member_manager
    team_member_manager = TeamMemberManager(team_member_db)
5
2023-10-12 02:01:46+00:00
24k
azuline/rose
rose/cache_test.py
[ { "identifier": "TEST_COLLAGE_1", "path": "conftest.py", "snippet": "TEST_COLLAGE_1 = TESTDATA / \"Collage 1\"" }, { "identifier": "TEST_PLAYLIST_1", "path": "conftest.py", "snippet": "TEST_PLAYLIST_1 = TESTDATA / \"Playlist 1\"" }, { "identifier": "TEST_RELEASE_1", "path": "conftest.py", "snippet": "TEST_RELEASE_1 = TESTDATA / \"Test Release 1\"" }, { "identifier": "TEST_RELEASE_2", "path": "conftest.py", "snippet": "TEST_RELEASE_2 = TESTDATA / \"Test Release 2\"" }, { "identifier": "TEST_RELEASE_3", "path": "conftest.py", "snippet": "TEST_RELEASE_3 = TESTDATA / \"Test Release 3\"" }, { "identifier": "AudioTags", "path": "rose/audiotags.py", "snippet": "class AudioTags:\n id: str | None\n release_id: str | None\n title: str | None\n year: int | None\n tracknumber: str | None\n tracktotal: int | None\n discnumber: str | None\n disctotal: int | None\n album: str | None\n genre: list[str]\n label: list[str]\n releasetype: str\n\n albumartists: ArtistMapping\n trackartists: ArtistMapping\n\n duration_sec: int\n\n path: Path\n\n @classmethod\n def from_file(cls, p: Path) -> AudioTags:\n \"\"\"Read the tags of an audio file on disk.\"\"\"\n if not any(p.suffix.lower() == ext for ext in SUPPORTED_AUDIO_EXTENSIONS):\n raise UnsupportedFiletypeError(f\"{p.suffix} not a supported filetype\")\n try:\n m = mutagen.File(p) # type: ignore\n except mutagen.MutagenError as e: # type: ignore\n raise UnsupportedFiletypeError(f\"Failed to open file: {e}\") from e\n if isinstance(m, mutagen.mp3.MP3):\n # ID3 returns trackno/discno tags as no/total. We have to parse.\n tracknumber = discnumber = tracktotal = disctotal = None\n if tracknos := _get_tag(m.tags, [\"TRCK\"]):\n try:\n tracknumber, tracktotalstr = tracknos.split(\"/\", 1)\n tracktotal = _parse_int(tracktotalstr)\n except ValueError:\n tracknumber = tracknos\n if discnos := _get_tag(m.tags, [\"TPOS\"]):\n try:\n discnumber, disctotalstr = discnos.split(\"/\", 1)\n disctotal = _parse_int(disctotalstr)\n except ValueError:\n discnumber = discnos\n\n def _get_paired_frame(x: str) -> str | None:\n if not m.tags:\n return None\n for tag in [\"TIPL\", \"IPLS\"]:\n try:\n frame = m.tags[tag]\n except KeyError:\n continue\n return r\" \\\\ \".join([p[1] for p in frame.people if p[0].lower() == x.lower()])\n return None\n\n return AudioTags(\n id=_get_tag(m.tags, [\"TXXX:ROSEID\"]),\n release_id=_get_tag(m.tags, [\"TXXX:ROSERELEASEID\"]),\n title=_get_tag(m.tags, [\"TIT2\"]),\n year=_parse_year(_get_tag(m.tags, [\"TDRC\", \"TYER\"])),\n tracknumber=tracknumber,\n tracktotal=tracktotal,\n discnumber=discnumber,\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"TALB\"]),\n genre=_split_tag(_get_tag(m.tags, [\"TCON\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"TPUB\"], split=True)),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"TXXX:RELEASETYPE\"], first=True)),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"TPE2\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"TPE1\"], split=True),\n remixer=_get_tag(m.tags, [\"TPE4\"], split=True),\n composer=_get_tag(m.tags, [\"TCOM\"], split=True),\n conductor=_get_tag(m.tags, [\"TPE3\"], split=True),\n producer=_get_paired_frame(\"producer\"),\n dj=_get_paired_frame(\"DJ-mix\"),\n ),\n duration_sec=round(m.info.length),\n path=p,\n )\n if isinstance(m, mutagen.mp4.MP4):\n tracknumber = discnumber = tracktotal = disctotal = None\n with contextlib.suppress(ValueError):\n tracknumber, tracktotalstr = _get_tuple_tag(m.tags, [\"trkn\"]) # type: ignore\n tracktotal 
= _parse_int(tracktotalstr)\n with contextlib.suppress(ValueError):\n discnumber, disctotalstr = _get_tuple_tag(m.tags, [\"disk\"]) # type: ignore\n disctotal = _parse_int(disctotalstr)\n\n return AudioTags(\n id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:ID\"]),\n release_id=_get_tag(m.tags, [\"----:net.sunsetglow.rose:RELEASEID\"]),\n title=_get_tag(m.tags, [\"\\xa9nam\"]),\n year=_parse_year(_get_tag(m.tags, [\"\\xa9day\"])),\n tracknumber=str(tracknumber),\n tracktotal=tracktotal,\n discnumber=str(discnumber),\n disctotal=disctotal,\n album=_get_tag(m.tags, [\"\\xa9alb\"]),\n genre=_split_tag(_get_tag(m.tags, [\"\\xa9gen\"], split=True)),\n label=_split_tag(_get_tag(m.tags, [\"----:com.apple.iTunes:LABEL\"], split=True)),\n releasetype=_normalize_rtype(\n _get_tag(m.tags, [\"----:com.apple.iTunes:RELEASETYPE\"], first=True)\n ),\n albumartists=parse_artist_string(main=_get_tag(m.tags, [\"aART\"], split=True)),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"\\xa9ART\"], split=True),\n remixer=_get_tag(m.tags, [\"----:com.apple.iTunes:REMIXER\"], split=True),\n producer=_get_tag(m.tags, [\"----:com.apple.iTunes:PRODUCER\"], split=True),\n composer=_get_tag(m.tags, [\"\\xa9wrt\"], split=True),\n conductor=_get_tag(m.tags, [\"----:com.apple.iTunes:CONDUCTOR\"], split=True),\n dj=_get_tag(m.tags, [\"----:com.apple.iTunes:DJMIXER\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n return AudioTags(\n id=_get_tag(m.tags, [\"roseid\"]),\n release_id=_get_tag(m.tags, [\"rosereleaseid\"]),\n title=_get_tag(m.tags, [\"title\"]),\n year=_parse_year(_get_tag(m.tags, [\"date\", \"year\"])),\n tracknumber=_get_tag(m.tags, [\"tracknumber\"], first=True),\n tracktotal=_parse_int(_get_tag(m.tags, [\"tracktotal\"], first=True)),\n discnumber=_get_tag(m.tags, [\"discnumber\"], first=True),\n disctotal=_parse_int(_get_tag(m.tags, [\"disctotal\"], first=True)),\n album=_get_tag(m.tags, [\"album\"]),\n genre=_split_tag(_get_tag(m.tags, [\"genre\"], split=True)),\n label=_split_tag(\n _get_tag(m.tags, [\"organization\", \"label\", \"recordlabel\"], split=True)\n ),\n releasetype=_normalize_rtype(_get_tag(m.tags, [\"releasetype\"], first=True)),\n albumartists=parse_artist_string(\n main=_get_tag(m.tags, [\"albumartist\"], split=True)\n ),\n trackartists=parse_artist_string(\n main=_get_tag(m.tags, [\"artist\"], split=True),\n remixer=_get_tag(m.tags, [\"remixer\"], split=True),\n producer=_get_tag(m.tags, [\"producer\"], split=True),\n composer=_get_tag(m.tags, [\"composer\"], split=True),\n conductor=_get_tag(m.tags, [\"conductor\"], split=True),\n dj=_get_tag(m.tags, [\"djmixer\"], split=True),\n ),\n duration_sec=round(m.info.length), # type: ignore\n path=p,\n )\n raise UnsupportedFiletypeError(f\"{p} is not a supported audio file\")\n\n @no_type_check\n def flush(self, *, validate: bool = True) -> None:\n \"\"\"Flush the current tags to the file on disk.\"\"\"\n m = mutagen.File(self.path)\n if not validate and \"pytest\" not in sys.modules:\n raise Exception(\"Validate can only be turned off by tests.\")\n\n self.releasetype = (self.releasetype or \"unknown\").lower()\n if validate and self.releasetype not in SUPPORTED_RELEASE_TYPES:\n raise UnsupportedTagValueTypeError(\n f\"Release type {self.releasetype} is not a supported release type.\\n\"\n f\"Supported release types: {', '.join(SUPPORTED_RELEASE_TYPES)}\"\n )\n\n if isinstance(m, 
mutagen.mp3.MP3):\n if m.tags is None:\n m.tags = mutagen.id3.ID3()\n\n def _write_standard_tag(key: str, value: str | None) -> None:\n m.tags.delall(key)\n frame = getattr(mutagen.id3, key)(text=value)\n if value:\n m.tags.add(frame)\n\n def _write_tag_with_description(name: str, value: str | None) -> None:\n key, desc = name.split(\":\", 1)\n # Since the ID3 tags work with the shared prefix key before `:`, manually preserve\n # the other tags with the shared prefix key.\n keep_fields = [f for f in m.tags.getall(key) if getattr(f, \"desc\", None) != desc]\n m.tags.delall(key)\n if value:\n frame = getattr(mutagen.id3, key)(desc=desc, text=value)\n m.tags.add(frame)\n for f in keep_fields:\n m.tags.add(f)\n\n _write_tag_with_description(\"TXXX:ROSEID\", self.id)\n _write_tag_with_description(\"TXXX:ROSERELEASEID\", self.release_id)\n _write_standard_tag(\"TIT2\", self.title)\n _write_standard_tag(\"TDRC\", str(self.year).zfill(4))\n _write_standard_tag(\"TRCK\", self.tracknumber)\n _write_standard_tag(\"TPOS\", self.discnumber)\n _write_standard_tag(\"TALB\", self.album)\n _write_standard_tag(\"TCON\", \";\".join(self.genre))\n _write_standard_tag(\"TPUB\", \";\".join(self.label))\n _write_tag_with_description(\"TXXX:RELEASETYPE\", self.releasetype)\n _write_standard_tag(\"TPE2\", format_artist_string(self.albumartists))\n _write_standard_tag(\"TPE1\", format_artist_string(self.trackartists))\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n m.tags.delall(\"TPE4\")\n m.tags.delall(\"TCOM\")\n m.tags.delall(\"TPE3\")\n # Delete all paired text frames, since these represent additional artist roles. We don't\n # want to preserve them.\n m.tags.delall(\"TIPL\")\n m.tags.delall(\"IPLS\")\n m.save()\n return\n if isinstance(m, mutagen.mp4.MP4):\n if m.tags is None:\n m.tags = mutagen.mp4.MP4Tags()\n m.tags[\"----:net.sunsetglow.rose:ID\"] = (self.id or \"\").encode()\n m.tags[\"----:net.sunsetglow.rose:RELEASEID\"] = (self.release_id or \"\").encode()\n m.tags[\"\\xa9nam\"] = self.title or \"\"\n m.tags[\"\\xa9day\"] = str(self.year).zfill(4)\n m.tags[\"\\xa9alb\"] = self.album or \"\"\n m.tags[\"\\xa9gen\"] = \";\".join(self.genre)\n m.tags[\"----:com.apple.iTunes:LABEL\"] = \";\".join(self.label).encode()\n m.tags[\"----:com.apple.iTunes:RELEASETYPE\"] = self.releasetype.encode()\n m.tags[\"aART\"] = format_artist_string(self.albumartists)\n m.tags[\"\\xa9ART\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:REMIXER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:PRODUCER\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"\\xa9wrt\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:CONDUCTOR\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"----:com.apple.iTunes:DJMIXER\"]\n\n # The track and disc numbers in MP4 are a bit annoying, because they must be a\n # single-element list of 2-tuple ints. 
We preserve the previous tracktotal/disctotal (as\n # Rose does not care about those values), and then attempt to write our own tracknumber\n # and discnumber.\n try:\n prev_tracktotal = m.tags[\"trkn\"][0][1]\n except (KeyError, IndexError):\n prev_tracktotal = 1\n try:\n prev_disctotal = m.tags[\"disk\"][0][1]\n except (KeyError, IndexError):\n prev_disctotal = 1\n try:\n m.tags[\"trkn\"] = [(int(self.tracknumber or \"0\"), prev_tracktotal)]\n m.tags[\"disk\"] = [(int(self.discnumber or \"0\"), prev_disctotal)]\n except ValueError as e:\n raise UnsupportedTagValueTypeError(\n \"Could not write m4a trackno/discno tags: must be integers. \"\n f\"Got: {self.tracknumber=} / {self.discnumber=}\"\n ) from e\n\n m.save()\n return\n if isinstance(m, (mutagen.flac.FLAC, mutagen.oggvorbis.OggVorbis, mutagen.oggopus.OggOpus)):\n if m.tags is None:\n if isinstance(m, mutagen.flac.FLAC):\n m.tags = mutagen.flac.VCFLACDict()\n elif isinstance(m, mutagen.oggvorbis.OggVorbis):\n m.tags = mutagen.oggvorbis.OggVCommentDict()\n else:\n m.tags = mutagen.oggopus.OggOpusVComment()\n assert not isinstance(m.tags, mutagen.flac.MetadataBlock)\n m.tags[\"roseid\"] = self.id or \"\"\n m.tags[\"rosereleaseid\"] = self.release_id or \"\"\n m.tags[\"title\"] = self.title or \"\"\n m.tags[\"date\"] = str(self.year).zfill(4)\n m.tags[\"tracknumber\"] = self.tracknumber or \"\"\n m.tags[\"discnumber\"] = self.discnumber or \"\"\n m.tags[\"album\"] = self.album or \"\"\n m.tags[\"genre\"] = \";\".join(self.genre)\n m.tags[\"organization\"] = \";\".join(self.label)\n m.tags[\"releasetype\"] = self.releasetype\n m.tags[\"albumartist\"] = format_artist_string(self.albumartists)\n m.tags[\"artist\"] = format_artist_string(self.trackartists)\n # Wipe the alt. role artist tags, since we encode the full artist into the main tag.\n with contextlib.suppress(KeyError):\n del m.tags[\"remixer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"producer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"composer\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"conductor\"]\n with contextlib.suppress(KeyError):\n del m.tags[\"djmixer\"]\n m.save()\n return\n\n raise RoseError(f\"Impossible: unknown mutagen type: {type(m)=} ({repr(m)=})\")" }, { "identifier": "CACHE_SCHEMA_PATH", "path": "rose/cache.py", "snippet": "CACHE_SCHEMA_PATH = Path(__file__).resolve().parent / \"cache.sql\"" }, { "identifier": "STORED_DATA_FILE_REGEX", "path": "rose/cache.py", "snippet": "STORED_DATA_FILE_REGEX = re.compile(r\"\\.rose\\.([^.]+)\\.toml\")" }, { "identifier": "CachedCollage", "path": "rose/cache.py", "snippet": "class CachedCollage:\n name: str\n source_mtime: str\n release_ids: list[str]" }, { "identifier": "CachedPlaylist", "path": "rose/cache.py", "snippet": "class CachedPlaylist:\n name: str\n source_mtime: str\n cover_path: Path | None\n track_ids: list[str]" }, { "identifier": "CachedRelease", "path": "rose/cache.py", "snippet": "class CachedRelease:\n id: str\n source_path: Path\n cover_image_path: Path | None\n added_at: str # ISO8601 timestamp\n datafile_mtime: str\n albumtitle: str\n releasetype: str\n year: int | None\n new: bool\n disctotal: int\n genres: list[str]\n labels: list[str]\n albumartists: ArtistMapping\n metahash: str\n\n @classmethod\n def from_view(cls, c: Config, row: dict[str, Any], aliases: bool = True) -> CachedRelease:\n return CachedRelease(\n id=row[\"id\"],\n source_path=Path(row[\"source_path\"]),\n cover_image_path=Path(row[\"cover_image_path\"]) if row[\"cover_image_path\"] else None,\n 
added_at=row[\"added_at\"],\n datafile_mtime=row[\"datafile_mtime\"],\n albumtitle=row[\"albumtitle\"],\n releasetype=row[\"releasetype\"],\n year=row[\"year\"],\n disctotal=row[\"disctotal\"],\n new=bool(row[\"new\"]),\n genres=_split(row[\"genres\"]) if row[\"genres\"] else [],\n labels=_split(row[\"labels\"]) if row[\"labels\"] else [],\n albumartists=_unpack_artists(\n c, row[\"albumartist_names\"], row[\"albumartist_roles\"], aliases=aliases\n ),\n metahash=row[\"metahash\"],\n )\n\n def dump(self) -> dict[str, Any]:\n return {\n \"id\": self.id,\n \"source_path\": str(self.source_path.resolve()),\n \"cover_image_path\": str(self.cover_image_path.resolve())\n if self.cover_image_path\n else None,\n \"added_at\": self.added_at,\n \"albumtitle\": self.albumtitle,\n \"releasetype\": self.releasetype,\n \"year\": self.year,\n \"new\": self.new,\n \"disctotal\": self.disctotal,\n \"genres\": self.genres,\n \"labels\": self.labels,\n \"albumartists\": self.albumartists.dump(),\n }" }, { "identifier": "CachedTrack", "path": "rose/cache.py", "snippet": "class CachedTrack:\n id: str\n source_path: Path\n source_mtime: str\n tracktitle: str\n tracknumber: str\n tracktotal: int\n discnumber: str\n disctotal: int\n duration_seconds: int\n trackartists: ArtistMapping\n metahash: str\n\n release: CachedRelease\n\n @classmethod\n def from_view(\n cls, c: Config, row: dict[str, Any], release: CachedRelease, aliases: bool = True\n ) -> CachedTrack:\n return CachedTrack(\n id=row[\"id\"],\n source_path=Path(row[\"source_path\"]),\n source_mtime=row[\"source_mtime\"],\n tracktitle=row[\"tracktitle\"],\n tracknumber=row[\"tracknumber\"],\n tracktotal=row[\"tracktotal\"],\n discnumber=row[\"discnumber\"],\n disctotal=row[\"disctotal\"],\n duration_seconds=row[\"duration_seconds\"],\n trackartists=_unpack_artists(\n c,\n row[\"trackartist_names\"],\n row[\"trackartist_roles\"],\n aliases=aliases,\n ),\n metahash=row[\"metahash\"],\n release=release,\n )\n\n def dump(self, with_release_info: bool = True) -> dict[str, Any]:\n r = {\n \"id\": self.id,\n \"source_path\": str(self.source_path.resolve()),\n \"tracktitle\": self.tracktitle,\n \"tracknumber\": self.tracknumber,\n \"tracktotal\": self.tracktotal,\n \"discnumber\": self.discnumber,\n \"disctotal\": self.disctotal,\n \"duration_seconds\": self.duration_seconds,\n \"trackartists\": self.trackartists.dump(),\n }\n if with_release_info:\n r.update(\n {\n \"release_id\": self.release.id,\n \"added_at\": self.release.added_at,\n \"albumtitle\": self.release.albumtitle,\n \"releasetype\": self.release.releasetype,\n \"year\": self.release.year,\n \"new\": self.release.new,\n \"genres\": self.release.genres,\n \"labels\": self.release.labels,\n \"albumartists\": self.release.albumartists.dump(),\n }\n )\n return r" }, { "identifier": "_unpack", "path": "rose/cache.py", "snippet": "def _unpack(*xxs: str) -> Iterator[tuple[str, ...]]:\n \"\"\"\n Unpack an arbitrary number of strings, each of which is a \" ¬ \"-delimited list in actuality,\n but encoded as a string. This \" ¬ \"-delimited list-as-a-string is the convention we use to\n return arrayed data from a SQL query without introducing additional disk accesses.\n\n As a concrete example:\n\n >>> _unpack(\"Rose ¬ Lisa ¬ Jisoo ¬ Jennie\", \"vocal ¬ dance ¬ visual ¬ vocal\")\n [(\"Rose\", \"vocal\"), (\"Lisa\", \"dance\"), (\"Jisoo\", \"visual\"), (\"Jennie\", \"vocal\")]\n \"\"\"\n # If the strings are empty, then split will resolve to `[\"\"]`. 
But we don't want to loop over an\n # empty string, so we specially exit if we hit that case.\n if all(not xs for xs in xxs):\n return []\n yield from zip(*[_split(xs) for xs in xxs])" }, { "identifier": "artist_exists", "path": "rose/cache.py", "snippet": "def artist_exists(c: Config, artist_sanitized: str) -> bool:\n args: list[str] = [artist_sanitized]\n for alias in c.sanitized_artist_aliases_map.get(artist_sanitized, []):\n args.append(alias)\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n SELECT EXISTS(\n SELECT * FROM releases_artists\n WHERE artist_sanitized IN ({','.join(['?']*len(args))})\n )\n \"\"\",\n args,\n )\n return bool(cursor.fetchone()[0])" }, { "identifier": "connect", "path": "rose/cache.py", "snippet": "@contextlib.contextmanager\ndef connect(c: Config) -> Iterator[sqlite3.Connection]:\n conn = sqlite3.connect(\n c.cache_database_path,\n detect_types=sqlite3.PARSE_DECLTYPES,\n isolation_level=None,\n timeout=15.0,\n )\n try:\n conn.row_factory = sqlite3.Row\n conn.execute(\"PRAGMA foreign_keys=ON\")\n conn.execute(\"PRAGMA journal_mode=WAL\")\n yield conn\n finally:\n if conn:\n conn.close()" }, { "identifier": "genre_exists", "path": "rose/cache.py", "snippet": "def genre_exists(c: Config, genre_sanitized: str) -> bool:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT EXISTS(SELECT * FROM releases_genres WHERE genre_sanitized = ?)\",\n (genre_sanitized,),\n )\n return bool(cursor.fetchone()[0])" }, { "identifier": "get_collage", "path": "rose/cache.py", "snippet": "def get_collage(c: Config, collage_name: str) -> tuple[CachedCollage, list[CachedRelease]] | None:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT name, source_mtime FROM collages WHERE name = ?\",\n (collage_name,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n collage = CachedCollage(\n name=row[\"name\"],\n source_mtime=row[\"source_mtime\"],\n # Accumulated below when we query the releases.\n release_ids=[],\n )\n cursor = conn.execute(\n \"\"\"\n SELECT r.*\n FROM releases_view r\n JOIN collages_releases cr ON cr.release_id = r.id\n WHERE cr.collage_name = ? AND NOT cr.missing\n ORDER BY cr.position ASC\n \"\"\",\n (collage_name,),\n )\n releases: list[CachedRelease] = []\n for row in cursor:\n collage.release_ids.append(row[\"id\"])\n releases.append(CachedRelease.from_view(c, row))\n\n return (collage, releases)" }, { "identifier": "get_playlist", "path": "rose/cache.py", "snippet": "def get_playlist(c: Config, playlist_name: str) -> tuple[CachedPlaylist, list[CachedTrack]] | None:\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT\n name\n , source_mtime\n , cover_path\n FROM playlists\n WHERE name = ?\n \"\"\",\n (playlist_name,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n playlist = CachedPlaylist(\n name=row[\"name\"],\n source_mtime=row[\"source_mtime\"],\n cover_path=Path(row[\"cover_path\"]) if row[\"cover_path\"] else None,\n # Accumulated below when we query the tracks.\n track_ids=[],\n )\n\n cursor = conn.execute(\n \"\"\"\n SELECT t.*\n FROM tracks_view t\n JOIN playlists_tracks pt ON pt.track_id = t.id\n WHERE pt.playlist_name = ? 
AND NOT pt.missing\n ORDER BY pt.position ASC\n \"\"\",\n (playlist_name,),\n )\n trackrows = cursor.fetchall()\n\n release_ids = [r[\"release_id\"] for r in trackrows]\n cursor = conn.execute(\n f\"\"\"\n SELECT *\n FROM releases_view\n WHERE id IN ({','.join(['?']*len(release_ids))})\n \"\"\",\n release_ids,\n )\n releases_map: dict[str, CachedRelease] = {}\n for row in cursor:\n releases_map[row[\"id\"]] = CachedRelease.from_view(c, row)\n\n tracks: list[CachedTrack] = []\n for row in trackrows:\n playlist.track_ids.append(row[\"id\"])\n tracks.append(CachedTrack.from_view(c, row, releases_map[row[\"release_id\"]]))\n\n return playlist, tracks" }, { "identifier": "get_release", "path": "rose/cache.py", "snippet": "def get_release(c: Config, release_id: str) -> CachedRelease | None:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT * FROM releases_view WHERE id = ?\",\n (release_id,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n return CachedRelease.from_view(c, row)" }, { "identifier": "get_release_logtext", "path": "rose/cache.py", "snippet": "def get_release_logtext(c: Config, release_id: str) -> str | None:\n \"\"\"Get a human-readable identifier for a release suitable for logging.\"\"\"\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT albumtitle, year, albumartist_names, albumartist_roles FROM releases_view WHERE id = ?\",\n (release_id,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n return calculate_release_logtext(\n title=row[\"albumtitle\"],\n year=row[\"year\"],\n artists=_unpack_artists(c, row[\"albumartist_names\"], row[\"albumartist_roles\"]),\n )" }, { "identifier": "get_track", "path": "rose/cache.py", "snippet": "def get_track(c: Config, uuid: str) -> CachedTrack | None:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT * FROM tracks_view WHERE id = ?\", (uuid,))\n trackrow = cursor.fetchone()\n if not trackrow:\n return None\n cursor = conn.execute(\"SELECT * FROM releases_view WHERE id = ?\", (trackrow[\"release_id\"],))\n release = CachedRelease.from_view(c, cursor.fetchone())\n return CachedTrack.from_view(c, trackrow, release)" }, { "identifier": "get_track_logtext", "path": "rose/cache.py", "snippet": "def get_track_logtext(c: Config, track_id: str) -> str | None:\n \"\"\"Get a human-readable identifier for a track suitable for logging.\"\"\"\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT tracktitle, source_path, trackartist_names, trackartist_roles FROM tracks_view WHERE id = ?\",\n (track_id,),\n )\n row = cursor.fetchone()\n if not row:\n return None\n return calculate_track_logtext(\n title=row[\"tracktitle\"],\n artists=_unpack_artists(c, row[\"trackartist_names\"], row[\"trackartist_roles\"]),\n suffix=Path(row[\"source_path\"]).suffix,\n )" }, { "identifier": "get_tracks_associated_with_release", "path": "rose/cache.py", "snippet": "def get_tracks_associated_with_release(\n c: Config,\n release: CachedRelease,\n) -> list[CachedTrack]:\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT *\n FROM tracks_view\n WHERE release_id = ?\n ORDER BY release_id, FORMAT('%4d.%4d', discnumber, tracknumber)\n \"\"\",\n (release.id,),\n )\n rval = []\n for row in cursor:\n rval.append(CachedTrack.from_view(c, row, release))\n return rval" }, { "identifier": "get_tracks_associated_with_releases", "path": "rose/cache.py", "snippet": "def get_tracks_associated_with_releases(\n c: Config,\n releases: list[CachedRelease],\n) -> list[tuple[CachedRelease, list[CachedTrack]]]:\n 
releases_map = {r.id: r for r in releases}\n tracks_map: dict[str, list[CachedTrack]] = defaultdict(list)\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n SELECT *\n FROM tracks_view\n WHERE release_id IN ({','.join(['?']*len(releases))})\n ORDER BY release_id, FORMAT('%4d.%4d', discnumber, tracknumber)\n \"\"\",\n [r.id for r in releases],\n )\n for row in cursor:\n tracks_map[row[\"release_id\"]].append(\n CachedTrack.from_view(c, row, releases_map[row[\"release_id\"]])\n )\n\n rval = []\n for release in releases:\n tracks = tracks_map[release.id]\n rval.append((release, tracks))\n return rval" }, { "identifier": "label_exists", "path": "rose/cache.py", "snippet": "def label_exists(c: Config, label_sanitized: str) -> bool:\n with connect(c) as conn:\n cursor = conn.execute(\n \"SELECT EXISTS(SELECT * FROM releases_labels WHERE label_sanitized = ?)\",\n (label_sanitized,),\n )\n return bool(cursor.fetchone()[0])" }, { "identifier": "list_artists", "path": "rose/cache.py", "snippet": "def list_artists(c: Config) -> list[tuple[str, str]]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT artist, artist_sanitized FROM releases_artists\")\n return [(row[\"artist\"], row[\"artist_sanitized\"]) for row in cursor]" }, { "identifier": "list_collages", "path": "rose/cache.py", "snippet": "def list_collages(c: Config) -> list[str]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT name FROM collages\")\n return [r[\"name\"] for r in cursor]" }, { "identifier": "list_genres", "path": "rose/cache.py", "snippet": "def list_genres(c: Config) -> list[tuple[str, str]]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT genre, genre_sanitized FROM releases_genres\")\n return [(row[\"genre\"], row[\"genre_sanitized\"]) for row in cursor]" }, { "identifier": "list_labels", "path": "rose/cache.py", "snippet": "def list_labels(c: Config) -> list[tuple[str, str]]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT label, label_sanitized FROM releases_labels\")\n return [(row[\"label\"], row[\"label_sanitized\"]) for row in cursor]" }, { "identifier": "list_playlists", "path": "rose/cache.py", "snippet": "def list_playlists(c: Config) -> list[str]:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT DISTINCT name FROM playlists\")\n return [r[\"name\"] for r in cursor]" }, { "identifier": "list_releases", "path": "rose/cache.py", "snippet": "def list_releases(c: Config, release_ids: list[str] | None = None) -> list[CachedRelease]:\n \"\"\"Fetch data associated with given release IDs. Pass None to fetch all.\"\"\"\n query = \"SELECT * FROM releases_view\"\n args = []\n if release_ids is not None:\n query += f\" WHERE id IN ({','.join(['?']*len(release_ids))})\"\n args = release_ids\n query += \" ORDER BY source_path\"\n with connect(c) as conn:\n cursor = conn.execute(query, args)\n releases: list[CachedRelease] = []\n for row in cursor:\n releases.append(CachedRelease.from_view(c, row))\n return releases" }, { "identifier": "list_tracks", "path": "rose/cache.py", "snippet": "def list_tracks(c: Config, track_ids: list[str] | None = None) -> list[CachedTrack]:\n \"\"\"Fetch data associated with given track IDs. 
Pass None to fetch all.\"\"\"\n query = \"SELECT * FROM tracks_view\"\n args = []\n if track_ids is not None:\n query += f\" WHERE id IN ({','.join(['?']*len(track_ids))})\"\n args = track_ids\n query += \" ORDER BY source_path\"\n with connect(c) as conn:\n cursor = conn.execute(query, args)\n trackrows = cursor.fetchall()\n\n release_ids = [r[\"release_id\"] for r in trackrows]\n cursor = conn.execute(\n f\"\"\"\n SELECT *\n FROM releases_view\n WHERE id IN ({','.join(['?']*len(release_ids))})\n \"\"\",\n release_ids,\n )\n releases_map: dict[str, CachedRelease] = {}\n for row in cursor:\n releases_map[row[\"id\"]] = CachedRelease.from_view(c, row)\n\n rval = []\n for row in trackrows:\n rval.append(CachedTrack.from_view(c, row, releases_map[row[\"release_id\"]]))\n return rval" }, { "identifier": "lock", "path": "rose/cache.py", "snippet": "@contextlib.contextmanager\ndef lock(c: Config, name: str, timeout: float = 1.0) -> Iterator[None]:\n try:\n while True:\n with connect(c) as conn:\n cursor = conn.execute(\"SELECT MAX(valid_until) FROM locks WHERE name = ?\", (name,))\n row = cursor.fetchone()\n # If a lock exists, sleep until the lock is available. All locks should be very\n # short lived, so this shouldn't be a big performance penalty.\n if row and row[0] and row[0] > time.time():\n sleep = max(0, row[0] - time.time())\n logger.debug(f\"Failed to acquire lock for {name}: sleeping for {sleep}\")\n time.sleep(sleep)\n continue\n logger.debug(f\"Attempting to acquire lock for {name} with timeout {timeout}\")\n valid_until = time.time() + timeout\n try:\n conn.execute(\n \"INSERT INTO locks (name, valid_until) VALUES (?, ?)\", (name, valid_until)\n )\n except sqlite3.IntegrityError as e:\n logger.debug(f\"Failed to acquire lock for {name}, trying again: {e}\")\n continue\n logger.debug(\n f\"Successfully acquired lock for {name} with timeout {timeout} \"\n f\"until {valid_until}\"\n )\n break\n yield\n finally:\n logger.debug(f\"Releasing lock {name}\")\n with connect(c) as conn:\n conn.execute(\"DELETE FROM locks WHERE name = ?\", (name,))" }, { "identifier": "maybe_invalidate_cache_database", "path": "rose/cache.py", "snippet": "def maybe_invalidate_cache_database(c: Config) -> None:\n \"\"\"\n \"Migrate\" the database. If the schema in the database does not match that on disk, then nuke the\n database and recreate it from scratch. Otherwise, no op.\n\n We can do this because the database is just a read cache. It is not source-of-truth for any of\n its own data.\n \"\"\"\n with CACHE_SCHEMA_PATH.open(\"rb\") as fp:\n schema_hash = hashlib.sha256(fp.read()).hexdigest()\n\n # Hash a subset of the config fields to use as the cache hash, which invalidates the cache on\n # change. These are the fields that affect cache population. 
Invalidating the cache on config\n # change ensures that the cache is consistent with the config.\n config_hash_fields = {\n \"music_source_dir\": str(c.music_source_dir),\n \"cache_dir\": str(c.cache_dir),\n \"cover_art_stems\": c.cover_art_stems,\n \"valid_art_exts\": c.valid_art_exts,\n \"ignore_release_directories\": c.ignore_release_directories,\n }\n config_hash = sha256(json.dumps(config_hash_fields).encode()).hexdigest()\n\n with connect(c) as conn:\n cursor = conn.execute(\n \"\"\"\n SELECT EXISTS(\n SELECT * FROM sqlite_master\n WHERE type = 'table' AND name = '_schema_hash'\n )\n \"\"\"\n )\n if cursor.fetchone()[0]:\n cursor = conn.execute(\"SELECT schema_hash, config_hash, version FROM _schema_hash\")\n row = cursor.fetchone()\n if (\n row\n and row[\"schema_hash\"] == schema_hash\n and row[\"config_hash\"] == config_hash\n and row[\"version\"] == VERSION\n ):\n # Everything matches! Exit!\n return\n\n c.cache_database_path.unlink(missing_ok=True)\n with connect(c) as conn:\n with CACHE_SCHEMA_PATH.open(\"r\") as fp:\n conn.executescript(fp.read())\n conn.execute(\n \"\"\"\n CREATE TABLE _schema_hash (\n schema_hash TEXT\n , config_hash TEXT\n , version TEXT\n , PRIMARY KEY (schema_hash, config_hash, version)\n )\n \"\"\"\n )\n conn.execute(\n \"INSERT INTO _schema_hash (schema_hash, config_hash, version) VALUES (?, ?, ?)\",\n (schema_hash, config_hash, VERSION),\n )" }, { "identifier": "update_cache", "path": "rose/cache.py", "snippet": "def update_cache(\n c: Config,\n force: bool = False,\n # For testing.\n force_multiprocessing: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for all releases in the music source directory. Delete\n any cached releases that are no longer present on disk.\n \"\"\"\n update_cache_for_releases(c, None, force, force_multiprocessing=force_multiprocessing)\n update_cache_evict_nonexistent_releases(c)\n update_cache_for_collages(c, None, force)\n update_cache_evict_nonexistent_collages(c)\n update_cache_for_playlists(c, None, force)\n update_cache_evict_nonexistent_playlists(c)" }, { "identifier": "update_cache_evict_nonexistent_releases", "path": "rose/cache.py", "snippet": "def update_cache_evict_nonexistent_releases(c: Config) -> None:\n logger.debug(\"Evicting cached releases that are not on disk\")\n dirs = [Path(d.path).resolve() for d in os.scandir(c.music_source_dir) if d.is_dir()]\n with connect(c) as conn:\n cursor = conn.execute(\n f\"\"\"\n DELETE FROM releases\n WHERE source_path NOT IN ({\",\".join([\"?\"] * len(dirs))})\n RETURNING source_path\n \"\"\",\n [str(d) for d in dirs],\n )\n for row in cursor:\n logger.info(f\"Evicted missing release {row['source_path']} from cache\")" }, { "identifier": "update_cache_for_releases", "path": "rose/cache.py", "snippet": "def update_cache_for_releases(\n c: Config,\n # Leave as None to update all releases.\n release_dirs: list[Path] | None = None,\n force: bool = False,\n # For testing.\n force_multiprocessing: bool = False,\n) -> None:\n \"\"\"\n Update the read cache to match the data for any passed-in releases. If a directory lacks a\n .rose.{uuid}.toml datafile, create the datafile for the release and set it to the initial state.\n\n This is a hot path and is thus performance-optimized. The bottleneck is disk accesses, so we\n structure this function in order to minimize them. We solely read files that have changed since\n last run and batch writes together. We trade higher memory for reduced disk accesses.\n Concretely, we:\n\n 1. 
Execute one big SQL query at the start to fetch the relevant previous caches.\n 2. Skip reading a file's data if the mtime has not changed since the previous cache update.\n 3. Batch SQLite write operations to the end of this function, and only execute a SQLite upsert\n if the read data differs from the previous caches.\n\n We also shard the directories across multiple processes and execute them simultaneously.\n \"\"\"\n release_dirs = release_dirs or [\n Path(d.path) for d in os.scandir(c.music_source_dir) if d.is_dir()\n ]\n release_dirs = [\n d\n for d in release_dirs\n if d.name != \"!collages\"\n and d.name != \"!playlists\"\n and d.name not in c.ignore_release_directories\n ]\n if not release_dirs:\n logger.debug(\"No-Op: No whitelisted releases passed into update_cache_for_releases\")\n return\n logger.debug(f\"Refreshing the read cache for {len(release_dirs)} releases\")\n if len(release_dirs) < 10:\n logger.debug(f\"Refreshing cached data for {', '.join([r.name for r in release_dirs])}\")\n\n # If the number of releases changed is less than 50; do not bother with all that multiprocessing\n # gunk: instead, directly call the executor.\n #\n # This has an added benefit of not spawning processes from the virtual filesystem and watchdog\n # processes, as those processes always update the cache for one release at a time and are\n # multithreaded. Starting other processes from threads is bad!\n if not force_multiprocessing and len(release_dirs) < 50:\n logger.debug(\n f\"Running cache update executor in same process because {len(release_dirs)=} < 50\"\n )\n _update_cache_for_releases_executor(c, release_dirs, force)\n return\n\n # Batch size defaults to equal split across all processes. However, if the number of directories\n # is small, we shrink the # of processes to save on overhead.\n num_proc = c.max_proc\n if len(release_dirs) < c.max_proc * 50:\n num_proc = max(1, math.ceil(len(release_dirs) // 50))\n batch_size = len(release_dirs) // num_proc + 1\n\n manager = multiprocessing.Manager()\n # Have each process propagate the collages and playlists it wants to update back upwards. We\n # will dispatch the force updater only once in the main process, instead of many times in each\n # process.\n collages_to_force_update = manager.list()\n playlists_to_force_update = manager.list()\n\n errors: list[BaseException] = []\n\n logger.debug(\"Creating multiprocessing pool to parallelize cache executors.\")\n with multiprocessing.Pool(processes=c.max_proc) as pool:\n # At 0, no batch. At 1, 1 batch. At 49, 1 batch. At 50, 1 batch. 
At 51, 2 batches.\n for i in range(0, len(release_dirs), batch_size):\n logger.debug(\n f\"Spawning release cache update process for releases [{i}, {i+batch_size})\"\n )\n pool.apply_async(\n _update_cache_for_releases_executor,\n (\n c,\n release_dirs[i : i + batch_size],\n force,\n collages_to_force_update,\n playlists_to_force_update,\n ),\n error_callback=lambda e: errors.append(e),\n )\n pool.close()\n pool.join()\n\n if errors:\n raise ExceptionGroup(\"Exception occurred in cache update subprocesses\", errors) # type: ignore\n\n if collages_to_force_update:\n update_cache_for_collages(c, uniq(list(collages_to_force_update)), force=True)\n if playlists_to_force_update:\n update_cache_for_playlists(c, uniq(list(playlists_to_force_update)), force=True)" }, { "identifier": "VERSION", "path": "rose/common.py", "snippet": "VERSION = fp.read().strip()" }, { "identifier": "Artist", "path": "rose/common.py", "snippet": "class Artist:\n name: str\n alias: bool = False\n\n def __hash__(self) -> int:\n return hash((self.name, self.alias))" }, { "identifier": "ArtistMapping", "path": "rose/common.py", "snippet": "class ArtistMapping:\n main: list[Artist] = dataclasses.field(default_factory=list)\n guest: list[Artist] = dataclasses.field(default_factory=list)\n remixer: list[Artist] = dataclasses.field(default_factory=list)\n producer: list[Artist] = dataclasses.field(default_factory=list)\n composer: list[Artist] = dataclasses.field(default_factory=list)\n djmixer: list[Artist] = dataclasses.field(default_factory=list)\n\n @property\n def all(self) -> list[Artist]:\n return uniq(\n self.main + self.guest + self.remixer + self.producer + self.composer + self.djmixer\n )\n\n def dump(self) -> dict[str, Any]:\n return dataclasses.asdict(self)\n\n def items(self) -> Iterator[tuple[str, list[Artist]]]:\n yield \"main\", self.main\n yield \"guest\", self.guest\n yield \"remixer\", self.remixer\n yield \"producer\", self.producer\n yield \"composer\", self.composer\n yield \"djmixer\", self.djmixer" }, { "identifier": "Config", "path": "rose/config.py", "snippet": "class Config:\n music_source_dir: Path\n fuse_mount_dir: Path\n cache_dir: Path\n # Maximum parallel processes for cache updates. Defaults to nproc/2.\n max_proc: int\n ignore_release_directories: list[str]\n\n # A map from parent artist -> subartists.\n artist_aliases_map: dict[str, list[str]]\n # A map from subartist -> parent artists.\n artist_aliases_parents_map: dict[str, list[str]]\n\n fuse_artists_whitelist: list[str] | None\n fuse_genres_whitelist: list[str] | None\n fuse_labels_whitelist: list[str] | None\n fuse_artists_blacklist: list[str] | None\n fuse_genres_blacklist: list[str] | None\n fuse_labels_blacklist: list[str] | None\n\n cover_art_stems: list[str]\n valid_art_exts: list[str]\n\n rename_source_files: bool\n path_templates: PathTemplateConfig\n\n stored_metadata_rules: list[MetadataRule]\n\n @classmethod\n def parse(cls, config_path_override: Path | None = None) -> Config:\n # As we parse, delete consumed values from the data dictionary. 
If any are left over at the\n # end of the config, warn that unknown config keys were found.\n cfgpath = config_path_override or CONFIG_PATH\n cfgtext = \"\"\n try:\n with cfgpath.open(\"r\") as fp:\n cfgtext = fp.read()\n data = tomllib.loads(cfgtext)\n except FileNotFoundError as e:\n raise ConfigNotFoundError(f\"Configuration file not found ({cfgpath})\") from e\n except tomllib.TOMLDecodeError as e:\n raise ConfigDecodeError(\n f\"Failed to decode configuration file: invalid TOML: {e}\"\n ) from e\n\n try:\n music_source_dir = Path(data[\"music_source_dir\"]).expanduser()\n del data[\"music_source_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key music_source_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for music_source_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n fuse_mount_dir = Path(data[\"fuse_mount_dir\"]).expanduser()\n del data[\"fuse_mount_dir\"]\n except KeyError as e:\n raise MissingConfigKeyError(\n f\"Missing key fuse_mount_dir in configuration file ({cfgpath})\"\n ) from e\n except (ValueError, TypeError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_mount_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n\n try:\n cache_dir = Path(data[\"cache_dir\"]).expanduser()\n del data[\"cache_dir\"]\n except KeyError:\n cache_dir = XDG_CACHE_ROSE\n except (TypeError, ValueError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cache_dir in configuration file ({cfgpath}): must be a path\"\n ) from e\n cache_dir.mkdir(parents=True, exist_ok=True)\n\n try:\n max_proc = int(data[\"max_proc\"])\n del data[\"max_proc\"]\n if max_proc <= 0:\n raise ValueError(f\"must be a positive integer: got {max_proc}\")\n except KeyError:\n max_proc = max(1, multiprocessing.cpu_count() // 2)\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for max_proc in configuration file ({cfgpath}): must be a positive integer\"\n ) from e\n\n artist_aliases_map: dict[str, list[str]] = defaultdict(list)\n artist_aliases_parents_map: dict[str, list[str]] = defaultdict(list)\n try:\n for entry in data.get(\"artist_aliases\", []):\n if not isinstance(entry[\"artist\"], str):\n raise ValueError(f\"Artists must be of type str: got {type(entry['artist'])}\")\n artist_aliases_map[entry[\"artist\"]] = entry[\"aliases\"]\n if not isinstance(entry[\"aliases\"], list):\n raise ValueError(\n f\"Aliases must be of type list[str]: got {type(entry['aliases'])}\"\n )\n for s in entry[\"aliases\"]:\n if not isinstance(s, str):\n raise ValueError(f\"Each alias must be of type str: got {type(s)}\")\n artist_aliases_parents_map[s].append(entry[\"artist\"])\n with contextlib.suppress(KeyError):\n del data[\"artist_aliases\"]\n except (ValueError, TypeError, KeyError) as e:\n raise InvalidConfigValueError(\n f\"Invalid value for artist_aliases in configuration file ({cfgpath}): must be a list of {{ artist = str, aliases = list[str] }} records\"\n ) from e\n\n try:\n fuse_artists_whitelist = data[\"fuse_artists_whitelist\"]\n del data[\"fuse_artists_whitelist\"]\n if not isinstance(fuse_artists_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_whitelist)}\")\n for s in fuse_artists_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_whitelist = None\n except ValueError as e:\n 
raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_whitelist = data[\"fuse_genres_whitelist\"]\n del data[\"fuse_genres_whitelist\"]\n if not isinstance(fuse_genres_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_whitelist)}\")\n for s in fuse_genres_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_whitelist = data[\"fuse_labels_whitelist\"]\n del data[\"fuse_labels_whitelist\"]\n if not isinstance(fuse_labels_whitelist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_whitelist)}\")\n for s in fuse_labels_whitelist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_whitelist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_whitelist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_artists_blacklist = data[\"fuse_artists_blacklist\"]\n del data[\"fuse_artists_blacklist\"]\n if not isinstance(fuse_artists_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_artists_blacklist)}\")\n for s in fuse_artists_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each artist must be of type str: got {type(s)}\")\n except KeyError:\n fuse_artists_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_artists_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_genres_blacklist = data[\"fuse_genres_blacklist\"]\n del data[\"fuse_genres_blacklist\"]\n if not isinstance(fuse_genres_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_genres_blacklist)}\")\n for s in fuse_genres_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each genre must be of type str: got {type(s)}\")\n except KeyError:\n fuse_genres_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_genres_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n fuse_labels_blacklist = data[\"fuse_labels_blacklist\"]\n del data[\"fuse_labels_blacklist\"]\n if not isinstance(fuse_labels_blacklist, list):\n raise ValueError(f\"Must be a list[str]: got {type(fuse_labels_blacklist)}\")\n for s in fuse_labels_blacklist:\n if not isinstance(s, str):\n raise ValueError(f\"Each label must be of type str: got {type(s)}\")\n except KeyError:\n fuse_labels_blacklist = None\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for fuse_labels_blacklist in configuration file ({cfgpath}): {e}\"\n ) from e\n\n if fuse_artists_whitelist and fuse_artists_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_artists_whitelist and fuse_artists_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_genres_whitelist and fuse_genres_blacklist:\n raise InvalidConfigValueError(\n f\"Cannot specify both fuse_genres_whitelist and fuse_genres_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n if fuse_labels_whitelist and fuse_labels_blacklist:\n raise 
InvalidConfigValueError(\n f\"Cannot specify both fuse_labels_whitelist and fuse_labels_blacklist in configuration file ({cfgpath}): must specify only one or the other\"\n )\n\n try:\n cover_art_stems = data[\"cover_art_stems\"]\n del data[\"cover_art_stems\"]\n if not isinstance(cover_art_stems, list):\n raise ValueError(f\"Must be a list[str]: got {type(cover_art_stems)}\")\n for s in cover_art_stems:\n if not isinstance(s, str):\n raise ValueError(f\"Each cover art stem must be of type str: got {type(s)}\")\n except KeyError:\n cover_art_stems = [\"folder\", \"cover\", \"art\", \"front\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for cover_art_stems in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n valid_art_exts = data[\"valid_art_exts\"]\n del data[\"valid_art_exts\"]\n if not isinstance(valid_art_exts, list):\n raise ValueError(f\"Must be a list[str]: got {type(valid_art_exts)}\")\n for s in valid_art_exts:\n if not isinstance(s, str):\n raise ValueError(f\"Each art extension must be of type str: got {type(s)}\")\n except KeyError:\n valid_art_exts = [\"jpg\", \"jpeg\", \"png\"]\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for valid_art_exts in configuration file ({cfgpath}): {e}\"\n ) from e\n\n cover_art_stems = [x.lower() for x in cover_art_stems]\n valid_art_exts = [x.lower() for x in valid_art_exts]\n\n try:\n rename_source_files = data[\"rename_source_files\"]\n del data[\"rename_source_files\"]\n if not isinstance(rename_source_files, bool):\n raise ValueError(f\"Must be a bool: got {type(rename_source_files)}\")\n except KeyError:\n rename_source_files = False\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for rename_source_files in configuration file ({cfgpath}): {e}\"\n ) from e\n\n try:\n ignore_release_directories = data[\"ignore_release_directories\"]\n del data[\"ignore_release_directories\"]\n if not isinstance(ignore_release_directories, list):\n raise ValueError(f\"Must be a list[str]: got {type(ignore_release_directories)}\")\n for s in ignore_release_directories:\n if not isinstance(s, str):\n raise ValueError(f\"Each release directory must be of type str: got {type(s)}\")\n except KeyError:\n ignore_release_directories = []\n except ValueError as e:\n raise InvalidConfigValueError(\n f\"Invalid value for ignore_release_directories in configuration file ({cfgpath}): {e}\"\n ) from e\n\n stored_metadata_rules: list[MetadataRule] = []\n for d in data.get(\"stored_metadata_rules\", []):\n if not isinstance(d, dict):\n raise InvalidConfigValueError(\n f\"Invalid value in stored_metadata_rules in configuration file ({cfgpath}): list values must be a dict: got {type(d)}\"\n )\n\n try:\n matcher = d[\"matcher\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(matcher, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `matcher` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a string\"\n )\n\n try:\n actions = d[\"actions\"]\n except KeyError as e:\n raise InvalidConfigValueError(\n f\"Missing key `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}\"\n ) from e\n if not isinstance(actions, list):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings\"\n 
)\n for action in actions:\n if not isinstance(action, str):\n raise InvalidConfigValueError(\n f\"Invalid value for `actions` in stored_metadata_rules in configuration file ({cfgpath}): rule {d}: must be a list of strings: got {type(action)}\"\n )\n\n try:\n stored_metadata_rules.append(MetadataRule.parse(matcher, actions))\n except RuleSyntaxError as e:\n raise InvalidConfigValueError(\n f\"Failed to parse stored_metadata_rules in configuration file ({cfgpath}): rule {d}: {e}\"\n ) from e\n if \"stored_metadata_rules\" in data:\n del data[\"stored_metadata_rules\"]\n\n # Get the potential default template before evaluating the rest.\n default_templates = deepcopy(DEFAULT_TEMPLATE_PAIR)\n with contextlib.suppress(KeyError):\n default_templates.release = PathTemplate(data[\"path_templates\"][\"default\"][\"release\"])\n del data[\"path_templates\"][\"default\"][\"release\"]\n with contextlib.suppress(KeyError):\n default_templates.track = PathTemplate(data[\"path_templates\"][\"default\"][\"track\"])\n del data[\"path_templates\"][\"default\"][\"track\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"][\"default\"]:\n del data[\"path_templates\"][\"default\"]\n\n path_templates = PathTemplateConfig.with_defaults(default_templates)\n if tmpl_config := data.get(\"path_templates\", None):\n for key in [\n \"source\",\n \"all_releases\",\n \"new_releases\",\n \"recently_added_releases\",\n \"artists\",\n \"genres\",\n \"labels\",\n \"collages\",\n ]:\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).release = PathTemplate(tmpl_config[key][\"release\"])\n del tmpl_config[key][\"release\"]\n with contextlib.suppress(KeyError):\n getattr(path_templates, key).track = PathTemplate(tmpl_config[key][\"track\"])\n del tmpl_config[key][\"track\"]\n with contextlib.suppress(KeyError):\n if not tmpl_config[key]:\n del tmpl_config[key]\n\n with contextlib.suppress(KeyError):\n path_templates.playlists = PathTemplate(tmpl_config[\"playlists\"])\n del tmpl_config[\"playlists\"]\n with contextlib.suppress(KeyError):\n if not data[\"path_templates\"]:\n del data[\"path_templates\"]\n\n try:\n path_templates.parse()\n except InvalidPathTemplateError as e:\n raise InvalidConfigValueError(\n f\"Invalid path template in configuration file ({cfgpath}) for template {e.key}: {e}\"\n ) from e\n\n if data:\n unrecognized_accessors: list[str] = []\n # Do a DFS over the data keys to assemble the map of unknown keys. 
State is a tuple of\n # (\"accessor\", node).\n dfs_state: deque[tuple[str, dict[str, Any]]] = deque([(\"\", data)])\n while dfs_state:\n accessor, node = dfs_state.pop()\n if isinstance(node, dict):\n for k, v in node.items():\n child_accessor = k if not accessor else f\"{accessor}.{k}\"\n dfs_state.append((child_accessor, v))\n continue\n unrecognized_accessors.append(accessor)\n logger.warning(\n f\"Unrecognized options found in configuration file: {', '.join(unrecognized_accessors)}\"\n )\n\n return Config(\n music_source_dir=music_source_dir,\n fuse_mount_dir=fuse_mount_dir,\n cache_dir=cache_dir,\n max_proc=max_proc,\n artist_aliases_map=artist_aliases_map,\n artist_aliases_parents_map=artist_aliases_parents_map,\n fuse_artists_whitelist=fuse_artists_whitelist,\n fuse_genres_whitelist=fuse_genres_whitelist,\n fuse_labels_whitelist=fuse_labels_whitelist,\n fuse_artists_blacklist=fuse_artists_blacklist,\n fuse_genres_blacklist=fuse_genres_blacklist,\n fuse_labels_blacklist=fuse_labels_blacklist,\n cover_art_stems=cover_art_stems,\n valid_art_exts=valid_art_exts,\n path_templates=path_templates,\n rename_source_files=rename_source_files,\n ignore_release_directories=ignore_release_directories,\n stored_metadata_rules=stored_metadata_rules,\n )\n\n @functools.cached_property\n def valid_cover_arts(self) -> list[str]:\n return [s + \".\" + e for s in self.cover_art_stems for e in self.valid_art_exts]\n\n @functools.cached_property\n def cache_database_path(self) -> Path:\n return self.cache_dir / \"cache.sqlite3\"\n\n @functools.cached_property\n def watchdog_pid_path(self) -> Path:\n return self.cache_dir / \"watchdog.pid\"\n\n @functools.cached_property\n def sanitized_artist_aliases_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_map.items()}\n\n @functools.cached_property\n def sanitized_artist_aliases_parents_map(self) -> dict[str, list[str]]:\n return {sanitize_dirname(k, False): v for k, v in self.artist_aliases_parents_map.items()}" } ]
import dataclasses
import hashlib
import shutil
import time
import pytest
import tomllib
from pathlib import Path
from conftest import TEST_COLLAGE_1, TEST_PLAYLIST_1, TEST_RELEASE_1, TEST_RELEASE_2, TEST_RELEASE_3
from rose.audiotags import AudioTags
from rose.cache import (
    CACHE_SCHEMA_PATH,
    STORED_DATA_FILE_REGEX,
    CachedCollage,
    CachedPlaylist,
    CachedRelease,
    CachedTrack,
    _unpack,
    artist_exists,
    connect,
    genre_exists,
    get_collage,
    get_playlist,
    get_release,
    get_release_logtext,
    get_track,
    get_track_logtext,
    get_tracks_associated_with_release,
    get_tracks_associated_with_releases,
    label_exists,
    list_artists,
    list_collages,
    list_genres,
    list_labels,
    list_playlists,
    list_releases,
    list_tracks,
    lock,
    maybe_invalidate_cache_database,
    update_cache,
    update_cache_evict_nonexistent_releases,
    update_cache_for_releases,
)
from rose.common import VERSION, Artist, ArtistMapping
from rose.config import Config
18,311
] # Assert that source file was not updated to remove the track. with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert not [t for t in data["tracks"] if "missing" in t] assert len(data["tracks"]) == 2 @pytest.mark.usefixtures("seeded_cache") def test_list_releases(config: Config) -> None: expected = [ CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ), CachedRelease( datafile_mtime="999", id="r2", source_path=Path(config.music_source_dir / "r2"), cover_image_path=Path(config.music_source_dir / "r2" / "cover.jpg"), added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 2", releasetype="album", year=2021, disctotal=1, new=False, genres=["Classical"], labels=["Native State"], albumartists=ArtistMapping( main=[Artist("Violin Woman")], guest=[Artist("Conductor Woman")] ), metahash="2", ), CachedRelease( datafile_mtime="999", id="r3", source_path=Path(config.music_source_dir / "r3"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 3", releasetype="album", year=2021, disctotal=1, new=True, genres=[], labels=[], albumartists=ArtistMapping(), metahash="3", ), ] assert list_releases(config) == expected assert list_releases(config, ["r1"]) == expected[:1] @pytest.mark.usefixtures("seeded_cache") def test_get_release_and_associated_tracks(config: Config) -> None: release = get_release(config, "r1") assert release is not None assert release == CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ) expected_tracks = [ CachedTrack( id="t1", source_path=config.music_source_dir / "r1" / "01.m4a", source_mtime="999", tracktitle="Track 1", tracknumber="01", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=120, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", release=release, ), CachedTrack( id="t2", source_path=config.music_source_dir / "r1" / "02.m4a", source_mtime="999", tracktitle="Track 2", tracknumber="02", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=240, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="2", release=release, ), ]
def test_schema(config: Config) -> None: """Test that the schema successfully bootstraps.""" with CACHE_SCHEMA_PATH.open("rb") as fp: schema_hash = hashlib.sha256(fp.read()).hexdigest() maybe_invalidate_cache_database(config) with connect(config) as conn: cursor = conn.execute("SELECT schema_hash, config_hash, version FROM _schema_hash") row = cursor.fetchone() assert row["schema_hash"] == schema_hash assert row["config_hash"] is not None assert row["version"] == VERSION def test_migration(config: Config) -> None: """Test that "migrating" the database correctly migrates it.""" config.cache_database_path.unlink() with connect(config) as conn: conn.execute( """ CREATE TABLE _schema_hash ( schema_hash TEXT , config_hash TEXT , version TEXT , PRIMARY KEY (schema_hash, config_hash, version) ) """ ) conn.execute( """ INSERT INTO _schema_hash (schema_hash, config_hash, version) VALUES ('haha', 'lala', 'blabla') """, ) with CACHE_SCHEMA_PATH.open("rb") as fp: latest_schema_hash = hashlib.sha256(fp.read()).hexdigest() maybe_invalidate_cache_database(config) with connect(config) as conn: cursor = conn.execute("SELECT schema_hash, config_hash, version FROM _schema_hash") row = cursor.fetchone() assert row["schema_hash"] == latest_schema_hash assert row["config_hash"] is not None assert row["version"] == VERSION cursor = conn.execute("SELECT COUNT(*) FROM _schema_hash") assert cursor.fetchone()[0] == 1 def test_locks(config: Config) -> None: """Test that taking locks works. The times are a bit loose b/c GH Actions is slow.""" lock_name = "lol" # Test that the locking and timeout work. start = time.time() with lock(config, lock_name, timeout=0.2): lock1_acq = time.time() with lock(config, lock_name, timeout=0.2): lock2_acq = time.time() # Assert that we had to wait ~0.1sec to get the second lock. assert lock1_acq - start < 0.08 assert lock2_acq - lock1_acq > 0.17 # Test that releasing a lock actually works. start = time.time() with lock(config, lock_name, timeout=0.2): lock1_acq = time.time() with lock(config, lock_name, timeout=0.2): lock2_acq = time.time() # Assert that we had to wait negligible time to get the second lock. assert lock1_acq - start < 0.08 assert lock2_acq - lock1_acq < 0.08 def test_update_cache_all(config: Config) -> None: """Test that the update all function works.""" shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) # Test that we prune deleted releases too. 
with connect(config) as conn: conn.execute( """ INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash) VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0') """ ) update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 2 cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 4 def test_update_cache_multiprocessing(config: Config) -> None: """Test that the update all function works.""" shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache_for_releases(config, force_multiprocessing=True) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 2 cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 4 def test_update_cache_releases(config: Config) -> None: release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # Check that the release directory was given a UUID. release_id: str | None = None for f in release_dir.iterdir(): if m := STORED_DATA_FILE_REGEX.match(f.name): release_id = m[1] assert release_id is not None # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( """ SELECT id, source_path, title, releasetype, year, new FROM releases WHERE id = ? """, (release_id,), ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] cursor = conn.execute( "SELECT genre FROM releases_genres WHERE release_id = ?", (release_id,), ) genres = {r["genre"] for r in cursor.fetchall()} assert genres == {"K-Pop", "Pop"} cursor = conn.execute( "SELECT label FROM releases_labels WHERE release_id = ?", (release_id,), ) labels = {r["label"] for r in cursor.fetchall()} assert labels == {"A Cool Label"} cursor = conn.execute( "SELECT artist, role FROM releases_artists WHERE release_id = ?", (release_id,), ) artists = {(r["artist"], r["role"]) for r in cursor.fetchall()} assert artists == { ("BLACKPINK", "main"), } for f in release_dir.iterdir(): if f.suffix != ".m4a": continue # Assert that the track metadata was read correctly. cursor = conn.execute( """ SELECT id, source_path, title, release_id, tracknumber, discnumber, duration_seconds FROM tracks WHERE source_path = ? """, (str(f),), ) row = cursor.fetchone() track_id = row["id"] assert row["title"].startswith("Track") assert row["release_id"] == release_id assert row["tracknumber"] != "" assert row["discnumber"] == "1" assert row["duration_seconds"] == 2 cursor = conn.execute( "SELECT artist, role FROM tracks_artists WHERE track_id = ?", (track_id,), ) artists = {(r["artist"], r["role"]) for r in cursor.fetchall()} assert artists == { ("BLACKPINK", "main"), } def test_update_cache_releases_uncached_with_existing_id(config: Config) -> None: """Test that IDs in filenames are read and preserved.""" release_dir = config.music_source_dir / TEST_RELEASE_2.name shutil.copytree(TEST_RELEASE_2, release_dir) update_cache_for_releases(config, [release_dir]) # Check that the release directory was given a UUID. 
release_id: str | None = None for f in release_dir.iterdir(): if m := STORED_DATA_FILE_REGEX.match(f.name): release_id = m[1] assert release_id == "ilovecarly" # Hardcoded ID for testing. def test_update_cache_releases_preserves_track_ids_across_rebuilds(config: Config) -> None: """Test that track IDs are preserved across cache rebuilds.""" release_dir = config.music_source_dir / TEST_RELEASE_3.name shutil.copytree(TEST_RELEASE_3, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute("SELECT id FROM tracks") first_track_ids = {r["id"] for r in cursor} # Nuke the database. config.cache_database_path.unlink() maybe_invalidate_cache_database(config) # Repeat cache population. update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute("SELECT id FROM tracks") second_track_ids = {r["id"] for r in cursor} # Assert IDs are equivalent. assert first_track_ids == second_track_ids def test_update_cache_releases_writes_ids_to_tags(config: Config) -> None: """Test that track IDs and release IDs are written to files.""" release_dir = config.music_source_dir / TEST_RELEASE_3.name shutil.copytree(TEST_RELEASE_3, release_dir) af = AudioTags.from_file(release_dir / "01.m4a") assert af.id is None assert af.release_id is None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is None assert af.release_id is None update_cache_for_releases(config, [release_dir]) af = AudioTags.from_file(release_dir / "01.m4a") assert af.id is not None assert af.release_id is not None af = AudioTags.from_file(release_dir / "02.m4a") assert af.id is not None assert af.release_id is not None def test_update_cache_releases_already_fully_cached(config: Config) -> None: """Test that a fully cached release No Ops when updated again.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was read correctly. with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_previously_cached(config: Config) -> None: """Test that a cached release is updated after a track updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) # I'm too lazy to mutagen update the files, so instead we're going to update the database. And # then touch a file to signify that "we modified it." with connect(config) as conn: conn.execute("UPDATE releases SET title = 'An Uncool Album'") (release_dir / "01.m4a").touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. 
with connect(config) as conn: cursor = conn.execute( "SELECT id, source_path, title, releasetype, year, new FROM releases", ) row = cursor.fetchone() assert row["source_path"] == str(release_dir) assert row["title"] == "I Love Blackpink" assert row["releasetype"] == "album" assert row["year"] == 1990 assert row["new"] def test_update_cache_releases_disk_update_to_datafile(config: Config) -> None: """Test that a cached release is updated after a datafile updates.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: conn.execute("UPDATE releases SET datafile_mtime = '0' AND new = false") update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT new, added_at FROM releases") row = cursor.fetchone() assert row["new"] assert row["added_at"] def test_update_cache_releases_disk_upgrade_old_datafile(config: Config) -> None: """Test that a legacy invalid datafile is upgraded on index.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) datafile = release_dir / ".rose.lalala.toml" datafile.touch() update_cache_for_releases(config, [release_dir]) # Assert that the release metadata was re-read and updated correctly. with connect(config) as conn: cursor = conn.execute("SELECT id, new, added_at FROM releases") row = cursor.fetchone() assert row["id"] == "lalala" assert row["new"] assert row["added_at"] with datafile.open("r") as fp: data = fp.read() assert "new = true" in data assert "added_at = " in data def test_update_cache_releases_source_path_renamed(config: Config) -> None: """Test that a cached release is updated after a directory rename.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) moved_release_dir = config.music_source_dir / "moved lol" release_dir.rename(moved_release_dir) update_cache_for_releases(config, [moved_release_dir]) # Assert that the release metadata was re-read and updated correctly. 
    with connect(config) as conn:
        cursor = conn.execute(
            "SELECT id, source_path, title, releasetype, year, new FROM releases",
        )
        row = cursor.fetchone()
        assert row["source_path"] == str(moved_release_dir)
        assert row["title"] == "I Love Blackpink"
        assert row["releasetype"] == "album"
        assert row["year"] == 1990
        assert row["new"]


def test_update_cache_releases_delete_nonexistent(config: Config) -> None:
    """Test that releases that no longer exist on disk are cleared from the cache."""
    with connect(config) as conn:
        conn.execute(
            """
            INSERT INTO releases (id, source_path, added_at, datafile_mtime, title, releasetype, disctotal, metahash)
            VALUES ('aaaaaa', '0000-01-01T00:00:00+00:00', '999', 'nonexistent', 'aa', 'unknown', false, '0')
            """
        )
    update_cache_evict_nonexistent_releases(config)
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0


def test_update_cache_releases_skips_empty_directory(config: Config) -> None:
    """Test that a directory with no audio files is skipped."""
    rd = config.music_source_dir / "lalala"
    rd.mkdir()
    (rd / "ignoreme.file").touch()
    update_cache_for_releases(config, [rd])
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0


def test_update_cache_releases_uncaches_empty_directory(config: Config) -> None:
    """Test that a previously-cached directory that now has no audio files is cleared from the cache."""
    release_dir = config.music_source_dir / TEST_RELEASE_1.name
    shutil.copytree(TEST_RELEASE_1, release_dir)
    update_cache_for_releases(config, [release_dir])
    shutil.rmtree(release_dir)
    release_dir.mkdir()
    update_cache_for_releases(config, [release_dir])
    with connect(config) as conn:
        cursor = conn.execute("SELECT COUNT(*) FROM releases")
        assert cursor.fetchone()[0] == 0


def test_update_cache_releases_evicts_relations(config: Config) -> None:
    """
    Test that related entities (artist, genre, label) that have been removed from the tags are
    properly evicted from the cache on update.
    """
    release_dir = config.music_source_dir / TEST_RELEASE_2.name
    shutil.copytree(TEST_RELEASE_2, release_dir)
    # Initial cache population.
    update_cache_for_releases(config, [release_dir])
    # Pretend that we have more artists in the cache.
    with connect(config) as conn:
        conn.execute(
            """
            INSERT INTO releases_genres (release_id, genre, genre_sanitized, position)
            VALUES ('ilovecarly', 'lalala', 'lalala', 2)
            """,
        )
        conn.execute(
            """
            INSERT INTO releases_labels (release_id, label, label_sanitized, position)
            VALUES ('ilovecarly', 'lalala', 'lalala', 1)
            """,
        )
        conn.execute(
            """
            INSERT INTO releases_artists (release_id, artist, artist_sanitized, role, position)
            VALUES ('ilovecarly', 'lalala', 'lalala', 'main', 1)
            """,
        )
        conn.execute(
            """
            INSERT INTO tracks_artists (track_id, artist, artist_sanitized, role, position)
            SELECT id, 'lalala', 'lalala', 'main', 1 FROM tracks
            """,
        )
    # Second cache refresh.
    update_cache_for_releases(config, [release_dir], force=True)
    # Assert that all of the above were evicted.
with connect(config) as conn: cursor = conn.execute( "SELECT EXISTS (SELECT * FROM releases_genres WHERE genre = 'lalala')" ) assert not cursor.fetchone()[0] cursor = conn.execute( "SELECT EXISTS (SELECT * FROM releases_labels WHERE label = 'lalala')" ) assert not cursor.fetchone()[0] cursor = conn.execute( "SELECT EXISTS (SELECT * FROM releases_artists WHERE artist = 'lalala')" ) assert not cursor.fetchone()[0] cursor = conn.execute( "SELECT EXISTS (SELECT * FROM tracks_artists WHERE artist = 'lalala')" ) assert not cursor.fetchone()[0] def test_update_cache_releases_ignores_directories(config: Config) -> None: """Test that the ignore_release_directories configuration value works.""" config = dataclasses.replace(config, ignore_release_directories=["lalala"]) release_dir = config.music_source_dir / "lalala" shutil.copytree(TEST_RELEASE_1, release_dir) # Test that both arg+no-arg ignore the directory. update_cache_for_releases(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 0 update_cache_for_releases(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 0 def test_update_cache_releases_notices_deleted_track(config: Config) -> None: """Test that we notice when a track is deleted.""" release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache(config) (release_dir / "02.m4a").unlink() update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM tracks") assert cursor.fetchone()[0] == 1 def test_update_cache_releases_ignores_partially_written_directory(config: Config) -> None: """Test that a partially-written cached release is ignored.""" # 1. Write the directory and index it. This should give it IDs and shit. release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache(config) # 2. Move the directory and "remove" the ID file. renamed_release_dir = config.music_source_dir / "lalala" release_dir.rename(renamed_release_dir) datafile = next(f for f in renamed_release_dir.iterdir() if f.stem.startswith(".rose")) tmpfile = datafile.with_name("tmp") datafile.rename(tmpfile) # 3. Re-update cache. We should see an empty cache now. update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 0 # 4. Put the datafile back. We should now see the release cache again properly. datafile.with_name("tmp").rename(datafile) update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 1 # 5. Rename and remove the ID file again. We should see an empty cache again. release_dir = renamed_release_dir renamed_release_dir = config.music_source_dir / "bahaha" release_dir.rename(renamed_release_dir) next(f for f in renamed_release_dir.iterdir() if f.stem.startswith(".rose")).unlink() update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 0 # 6. Run with force=True. This should index the directory and make a new .rose.toml file. 
update_cache(config, force=True) assert (renamed_release_dir / datafile.name).is_file() with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM releases") assert cursor.fetchone()[0] == 1 def test_update_cache_rename_source_files(config: Config) -> None: """Test that we properly rename the source directory on cache update.""" config = dataclasses.replace(config, rename_source_files=True) shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) (config.music_source_dir / TEST_RELEASE_1.name / "cover.jpg").touch() update_cache(config) expected_dir = config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW]" assert expected_dir in list(config.music_source_dir.iterdir()) files_in_dir = list(expected_dir.iterdir()) assert expected_dir / "01. Track 1.m4a" in files_in_dir assert expected_dir / "02. Track 2.m4a" in files_in_dir with connect(config) as conn: cursor = conn.execute("SELECT source_path, cover_image_path FROM releases") row = cursor.fetchone() assert Path(row["source_path"]) == expected_dir assert Path(row["cover_image_path"]) == expected_dir / "cover.jpg" cursor = conn.execute("SELECT source_path FROM tracks") assert {Path(r[0]) for r in cursor} == { expected_dir / "01. Track 1.m4a", expected_dir / "02. Track 2.m4a", } def test_update_cache_rename_source_files_nested_file_directories(config: Config) -> None: """Test that we properly rename arbitrarily nested files and clean up the empty dirs.""" config = dataclasses.replace(config, rename_source_files=True) shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) (config.music_source_dir / TEST_RELEASE_1.name / "lala").mkdir() (config.music_source_dir / TEST_RELEASE_1.name / "01.m4a").rename( config.music_source_dir / TEST_RELEASE_1.name / "lala" / "1.m4a" ) update_cache(config) expected_dir = config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW]" assert expected_dir in list(config.music_source_dir.iterdir()) files_in_dir = list(expected_dir.iterdir()) assert expected_dir / "01. Track 1.m4a" in files_in_dir assert expected_dir / "02. Track 2.m4a" in files_in_dir assert expected_dir / "lala" not in files_in_dir with connect(config) as conn: cursor = conn.execute("SELECT source_path FROM releases") assert Path(cursor.fetchone()[0]) == expected_dir cursor = conn.execute("SELECT source_path FROM tracks") assert {Path(r[0]) for r in cursor} == { expected_dir / "01. Track 1.m4a", expected_dir / "02. Track 2.m4a", } def test_update_cache_rename_source_files_collisions(config: Config) -> None: """Test that we properly rename arbitrarily nested files and clean up the empty dirs.""" config = dataclasses.replace(config, rename_source_files=True) # Three copies of the same directory, and two instances of Track 1. shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copyfile( config.music_source_dir / TEST_RELEASE_1.name / "01.m4a", config.music_source_dir / TEST_RELEASE_1.name / "haha.m4a", ) shutil.copytree( config.music_source_dir / TEST_RELEASE_1.name, config.music_source_dir / "Number 2" ) shutil.copytree( config.music_source_dir / TEST_RELEASE_1.name, config.music_source_dir / "Number 3" ) update_cache(config) release_dirs = list(config.music_source_dir.iterdir()) for expected_dir in [ config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW]", config.music_source_dir / "BLACKPINK - 1990. I Love Blackpink [NEW] [2]", config.music_source_dir / "BLACKPINK - 1990. 
I Love Blackpink [NEW] [3]", ]: assert expected_dir in release_dirs files_in_dir = list(expected_dir.iterdir()) assert expected_dir / "01. Track 1.m4a" in files_in_dir assert expected_dir / "01. Track 1 [2].m4a" in files_in_dir assert expected_dir / "02. Track 2.m4a" in files_in_dir with connect(config) as conn: cursor = conn.execute( "SELECT id FROM releases WHERE source_path = ?", (str(expected_dir),) ) release_id = cursor.fetchone()[0] assert release_id cursor = conn.execute( "SELECT source_path FROM tracks WHERE release_id = ?", (release_id,) ) assert {Path(r[0]) for r in cursor} == { expected_dir / "01. Track 1.m4a", expected_dir / "01. Track 1 [2].m4a", expected_dir / "02. Track 2.m4a", } def test_update_cache_releases_updates_full_text_search(config: Config) -> None: release_dir = config.music_source_dir / TEST_RELEASE_1.name shutil.copytree(TEST_RELEASE_1, release_dir) update_cache_for_releases(config, [release_dir]) with connect(config) as conn: cursor = conn.execute( """ SELECT rowid, * FROM rules_engine_fts """ ) print([dict(x) for x in cursor]) cursor = conn.execute( """ SELECT rowid, * FROM tracks """ ) print([dict(x) for x in cursor]) with connect(config) as conn: cursor = conn.execute( """ SELECT t.source_path FROM rules_engine_fts s JOIN tracks t ON t.rowid = s.rowid WHERE s.tracktitle MATCH 'r a c k' """ ) fnames = {Path(r["source_path"]) for r in cursor} assert fnames == { release_dir / "01.m4a", release_dir / "02.m4a", } # And then test the DELETE+INSERT behavior. And that the query still works. update_cache_for_releases(config, [release_dir], force=True) with connect(config) as conn: cursor = conn.execute( """ SELECT t.source_path FROM rules_engine_fts s JOIN tracks t ON t.rowid = s.rowid WHERE s.tracktitle MATCH 'r a c k' """ ) fnames = {Path(r["source_path"]) for r in cursor} assert fnames == { release_dir / "01.m4a", release_dir / "02.m4a", } def test_update_cache_collages(config: Config) -> None: shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") update_cache(config) # Assert that the collage metadata was read correctly. with connect(config) as conn: cursor = conn.execute("SELECT name, source_mtime FROM collages") rows = cursor.fetchall() assert len(rows) == 1 row = rows[0] assert row["name"] == "Rose Gold" assert row["source_mtime"] cursor = conn.execute( "SELECT collage_name, release_id, position FROM collages_releases WHERE NOT missing" ) rows = cursor.fetchall() assert len(rows) == 1 row = rows[0] assert row["collage_name"] == "Rose Gold" assert row["release_id"] == "ilovecarly" assert row["position"] == 1 def test_update_cache_collages_missing_release_id(config: Config) -> None: shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") update_cache(config) # Assert that the releases in the collage were read as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to set the releases missing. 
with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len(data["releases"]) == 2 assert len([r for r in data["releases"] if r["missing"]]) == 2 shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) update_cache(config) # Assert that the releases in the collage were unflagged as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE NOT missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to remove the missing flag. with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len([r for r in data["releases"] if "missing" not in r]) == 2 def test_update_cache_collages_missing_release_id_multiprocessing(config: Config) -> None: shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") update_cache(config) # Assert that the releases in the collage were read as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to set the releases missing. with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len(data["releases"]) == 2 assert len([r for r in data["releases"] if r["missing"]]) == 2 shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) update_cache(config, force_multiprocessing=True) # Assert that the releases in the collage were unflagged as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM collages_releases WHERE NOT missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to remove the missing flag. with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert len([r for r in data["releases"] if "missing" not in r]) == 2 def test_update_cache_collages_on_release_rename(config: Config) -> None: """ Test that a renamed release source directory does not remove the release from any collages. This can occur because the rename operation is executed in SQL as release deletion followed by release creation. """ shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) update_cache(config) (config.music_source_dir / TEST_RELEASE_2.name).rename(config.music_source_dir / "lalala") update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT collage_name, release_id, position FROM collages_releases") rows = [dict(r) for r in cursor] assert rows == [ {"collage_name": "Rose Gold", "release_id": "ilovecarly", "position": 1}, {"collage_name": "Rose Gold", "release_id": "ilovenewjeans", "position": 2}, ] # Assert that source file was not updated to remove the release. 
with (config.music_source_dir / "!collages" / "Rose Gold.toml").open("rb") as fp: data = tomllib.load(fp) assert not [r for r in data["releases"] if "missing" in r] assert len(data["releases"]) == 2 def test_update_cache_playlists(config: Config) -> None: shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") update_cache(config) # Assert that the playlist metadata was read correctly. with connect(config) as conn: cursor = conn.execute("SELECT name, source_mtime, cover_path FROM playlists") rows = cursor.fetchall() assert len(rows) == 1 row = rows[0] assert row["name"] == "Lala Lisa" assert row["source_mtime"] is not None assert row["cover_path"] == str(config.music_source_dir / "!playlists" / "Lala Lisa.jpg") cursor = conn.execute( "SELECT playlist_name, track_id, position FROM playlists_tracks ORDER BY position" ) assert [dict(r) for r in cursor] == [ {"playlist_name": "Lala Lisa", "track_id": "iloveloona", "position": 1}, {"playlist_name": "Lala Lisa", "track_id": "ilovetwice", "position": 2}, ] def test_update_cache_playlists_missing_track_id(config: Config) -> None: shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") update_cache(config) # Assert that the tracks in the playlist were read as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM playlists_tracks WHERE missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to set the tracks missing. with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert len(data["tracks"]) == 2 assert len([r for r in data["tracks"] if r["missing"]]) == 2 shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache(config) # Assert that the tracks in the playlist were unflagged as missing. with connect(config) as conn: cursor = conn.execute("SELECT COUNT(*) FROM playlists_tracks WHERE NOT missing") assert cursor.fetchone()[0] == 2 # Assert that source file was updated to remove the missing flag. with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert len([r for r in data["tracks"] if "missing" not in r]) == 2 @pytest.mark.parametrize("multiprocessing", [True, False]) def test_update_releases_updates_collages_description_meta( config: Config, multiprocessing: bool ) -> None: shutil.copytree(TEST_RELEASE_1, config.music_source_dir / TEST_RELEASE_1.name) shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_RELEASE_3, config.music_source_dir / TEST_RELEASE_3.name) shutil.copytree(TEST_COLLAGE_1, config.music_source_dir / "!collages") cpath = config.music_source_dir / "!collages" / "Rose Gold.toml" # First cache update: releases are inserted, collage is new. This should update the collage # TOML. update_cache(config) with cpath.open("r") as fp: assert ( fp.read() == """\ releases = [ { uuid = "ilovecarly", description_meta = "Carly Rae Jepsen - 1990. I Love Carly" }, { uuid = "ilovenewjeans", description_meta = "NewJeans - 1990. I Love NewJeans" }, ] """ ) # Now prep for the second update. Reset the TOML to have garbage again, and update the database # such that the virtual dirnames are also incorrect. 
with cpath.open("w") as fp: fp.write( """\ [[releases]] uuid = "ilovecarly" description_meta = "lalala" [[releases]] uuid = "ilovenewjeans" description_meta = "hahaha" """ ) # Second cache update: releases exist, collages exist, release is "updated." This should also # trigger a metadata update. update_cache_for_releases(config, force=True, force_multiprocessing=multiprocessing) with cpath.open("r") as fp: assert ( fp.read() == """\ releases = [ { uuid = "ilovecarly", description_meta = "Carly Rae Jepsen - 1990. I Love Carly" }, { uuid = "ilovenewjeans", description_meta = "NewJeans - 1990. I Love NewJeans" }, ] """ ) @pytest.mark.parametrize("multiprocessing", [True, False]) def test_update_tracks_updates_playlists_description_meta( config: Config, multiprocessing: bool ) -> None: shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") ppath = config.music_source_dir / "!playlists" / "Lala Lisa.toml" # First cache update: tracks are inserted, playlist is new. This should update the playlist # TOML. update_cache(config) with ppath.open("r") as fp: assert ( fp.read() == """\ tracks = [ { uuid = "iloveloona", description_meta = "Carly Rae Jepsen - Track 1.m4a" }, { uuid = "ilovetwice", description_meta = "Carly Rae Jepsen - Track 2.m4a" }, ] """ ) # Now prep for the second update. Reset the TOML to have garbage again, and update the database # such that the virtual filenames are also incorrect. with ppath.open("w") as fp: fp.write( """\ [[tracks]] uuid = "iloveloona" description_meta = "lalala" [[tracks]] uuid = "ilovetwice" description_meta = "hahaha" """ ) # Second cache update: tracks exist, playlists exist, track is "updated." This should also # trigger a metadata update. update_cache_for_releases(config, force=True, force_multiprocessing=multiprocessing) with ppath.open("r") as fp: assert ( fp.read() == """\ tracks = [ { uuid = "iloveloona", description_meta = "Carly Rae Jepsen - Track 1.m4a" }, { uuid = "ilovetwice", description_meta = "Carly Rae Jepsen - Track 2.m4a" }, ] """ ) def test_update_cache_playlists_on_release_rename(config: Config) -> None: """ Test that a renamed release source directory does not remove any of its tracks any playlists. This can occur because when a release is renamed, we remove all tracks from the database and then reinsert them. """ shutil.copytree(TEST_PLAYLIST_1, config.music_source_dir / "!playlists") shutil.copytree(TEST_RELEASE_2, config.music_source_dir / TEST_RELEASE_2.name) update_cache(config) (config.music_source_dir / TEST_RELEASE_2.name).rename(config.music_source_dir / "lalala") update_cache(config) with connect(config) as conn: cursor = conn.execute("SELECT playlist_name, track_id, position FROM playlists_tracks") rows = [dict(r) for r in cursor] assert rows == [ {"playlist_name": "Lala Lisa", "track_id": "iloveloona", "position": 1}, {"playlist_name": "Lala Lisa", "track_id": "ilovetwice", "position": 2}, ] # Assert that source file was not updated to remove the track. 
with (config.music_source_dir / "!playlists" / "Lala Lisa.toml").open("rb") as fp: data = tomllib.load(fp) assert not [t for t in data["tracks"] if "missing" in t] assert len(data["tracks"]) == 2 @pytest.mark.usefixtures("seeded_cache") def test_list_releases(config: Config) -> None: expected = [ CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ), CachedRelease( datafile_mtime="999", id="r2", source_path=Path(config.music_source_dir / "r2"), cover_image_path=Path(config.music_source_dir / "r2" / "cover.jpg"), added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 2", releasetype="album", year=2021, disctotal=1, new=False, genres=["Classical"], labels=["Native State"], albumartists=ArtistMapping( main=[Artist("Violin Woman")], guest=[Artist("Conductor Woman")] ), metahash="2", ), CachedRelease( datafile_mtime="999", id="r3", source_path=Path(config.music_source_dir / "r3"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 3", releasetype="album", year=2021, disctotal=1, new=True, genres=[], labels=[], albumartists=ArtistMapping(), metahash="3", ), ] assert list_releases(config) == expected assert list_releases(config, ["r1"]) == expected[:1] @pytest.mark.usefixtures("seeded_cache") def test_get_release_and_associated_tracks(config: Config) -> None: release = get_release(config, "r1") assert release is not None assert release == CachedRelease( datafile_mtime="999", id="r1", source_path=Path(config.music_source_dir / "r1"), cover_image_path=None, added_at="0000-01-01T00:00:00+00:00", albumtitle="Release 1", releasetype="album", year=2023, disctotal=1, new=False, genres=["Techno", "Deep House"], labels=["Silk Music"], albumartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", ) expected_tracks = [ CachedTrack( id="t1", source_path=config.music_source_dir / "r1" / "01.m4a", source_mtime="999", tracktitle="Track 1", tracknumber="01", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=120, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="1", release=release, ), CachedTrack( id="t2", source_path=config.music_source_dir / "r1" / "02.m4a", source_mtime="999", tracktitle="Track 2", tracknumber="02", tracktotal=2, discnumber="01", disctotal=1, duration_seconds=240, trackartists=ArtistMapping(main=[Artist("Techno Man"), Artist("Bass Man")]), metahash="2", release=release, ), ]
assert get_tracks_associated_with_release(config, release) == expected_tracks
22
2023-10-09 14:42:23+00:00
24k
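As a minimal sketch of how a row like the one above might be consumed for next-line completion (the field names cropped_code, next_line, and context used below are assumptions for illustration, not guaranteed keys of this dump; only the snippet key inside each context entry is visible in the rows themselves):

# Minimal sketch: turn one row of this dump into a (prompt, target) pair for
# next-line completion. Field names are assumed for illustration; adjust them
# to the actual keys when the data is loaded.
from typing import Any


def build_completion_example(record: dict[str, Any]) -> tuple[str, str]:
    """Return (prompt, target): the code prefix to complete and the gold next line."""
    # Concatenate the retrieved cross-file snippets (the escaped blocks at the start
    # of the row), then the in-file code prefix; the target is the single gold line.
    context_snippets = "\n".join(c["snippet"] for c in record.get("context", []))
    prompt = context_snippets + "\n" + record["cropped_code"]
    target = record["next_line"]
    return prompt, target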
zhaoyizhou1123/mbrcsl
examples/roboverse/run_mbrcsl_mlpdyn_roboverse.py
[ { "identifier": "EnsembleDynamicsModel", "path": "offlinerlkit/modules/dynamics_module.py", "snippet": "class EnsembleDynamicsModel(nn.Module):\n def __init__(\n self,\n obs_dim: int,\n action_dim: int,\n hidden_dims: Union[List[int], Tuple[int]],\n num_ensemble: int = 7,\n num_elites: int = 5,\n activation: nn.Module = Swish,\n weight_decays: Optional[Union[List[float], Tuple[float]]] = None,\n with_reward: bool = True,\n device: str = \"cpu\"\n ) -> None:\n super().__init__()\n\n self.num_ensemble = num_ensemble\n self.num_elites = num_elites\n self._with_reward = with_reward\n self.device = torch.device(device)\n\n self.activation = activation()\n\n assert len(weight_decays) == (len(hidden_dims) + 1)\n\n module_list = []\n hidden_dims = [obs_dim+action_dim] + list(hidden_dims)\n if weight_decays is None:\n weight_decays = [0.0] * (len(hidden_dims) + 1)\n for in_dim, out_dim, weight_decay in zip(hidden_dims[:-1], hidden_dims[1:], weight_decays[:-1]):\n module_list.append(EnsembleLinear(in_dim, out_dim, num_ensemble, weight_decay))\n self.backbones = nn.ModuleList(module_list)\n\n self.output_layer = EnsembleLinear(\n hidden_dims[-1],\n 2 * (obs_dim + self._with_reward),\n num_ensemble,\n weight_decays[-1]\n )\n\n self.register_parameter(\n \"max_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logvar\",\n nn.Parameter(torch.ones(obs_dim + self._with_reward) * -10, requires_grad=True)\n )\n\n self.register_parameter(\n \"elites\",\n nn.Parameter(torch.tensor(list(range(0, self.num_elites))), requires_grad=False)\n )\n\n self.to(self.device)\n\n def forward(self, obs_action: np.ndarray) -> Tuple[torch.Tensor, torch.Tensor]:\n obs_action = torch.as_tensor(obs_action, dtype=torch.float32).to(self.device)\n output = obs_action\n for layer in self.backbones:\n output = self.activation(layer(output))\n mean, logvar = torch.chunk(self.output_layer(output), 2, dim=-1)\n logvar = soft_clamp(logvar, self.min_logvar, self.max_logvar)\n return mean, logvar\n\n def load_save(self) -> None:\n for layer in self.backbones:\n layer.load_save()\n self.output_layer.load_save()\n\n def update_save(self, indexes: List[int]) -> None:\n for layer in self.backbones:\n layer.update_save(indexes)\n self.output_layer.update_save(indexes)\n \n def get_decay_loss(self) -> torch.Tensor:\n decay_loss = 0\n for layer in self.backbones:\n decay_loss += layer.get_decay_loss()\n decay_loss += self.output_layer.get_decay_loss()\n return decay_loss\n\n def set_elites(self, indexes: List[int]) -> None:\n assert len(indexes) <= self.num_ensemble and max(indexes) < self.num_ensemble\n self.register_parameter('elites', nn.Parameter(torch.tensor(indexes), requires_grad=False))\n \n def random_elite_idxs(self, batch_size: int) -> np.ndarray:\n idxs = np.random.choice(self.elites.data.cpu().numpy(), size=batch_size)\n return idxs" }, { "identifier": "EnsembleDynamics", "path": "offlinerlkit/dynamics/ensemble_dynamics.py", "snippet": "class EnsembleDynamics(BaseDynamics):\n def __init__(\n self,\n model: nn.Module,\n optim: torch.optim.Optimizer,\n scaler: StandardScaler,\n terminal_fn: Callable[[np.ndarray, np.ndarray, np.ndarray], np.ndarray],\n penalty_coef: float = 0.0,\n uncertainty_mode: str = \"aleatoric\"\n ) -> None:\n super().__init__(model, optim)\n self.scaler = scaler\n self.terminal_fn = terminal_fn\n self._penalty_coef = penalty_coef\n self._uncertainty_mode = uncertainty_mode\n\n @ torch.no_grad()\n def step(\n self,\n obs: 
np.ndarray,\n action: np.ndarray\n ) -> Tuple[np.ndarray, np.ndarray, np.ndarray, Dict]:\n '''\n Return:\n reward (B,1) (if obs has batch)\n terminal (B,1)\n '''\n \"imagine single forward step\"\n obs_act = np.concatenate([obs, action], axis=-1)\n obs_act = self.scaler.transform(obs_act)\n mean, logvar = self.model(obs_act)\n mean = mean.cpu().numpy()\n logvar = logvar.cpu().numpy()\n mean[..., :-1] += obs # We estimated delta_obs\n std = np.sqrt(np.exp(logvar))\n\n ensemble_samples = (mean + np.random.normal(size=mean.shape) * std).astype(np.float32)\n\n # choose one model from ensemble\n num_models, batch_size, _ = ensemble_samples.shape\n model_idxs = self.model.random_elite_idxs(batch_size)\n samples = ensemble_samples[model_idxs, np.arange(batch_size)]\n \n next_obs = samples[..., :-1]\n reward = samples[..., -1:]\n terminal = self.terminal_fn(obs, action, next_obs)\n info = {}\n info[\"raw_reward\"] = reward\n\n if self._penalty_coef:\n if self._uncertainty_mode == \"aleatoric\":\n penalty = np.amax(np.linalg.norm(std, axis=2), axis=0)\n elif self._uncertainty_mode == \"pairwise-diff\":\n next_obses_mean = mean[..., :-1]\n next_obs_mean = np.mean(next_obses_mean, axis=0)\n diff = next_obses_mean - next_obs_mean\n penalty = np.amax(np.linalg.norm(diff, axis=2), axis=0)\n elif self._uncertainty_mode == \"ensemble_std\":\n next_obses_mean = mean[..., :-1]\n penalty = np.sqrt(next_obses_mean.var(0).mean(1))\n else:\n raise ValueError\n penalty = np.expand_dims(penalty, 1).astype(np.float32)\n assert penalty.shape == reward.shape\n reward = reward - self._penalty_coef * penalty\n info[\"penalty\"] = penalty\n \n return next_obs, reward, terminal, info\n \n @ torch.no_grad()\n def sample_next_obss(\n self,\n obs: torch.Tensor,\n action: torch.Tensor,\n num_samples: int\n ) -> torch.Tensor:\n obs_act = torch.cat([obs, action], dim=-1)\n obs_act = self.scaler.transform_tensor(obs_act)\n mean, logvar = self.model(obs_act)\n mean[..., :-1] += obs\n std = torch.sqrt(torch.exp(logvar))\n\n mean = mean[self.model.elites.data.cpu().numpy()]\n std = std[self.model.elites.data.cpu().numpy()]\n\n samples = torch.stack([mean + torch.randn_like(std) * std for i in range(num_samples)], 0)\n next_obss = samples[..., :-1]\n return next_obss\n\n def format_samples_for_training(self, data: Dict) -> Tuple[np.ndarray, np.ndarray]:\n obss = data[\"observations\"]\n actions = data[\"actions\"]\n next_obss = data[\"next_observations\"]\n rewards = data[\"rewards\"]\n rewards = rewards.reshape(rewards.shape[0], -1)\n delta_obss = next_obss - obss\n inputs = np.concatenate((obss, actions), axis=-1)\n targets = np.concatenate((delta_obss, rewards), axis=-1)\n return inputs, targets\n\n def train(\n self,\n data: Dict,\n logger: Logger,\n max_epochs: Optional[float] = None,\n max_epochs_since_update: int = 5,\n batch_size: int = 256,\n holdout_ratio: float = 0.2,\n logvar_loss_coef: float = 0.01\n ) -> None:\n inputs, targets = self.format_samples_for_training(data)\n data_size = inputs.shape[0]\n holdout_size = min(int(data_size * holdout_ratio), 1000)\n train_size = data_size - holdout_size\n train_splits, holdout_splits = torch.utils.data.random_split(range(data_size), (train_size, holdout_size))\n train_inputs, train_targets = inputs[train_splits.indices], targets[train_splits.indices]\n holdout_inputs, holdout_targets = inputs[holdout_splits.indices], targets[holdout_splits.indices]\n\n self.scaler.fit(train_inputs)\n train_inputs = self.scaler.transform(train_inputs)\n holdout_inputs = 
self.scaler.transform(holdout_inputs)\n holdout_losses = [1e10 for i in range(self.model.num_ensemble)]\n\n data_idxes = np.random.randint(train_size, size=[self.model.num_ensemble, train_size])\n def shuffle_rows(arr):\n idxes = np.argsort(np.random.uniform(size=arr.shape), axis=-1)\n return arr[np.arange(arr.shape[0])[:, None], idxes]\n\n epoch = 0\n cnt = 0\n logger.log(\"Training dynamics:\")\n while True:\n epoch += 1\n train_loss = self.learn(train_inputs[data_idxes], train_targets[data_idxes], batch_size, logvar_loss_coef)\n new_holdout_losses = self.validate(holdout_inputs, holdout_targets)\n holdout_loss = (np.sort(new_holdout_losses)[:self.model.num_elites]).mean()\n logger.logkv(\"loss/dynamics_train_loss\", train_loss)\n logger.logkv(\"loss/dynamics_holdout_loss\", holdout_loss)\n logger.set_timestep(epoch)\n logger.dumpkvs(exclude=[\"policy_training_progress\"])\n\n # shuffle data for each base learner\n data_idxes = shuffle_rows(data_idxes)\n\n indexes = []\n for i, new_loss, old_loss in zip(range(len(holdout_losses)), new_holdout_losses, holdout_losses):\n improvement = (old_loss - new_loss) / old_loss\n if improvement > 0.01:\n indexes.append(i)\n holdout_losses[i] = new_loss\n \n if len(indexes) > 0:\n self.model.update_save(indexes)\n cnt = 0\n else:\n cnt += 1\n \n if (cnt >= max_epochs_since_update) or (max_epochs and (epoch >= max_epochs)):\n break\n\n indexes = self.select_elites(holdout_losses)\n self.model.set_elites(indexes)\n self.model.load_save()\n self.save(logger.model_dir)\n self.model.eval()\n logger.log(\"elites:{} , holdout loss: {}\".format(indexes, (np.sort(holdout_losses)[:self.model.num_elites]).mean()))\n \n def learn(\n self,\n inputs: np.ndarray,\n targets: np.ndarray,\n batch_size: int = 256,\n logvar_loss_coef: float = 0.01\n ) -> float:\n self.model.train()\n train_size = inputs.shape[1]\n losses = []\n\n for batch_num in range(int(np.ceil(train_size / batch_size))):\n inputs_batch = inputs[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = targets[:, batch_num * batch_size:(batch_num + 1) * batch_size]\n targets_batch = torch.as_tensor(targets_batch).to(self.model.device)\n \n mean, logvar = self.model(inputs_batch)\n inv_var = torch.exp(-logvar)\n # Average over batch and dim, sum over ensembles.\n mse_loss_inv = (torch.pow(mean - targets_batch, 2) * inv_var).mean(dim=(1, 2)) # MLE for Gaussian\n var_loss = logvar.mean(dim=(1, 2))\n loss = mse_loss_inv.sum() + var_loss.sum()\n loss = loss + self.model.get_decay_loss()\n loss = loss + logvar_loss_coef * self.model.max_logvar.sum() - logvar_loss_coef * self.model.min_logvar.sum()\n\n self.optim.zero_grad()\n loss.backward()\n self.optim.step()\n\n losses.append(loss.item())\n return np.mean(losses)\n \n @ torch.no_grad()\n def validate(self, inputs: np.ndarray, targets: np.ndarray) -> List[float]:\n self.model.eval()\n targets = torch.as_tensor(targets).to(self.model.device)\n mean, _ = self.model(inputs)\n loss = ((mean - targets) ** 2).mean(dim=(1, 2))\n val_loss = list(loss.cpu().numpy())\n return val_loss\n \n def select_elites(self, metrics: List) -> List[int]:\n pairs = [(metric, index) for metric, index in zip(metrics, range(len(metrics)))]\n pairs = sorted(pairs, key=lambda x: x[0])\n elites = [pairs[i][1] for i in range(self.model.num_elites)]\n return elites\n\n def save(self, save_path: str) -> None:\n torch.save(self.model.state_dict(), os.path.join(save_path, \"dynamics.pth\"))\n self.scaler.save_scaler(save_path)\n \n def load(self, load_path: str) -> None:\n 
self.model.load_state_dict(torch.load(os.path.join(load_path, \"dynamics.pth\"), map_location=self.model.device))\n self.scaler.load_scaler(load_path)" }, { "identifier": "get_termination_fn", "path": "offlinerlkit/utils/termination_fns.py", "snippet": "def get_termination_fn(task):\n if 'halfcheetahvel' in task:\n return termination_fn_halfcheetahveljump\n elif 'halfcheetah' in task:\n return termination_fn_halfcheetah\n elif 'hopper' in task:\n return termination_fn_hopper\n elif 'antangle' in task:\n return termination_fn_antangle\n elif 'ant' in task:\n return termination_fn_ant\n elif 'walker2d' in task:\n return termination_fn_walker2d\n elif 'point2denv' in task:\n return termination_fn_point2denv\n elif 'point2dwallenv' in task:\n return termination_fn_point2dwallenv\n elif 'pendulum' in task:\n return termination_fn_pendulum\n elif 'humanoid' in task:\n return termination_fn_humanoid\n elif 'pen' in task:\n return termination_fn_pen\n elif 'door' in task:\n return termination_fn_door\n else:\n return termination_fn_default" }, { "identifier": "StandardScaler", "path": "offlinerlkit/utils/scaler.py", "snippet": "class StandardScaler(object):\n def __init__(self, mu=None, std=None):\n self.mu = mu\n self.std = std\n\n def fit(self, data):\n \"\"\"Runs two ops, one for assigning the mean of the data to the internal mean, and\n another for assigning the standard deviation of the data to the internal standard deviation.\n This function must be called within a 'with <session>.as_default()' block.\n\n Arguments:\n data (np.ndarray): A numpy array containing the input\n\n Returns: None.\n \"\"\"\n self.mu = np.mean(data, axis=0, keepdims=True)\n self.std = np.std(data, axis=0, keepdims=True)\n self.std[self.std < 1e-12] = 1.0\n\n def transform(self, data):\n \"\"\"Transforms the input matrix data using the parameters of this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return (data - self.mu) / self.std\n\n def inverse_transform(self, data):\n \"\"\"Undoes the transformation performed by this scaler.\n\n Arguments:\n data (np.array): A numpy array containing the points to be transformed.\n\n Returns: (np.array) The transformed dataset.\n \"\"\"\n return self.std * data + self.mu\n \n def save_scaler(self, save_path):\n mu_path = path.join(save_path, \"mu.npy\")\n std_path = path.join(save_path, \"std.npy\")\n np.save(mu_path, self.mu)\n np.save(std_path, self.std)\n \n def load_scaler(self, load_path):\n mu_path = path.join(load_path, \"mu.npy\")\n std_path = path.join(load_path, \"std.npy\")\n self.mu = np.load(mu_path)\n self.std = np.load(std_path)\n\n def transform_tensor(self, data: torch.Tensor):\n device = data.device\n data = self.transform(data.cpu().numpy())\n data = torch.tensor(data, device=device)\n return data" }, { "identifier": "PickPlaceObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class PickPlaceObsWrapper(gym.ObservationWrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n\n tmp_true_obs = get_pickplace_obs(tmp_obs)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def observation(self, observation: Dict[str, np.ndarray]) -> np.ndarray:\n return get_pickplace_obs(observation)\n\n def 
reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n return self.observation(self.env.reset())" }, { "identifier": "DoubleDrawerObsWrapper", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "class DoubleDrawerObsWrapper(gym.Wrapper):\n '''\n Wrap pick place environment to return desired obs\n '''\n def __init__(self, env):\n super().__init__(env)\n # Get observation space\n tmp_obs = env.reset()\n info = env.get_info()\n\n tmp_true_obs = get_doubledrawer_obs(tmp_obs, info)\n low = env.observation_space['state'].low[0]\n high = env.observation_space['state'].high[0]\n self.observation_space = Box(shape = tmp_true_obs.shape, low = low, high = high)\n\n def step(self, action):\n obs, reward, done, info = self.env.step(action)\n obs = get_doubledrawer_obs(obs, info)\n return obs, reward, done, info\n\n def reset(self, seed = None):\n if seed is not None:\n np.random.seed(seed) # controls env seed\n obs = self.env.reset()\n info = self.env.get_info()\n return get_doubledrawer_obs(obs, info)" }, { "identifier": "get_pickplace_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_pickplace_dataset(\n prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n init_obss.append(get_pickplace_obs(obs_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n elif key == 'next_observations':\n values += [get_pickplace_obs(obs) for obs in value_list] # element is list\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "get_doubledrawer_dataset", "path": "offlinerlkit/utils/roboverse_utils.py", "snippet": "def get_doubledrawer_dataset(\n 
prior_data_path: str, \n task_data_path: str,\n prior_weight: float =1., \n task_weight: float = 1., \n set_type: str = 'full', \n sample_ratio: float = 1.) -> Tuple[Dict, np.ndarray]:\n '''\n Concatenate prior_data and task_data\n prior_weight and task_weight: weight of data point\n\n Args:\n set_type: 'prior', 'task', 'full'\n sample_ratio: Ratio of trajectories sampled. Sometimes we want to train on a smaller dataset.\n\n Return:\n dataset: Dict, additional key 'weights'\n init_obss: np.ndarray (num_traj, obs_dim)\n '''\n with open(prior_data_path, \"rb\") as fp:\n prior_data = np.load(fp, allow_pickle=True)\n with open(task_data_path, \"rb\") as ft:\n task_data = np.load(ft, allow_pickle=True)\n set_weight(prior_data, prior_weight)\n set_weight(task_data, task_weight)\n\n # Sample trajectories\n num_trajs_prior = int(len(prior_data) * sample_ratio)\n idxs_prior = np.random.choice(len(prior_data), size=(num_trajs_prior), replace = False)\n prior_data = prior_data[idxs_prior]\n\n num_trajs_task = int(len(task_data) * sample_ratio)\n idxs_task = np.random.choice(len(task_data), size=(num_trajs_task), replace = False)\n task_data = task_data[idxs_task]\n\n if set_type == 'full':\n full_data = np.concatenate([prior_data, task_data], axis=0) # list of dict\n elif set_type == 'prior':\n full_data = prior_data\n elif set_type =='task':\n full_data = task_data\n keys = ['observations', 'actions', 'rewards', 'next_observations', 'terminals', 'weights']\n\n init_obss = []\n for d in prior_data:\n obs_list = d['observations']\n info_list = d['env_infos']\n init_obss.append(get_doubledrawer_obs(obs_list[0], info_list[0]))\n \n dict_data = {}\n for key in keys:\n values = []\n for d in full_data: # trajectory, dict of lists\n value_list = d[key] # list of timesteps data\n if key == 'observations':\n info_list = d['env_infos']\n # initial info is similar to step 1\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, [info_list[0]] + info_list[:-1])]\n elif key == 'next_observations':\n info_list = d['env_infos']\n values += [get_doubledrawer_obs(obs,info) for obs,info in zip(value_list, info_list)]\n else:\n values += value_list # element is list\n values = np.asarray(values)\n dict_data[key] = values\n rtgs = np.zeros_like(dict_data['rewards']) # no return\n dict_data['rtgs'] = rtgs\n\n init_obss = np.asarray(init_obss)\n return dict_data, init_obss" }, { "identifier": "Logger", "path": "offlinerlkit/utils/logger.py", "snippet": "class Logger(object):\n def __init__(self, dir: str, ouput_config: Dict) -> None:\n self._dir = dir\n self._init_dirs()\n self._init_ouput_handlers(ouput_config)\n self._name2val = defaultdict(float)\n self._name2cnt = defaultdict(int)\n self._level = INFO\n self._timestep = 0\n \n def _init_dirs(self) -> None:\n self._record_dir = os.path.join(self._dir, \"record\")\n self._checkpoint_dir = os.path.join(self._dir, \"checkpoint\")\n self._model_dir = os.path.join(self._dir, \"model\")\n self._result_dir = os.path.join(self._dir, \"result\")\n os.mkdir(self._record_dir)\n os.mkdir(self._checkpoint_dir)\n os.mkdir(self._model_dir)\n os.mkdir(self._result_dir)\n \n def _init_ouput_handlers(self, output_config: Dict) -> None:\n self._output_handlers = []\n for file_name, fmt in output_config.items():\n try:\n self._output_handlers.append(HANDLER[fmt](os.path.join(self._record_dir, file_name)))\n except KeyError:\n warnings.warn(\"Invalid output type, Valid types: stdout, csv, tensorboard\", DeprecationWarning)\n # default output to console\n 
self._output_handlers.append(StandardOutputHandler(sys.stdout))\n \n def log_hyperparameters(self, hyper_param: Dict) -> None:\n json_output_handler = JSONOutputHandler(os.path.join(self._record_dir, \"hyper_param\"))\n json_output_handler.writekvs(hyper_param)\n json_output_handler.close()\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.add_hyper_params_to_tb(hyper_param)\n\n def logkv(self, key: Any, val: Any) -> None:\n \"\"\"\n Log a value of some diagnostic\n Call this once for each diagnostic quantity, each iteration\n If called many times, last value will be used.\n \"\"\"\n self._name2val[key] = val\n\n def logkv_mean(self, key: Any, val: Number) -> None:\n \"\"\"\n The same as logkv(), but if called many times, values averaged.\n \"\"\"\n oldval, cnt = self._name2val[key], self._name2cnt[key]\n self._name2val[key] = oldval*cnt/(cnt+1) + val/(cnt+1)\n self._name2cnt[key] = cnt + 1\n\n def dumpkvs(self, exclude:Optional[Union[str, Tuple[str, ...]]]=None) -> None:\n # log timestep\n self.logkv(DEFAULT_X_NAME, self._timestep)\n for handler in self._output_handlers:\n if isinstance(handler, KVWriter):\n if exclude is not None and handler.handler_name in exclude:\n continue\n handler.writekvs(self._name2val)\n self._name2val.clear()\n self._name2cnt.clear()\n\n def log(self, s: str, level=INFO) -> None:\n for handler in self._output_handlers:\n if isinstance(handler, StandardOutputHandler):\n handler.writestr(s)\n \n def set_timestep(self, timestep: int) -> None:\n self._timestep = timestep\n for handler in self._output_handlers:\n if isinstance(handler, TensorBoardOutputHandler):\n handler.set_step(timestep)\n\n def set_level(self, level) -> None:\n self._level = level\n\n @property\n def record_dir(self) -> str:\n return self._record_dir\n \n @property\n def checkpoint_dir(self) -> str:\n return self._checkpoint_dir\n\n @property\n def model_dir(self) -> str:\n return self._model_dir\n \n @property\n def result_dir(self) -> str:\n return self._result_dir\n \n def close(self) -> None:\n for handler in self._output_handlers:\n handler.close()" }, { "identifier": "make_log_dirs", "path": "offlinerlkit/utils/logger.py", "snippet": "def make_log_dirs(\n task_name: str,\n algo_name: str,\n exp_name: str,\n args: Dict,\n part: Optional[str] = None,\n record_params: Optional[List]=None\n) -> str:\n if record_params is not None:\n for param_name in record_params:\n algo_name += f\"&{param_name}={args[param_name]}\"\n\n if part is not None:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name, part)\n else:\n log_dirs = os.path.join(ROOT_DIR, task_name, algo_name, exp_name)\n os.makedirs(log_dirs)\n return log_dirs" }, { "identifier": "RcslPolicyTrainer", "path": "offlinerlkit/policy_trainer/rcsl_policy_trainer.py", "snippet": "class RcslPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n eval_env: Union[gym.Env, gymnasium.Env],\n offline_dataset: Dict[str, np.ndarray],\n rollout_dataset: Optional[Dict[str, np.ndarray]],\n goal: float,\n logger: Logger,\n seed,\n eval_env2: Optional[Union[gym.Env, gymnasium.Env]] = None,\n epoch: int = 1000,\n batch_size: int = 256,\n offline_ratio: float = 0,\n eval_episodes: int = 10,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n num_workers = 1,\n has_terminal = False,\n binary_return = True\n ) -> None:\n '''\n offline_ratio = 0: rollout only, 1: offline only\n '''\n self.policy = policy\n self.eval_env = eval_env\n 
self.eval_env2 = eval_env2\n self.horizon = horizon\n self.offline_dataset = offline_dataset\n self.rollout_dataset = rollout_dataset\n self.goal = goal\n self.logger = logger\n\n self._epoch = epoch\n self._batch_size = batch_size\n self._offline_ratio = offline_ratio\n self._eval_episodes = eval_episodes\n self.lr_scheduler = lr_scheduler\n self.num_workers = num_workers\n self.env_seed = seed\n self.binary_return = binary_return\n\n self.is_gymnasium_env = hasattr(self.eval_env, \"get_true_observation\")\n assert (not self.is_gymnasium_env) or (self.horizon is not None), \"Horizon must be specified for Gymnasium env\"\n self.has_terminal = has_terminal\n\n def train(self, holdout_ratio: float = 0.1, last_eval = False, find_best_start: Optional[int] = None, improve_threshold: float = 0.01) -> Dict[str, float]:\n '''\n last_eval: If True, only evaluates at the last epoch\n find_best_start: If >=0, begin to find the best epoch by holdout loss\n '''\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n\n dataset = DictDataset(self.offline_dataset)\n\n if holdout_ratio == 0.:\n has_holdout = False\n train_dataset = dataset\n else:\n has_holdout = True\n holdout_size = int(len(dataset) * holdout_ratio)\n train_size = len(dataset) - holdout_size\n train_dataset, holdout_dataset = torch.utils.data.random_split(dataset, [train_size, holdout_size], \n generator=torch.Generator().manual_seed(self.env_seed))\n data_loader = DataLoader(\n train_dataset,\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n )\n best_policy_dict = self.policy.state_dict()\n best_holdout_loss = 1e10\n epochs_since_upd = 0\n stop_by_holdout = (find_best_start is not None)\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(enumerate(data_loader), desc=f\"Epoch #{e}/{self._epoch}\")\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n\n '''\n loss_dict = self.policy.learn(batch)\n pbar.set_postfix(**loss_dict)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n # Test validation loss\n if has_holdout:\n holdout_loss = self.validate(holdout_dataset)\n if stop_by_holdout and e >= find_best_start: # test holdout improvement\n if (best_holdout_loss - holdout_loss) / best_holdout_loss > improve_threshold:\n best_holdout_loss = holdout_loss\n best_policy_dict = deepcopy(self.policy.state_dict())\n epochs_since_upd = 0\n else:\n epochs_since_upd += 1\n\n if last_eval and e < self._epoch: # When last_eval is True, only evaluate on last epoch\n pass\n else:\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n ep_reward_max, ep_reward_min = np.max(eval_info[\"eval/episode_reward\"]), np.min(eval_info[\"eval/episode_reward\"])\n ep_length_mean, ep_length_std = np.mean(eval_info[\"eval/episode_length\"]), np.std(eval_info[\"eval/episode_length\"])\n\n if not hasattr(self.eval_env, \"get_normalized_score\"): # gymnasium_env does not have normalized score\n last_10_performance.append(ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward\", ep_reward_mean)\n self.logger.logkv(\"eval/episode_reward_std\", ep_reward_std) \n else: \n norm_ep_rew_mean = self.eval_env.get_normalized_score(ep_reward_mean) * 100\n norm_ep_rew_std 
= self.eval_env.get_normalized_score(ep_reward_std) * 100\n norm_ep_rew_max = self.eval_env.get_normalized_score(ep_reward_max) * 100\n norm_ep_rew_min = self.eval_env.get_normalized_score(ep_reward_min) * 100\n last_10_performance.append(norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward\", norm_ep_rew_mean)\n self.logger.logkv(\"eval/normalized_episode_reward_std\", norm_ep_rew_std)\n self.logger.logkv(\"eval/normalized_episode_reward_max\", norm_ep_rew_max)\n self.logger.logkv(\"eval/normalized_episode_reward_min\", norm_ep_rew_min)\n self.logger.logkv(\"eval/episode_length\", ep_length_mean)\n self.logger.logkv(\"eval/episode_length_std\", ep_length_std)\n\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n\n if stop_by_holdout and epochs_since_upd >= 5: # Stop, evaluate for the last time\n self.policy.load_state_dict(best_policy_dict)\n eval_info = self._evaluate()\n ep_reward_mean, ep_reward_std = np.mean(eval_info[\"eval/episode_reward\"]), np.std(eval_info[\"eval/episode_reward\"])\n self.logger.log(f\"Final evaluation: Mean {ep_reward_mean}, std {ep_reward_std}\\n\")\n break\n \n self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy_final.pth\"))\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}\n\n def _evaluate(self, eval_episodes: int = -1) -> Dict[str, List[float]]:\n '''\n Always set desired rtg to 0\n '''\n # Pointmaze obs has different format, needs to be treated differently\n if eval_episodes == -1:\n real_eval_episodes = self._eval_episodes\n else:\n real_eval_episodes = eval_episodes\n is_gymnasium_env = self.is_gymnasium_env\n\n self.eval_env.reset(seed=self.env_seed) # Fix seed\n \n self.policy.eval()\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n \n eval_ep_info_buffer = []\n num_episodes = 0\n episode_reward, episode_length = 0, 0\n\n if not self.has_terminal: # don't use terminal signal, terminate when reach horizon\n while num_episodes < real_eval_episodes:\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n for timestep in range(self.horizon): # One epoch\n action = self.policy.select_action(obs.reshape(1, -1), rtg)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, info = self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n rtg = rtg - reward\n episode_length += 1\n\n obs = next_obs\n if self.binary_return:\n episode_reward = 1 if episode_reward > 0 else 0 # Clip to 1\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n num_episodes +=1\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n else:\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n while num_episodes < self._eval_episodes:\n action = self.policy.select_action(obs.reshape(1, -1), rtg)\n if hasattr(self.eval_env, \"get_true_observation\"): # gymnasium env \n next_obs, reward, terminal, _, _ = self.eval_env.step(action.flatten())\n else:\n next_obs, reward, terminal, _ = 
self.eval_env.step(action.flatten())\n if is_gymnasium_env:\n next_obs = self.eval_env.get_true_observation(next_obs)\n episode_reward += reward\n episode_length += 1\n\n obs = next_obs\n\n if terminal: # Episode finishes\n if self.binary_return:\n episode_reward = 1 if episode_reward > 0 else 0 # Clip to 1\n eval_ep_info_buffer.append(\n {\"episode_reward\": episode_reward, \"episode_length\": episode_length}\n )\n episode_reward, episode_length = 0, 0\n if is_gymnasium_env:\n obs, _ = self.eval_env.reset()\n obs = self.eval_env.get_true_observation(obs)\n else:\n obs = self.eval_env.reset()\n rtg = torch.tensor([[self.goal]]).type(torch.float32)\n \n return {\n \"eval/episode_reward\": [ep_info[\"episode_reward\"] for ep_info in eval_ep_info_buffer],\n \"eval/episode_length\": [ep_info[\"episode_length\"] for ep_info in eval_ep_info_buffer]\n }\n \n @ torch.no_grad()\n def validate(self, holdout_dataset: torch.utils.data.Dataset) -> Optional[float]:\n data_loader = DataLoader(\n holdout_dataset,\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n )\n self.policy.eval()\n\n pbar = tqdm(enumerate(data_loader), total=len(data_loader))\n losses = []\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n '''\n loss_dict = self.policy.validate(batch)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n\n if \"holdout_loss\" in loss_dict:\n loss = loss_dict[\"holdout_loss\"]\n losses.append(loss)\n\n if len(losses) > 0:\n return(sum(losses) / len(losses))\n else:\n return None" }, { "identifier": "DiffusionPolicyTrainer", "path": "offlinerlkit/policy_trainer/diffusion_policy_trainer.py", "snippet": "class DiffusionPolicyTrainer:\n def __init__(\n self,\n policy: BasePolicy,\n offline_dataset: Dict[str, np.ndarray],\n logger: Logger,\n seed,\n epoch: int = 25,\n batch_size: int = 256,\n lr_scheduler: Optional[torch.optim.lr_scheduler._LRScheduler] = None,\n horizon: Optional[int] = None,\n num_workers = 1,\n has_terminal = False\n ) -> None:\n '''\n offline_ratio = 0: rollout only, 1: offline only\n '''\n self.policy = policy\n self.horizon = horizon\n self.offline_dataset = offline_dataset\n self.logger = logger\n\n self._epoch = epoch\n self._batch_size = batch_size\n self.lr_scheduler = lr_scheduler\n self.num_workers = num_workers\n self.env_seed = seed\n self.has_terminal = has_terminal\n\n def train(self) -> Dict[str, float]:\n start_time = time.time()\n\n num_timesteps = 0\n last_10_performance = deque(maxlen=10)\n\n data_loader = DataLoader(\n DictDataset(self.offline_dataset),\n batch_size = self._batch_size,\n shuffle = True,\n pin_memory = True,\n num_workers = self.num_workers\n ) \n\n # train loop\n for e in range(1, self._epoch + 1):\n\n self.policy.train()\n\n pbar = tqdm(enumerate(data_loader), desc=f\"Epoch #{e}/{self._epoch}\")\n for it, batch in pbar:\n '''\n batch: dict with keys\n 'observations'\n 'next_observations'\n 'actions'\n 'terminals'\n 'rewards'\n 'rtgs'\n\n '''\n loss_dict = self.policy.learn(batch)\n pbar.set_postfix(**loss_dict)\n\n for k, v in loss_dict.items():\n self.logger.logkv_mean(k, v)\n \n num_timesteps += 1\n\n if self.lr_scheduler is not None:\n self.lr_scheduler.step()\n\n self.logger.set_timestep(num_timesteps)\n self.logger.dumpkvs(exclude=[\"dynamics_training_progress\"])\n \n # save checkpoint\n torch.save(self.policy.state_dict(), os.path.join(self.logger.checkpoint_dir, \"policy.pth\"))\n\n 
self.logger.log(\"total time: {:.2f}s\".format(time.time() - start_time))\n torch.save(self.policy.state_dict(), os.path.join(self.logger.model_dir, \"policy.pth\"))\n self.logger.close()\n \n return {\"last_10_performance\": np.mean(last_10_performance)}" }, { "identifier": "none_or_str", "path": "offlinerlkit/utils/none_or_str.py", "snippet": "def none_or_str(value):\n if value == 'None':\n return None\n return value" }, { "identifier": "SimpleDiffusionPolicy", "path": "offlinerlkit/policy/diffusion/simple_diffusion.py", "snippet": "class SimpleDiffusionPolicy(ConditionalDiffusionModel):\n '''\n Note: When loading DiffusionPolicy, also need to load scaler manually\n '''\n def __init__(\n self,\n obs_shape,\n act_shape,\n feature_dim,\n num_training_steps,\n num_diffusion_steps,\n device,\n **kwargs,\n ):\n super().__init__(\n input_dim=np.prod(act_shape),\n cond_shape_dict={\"obs\": obs_shape, \"feat\": (feature_dim,)},\n num_training_steps=num_training_steps,\n num_diffusion_steps=num_diffusion_steps,\n clip_sample=True,\n device=device,\n **kwargs,\n )\n\n def learn(self, batch: Dict):\n '''\n Update one batch\n '''\n obss = batch['observations'].type(torch.float32).to(self.device)\n actions = batch['actions'].type(torch.float32).to(self.device)\n rtgs = batch['rtgs']\n rtgs = rtgs.reshape(rtgs.shape[0], -1).type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n\n return super().learn(actions, {\"obs\": obss, \"feat\": rtgs}, weights)\n\n def validate(self, batch: Dict):\n '''\n Update one batch\n '''\n obss = batch['observations'].type(torch.float32).to(self.device)\n actions = batch['actions'].type(torch.float32).to(self.device)\n rtgs = batch['rtgs']\n rtgs = rtgs.reshape(rtgs.shape[0], -1).type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n\n return super().validate(actions, {\"obs\": obss, \"feat\": rtgs}, weights)\n\n def select_action(self, obs, feat):\n # print(f\"DiffusionPolicy: select action with obs shape {obs.shape}, feat(rtg) shape {feat.shape}\")\n obs = torch.as_tensor(obs, dtype = torch.float32, device = self.device)\n feat = torch.as_tensor(feat, dtype = torch.float32, device = self.device)\n\n with torch.no_grad():\n action = super().sample({\"obs\": obs, \"feat\": feat})\n # print(action)\n return action.cpu().numpy()\n\n def train(self) -> None:\n self.noise_pred_net.train()\n self.cond_encoders.train()\n\n def eval(self) -> None:\n self.noise_pred_net.eval()\n self.cond_encoders.eval()" }, { "identifier": "AutoregressivePolicy", "path": "offlinerlkit/policy/rcsl/rcsl_autoregressive.py", "snippet": "class AutoregressivePolicy(nn.Module):\n def __init__(self, obs_dim, act_dim, hidden_dims, lr, device):\n super().__init__()\n self.obs_dim = obs_dim\n self.act_dim = act_dim\n\n # Input is obs + act + one-hot for the predicted dimension\n # Output is the mean and standard deviation of the predicted dimension\n input_dim = obs_dim + 1 + act_dim + act_dim # also depend on return\n all_dims = [input_dim] + hidden_dims + [2]\n self.model = nn.ModuleList()\n for in_dim, out_dim in zip(all_dims[:-1], all_dims[1:]):\n self.model.append(nn.Linear(in_dim, out_dim))\n self.model.append(nn.LeakyReLU())\n\n self.rcsl_optim = torch.optim.Adam(self.model.parameters(), lr=lr)\n self.device = device\n self.register_parameter(\n \"max_logstd\",\n 
nn.Parameter(torch.ones(1) * 0.5, requires_grad=True)\n )\n self.register_parameter(\n \"min_logstd\",\n nn.Parameter(torch.ones(1) * -10, requires_grad=True)\n )\n self.to(self.device)\n\n def forward(self, obs, rtg, deterministic: bool = False):\n batch_size = obs.size(0)\n rtg = rtg.reshape(batch_size, 1)\n\n # Initialize action to zeros\n act = torch.zeros((batch_size, self.act_dim), device=obs.device)\n\n # One-hot encoding for all dimensions\n one_hot_all = torch.eye(self.act_dim, device=obs.device)\n\n # Predict each dimension autoregressively\n for i in range(self.act_dim):\n one_hot = one_hot_all[i][None, :].repeat(batch_size, 1)\n x = torch.cat([obs, rtg, act, one_hot], dim=1)\n for layer in self.model:\n x = layer(x)\n mean, logstd = torch.chunk(x, 2, dim=-1)\n logstd = soft_clamp(logstd, self.min_logstd, self.max_logstd)\n\n # logstd might be too small\n if deterministic:\n next_dim = mean\n else:\n assert logstd.exp() != float('nan'), f\"{logstd}\"\n if logstd.exp() == 0:\n next_dim = mean\n else:\n dist = Normal(mean, logstd.exp())\n next_dim = dist.sample()\n act = torch.cat([act[:, :i], next_dim, act[:, i + 1 :]], dim=1)\n\n return act\n\n def select_action(self, obs: np.ndarray, rtg: np.ndarray, deterministic: bool = False) -> np.ndarray:\n with torch.no_grad():\n obs = torch.tensor(obs, dtype=torch.float32).to(self.device)\n rtg = torch.as_tensor(rtg).type(torch.float32).to(self.device)\n action = self.forward(obs, rtg, deterministic)\n return action.cpu().numpy()\n\n def fit(self, obs, rtg, act, weights = None):\n batch_size = obs.size(0)\n\n # Generate all the one-hot vectors, expand by repeat\n one_hot_all = torch.eye(self.act_dim, device=obs.device)\n one_hot_full = one_hot_all.repeat_interleave(batch_size, dim=0)\n\n # Repeat act by act_dim times and mask by one-hot encoding\n mask = (\n torch.tril(torch.ones((self.act_dim, self.act_dim), device=obs.device))\n - one_hot_all\n ) # lower trig - diag\n mask_full = mask.repeat_interleave(batch_size, dim=0)\n act_full = act.repeat(self.act_dim, 1) # (batch*act_dim, act_dim)\n act_masked = act_full * mask_full\n\n # Repeat obs by act_dim times\n rtg = rtg.reshape(batch_size, 1)\n obs_rtg = torch.cat([obs, rtg], dim = 1)\n obs_rtg_full = obs_rtg.repeat(self.act_dim, 1)\n\n # Concatenate everything to get input\n input_full = torch.cat([obs_rtg_full, act_masked, one_hot_full], dim=1)\n\n # Use the one-hot vector as boolean mask to get target\n target = act_full[one_hot_full.bool()].unsqueeze(1)\n\n # Forward through model and compute loss\n x = input_full\n for layer in self.model:\n x = layer(x)\n mean, logstd = torch.chunk(x, 2, dim=-1)\n logstd = soft_clamp(logstd, self.min_logstd, self.max_logstd)\n if any(torch.isnan(mean)):\n torch.save(self.model.state_dict(), \"model_debug.pth\")\n torch.save(input_full, \"input_debug.pth\")\n raise Exception(f\"Mean is nan, input_full {input_full.detach().cpu().numpy()}\")\n dist = Normal(mean, logstd.exp())\n loss = -dist.log_prob(target)\n if weights is None:\n loss = loss.mean()\n else:\n loss = loss.reshape(loss.shape[0], -1) # (batch * act_dim, 1)\n weights = weights.reshape(weights.shape[0], -1) # (batch, 1)\n weights = weights.repeat(self.act_dim, 1) # (batch * act_dim, 1)\n loss = torch.sum(loss * weights) / (torch.sum(weights) * loss.shape[-1])\n return loss\n \n def learn(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n obss = obss.type(torch.float32).to(self.device)\n actions = 
actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n loss = self.fit(obss, rtgs, actions,weights)\n\n self.rcsl_optim.zero_grad()\n loss.backward()\n self.rcsl_optim.step()\n\n result = {\n \"loss\": loss.item(),\n }\n \n return result\n\n def validate(self, batch: Dict) -> Dict[str, float]:\n obss, actions, rtgs = batch[\"observations\"], batch[\"actions\"], batch[\"rtgs\"]\n obss = obss.type(torch.float32).to(self.device)\n actions = actions.type(torch.float32).to(self.device)\n rtgs = rtgs.type(torch.float32).to(self.device)\n if 'weights' in batch:\n weights = batch['weights'].type(torch.float32).to(self.device) # (batch, )\n else:\n weights = None\n with torch.no_grad():\n loss = self.fit(obss, rtgs, actions, weights)\n return {\n \"holdout_loss\": loss.item()\n }" } ]
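The `AutoregressivePolicy.fit` snippet quoted just above trains one Gaussian head per action dimension by masking each repeated action with a strictly lower-triangular pattern, so the head for dimension i only conditions on dimensions 0..i-1. A minimal PyTorch sketch of that masking on toy tensors (the action values and `act_dim` below are invented for illustration):

```python
import torch

act_dim, batch_size = 3, 2
act = torch.tensor([[0.1, 0.2, 0.3],
                    [0.4, 0.5, 0.6]])              # toy actions, shape (batch, act_dim)

one_hot_all = torch.eye(act_dim)
# "lower triangular minus diagonal": row i keeps only dimensions < i, as in fit()
mask = torch.tril(torch.ones(act_dim, act_dim)) - one_hot_all

mask_full = mask.repeat_interleave(batch_size, dim=0)       # (act_dim*batch, act_dim)
act_full = act.repeat(act_dim, 1)                           # (act_dim*batch, act_dim)
act_masked = act_full * mask_full                           # hides the "future" dimensions

# The repeated one-hot rows double as a boolean selector for the regression target
one_hot_full = one_hot_all.repeat_interleave(batch_size, dim=0)
target = act_full[one_hot_full.bool()].unsqueeze(1)         # (act_dim*batch, 1)

print(act_masked)          # rows for dim 0 see nothing; rows for dim 2 see dims 0 and 1
print(target.squeeze(1))   # -> 0.1, 0.4, 0.2, 0.5, 0.3, 0.6
```

At sampling time (`forward`), the same conditioning is realized sequentially: each sampled dimension is written back into `act` before the next one-hot query is issued.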
import numpy as np import torch import roboverse import argparse import os import random import pickle import datetime from copy import deepcopy from typing import Dict, Tuple from collections import defaultdict from offlinerlkit.modules import EnsembleDynamicsModel from offlinerlkit.dynamics import EnsembleDynamics from offlinerlkit.utils.termination_fns import get_termination_fn from offlinerlkit.utils.scaler import StandardScaler from offlinerlkit.utils.roboverse_utils import PickPlaceObsWrapper, DoubleDrawerObsWrapper, get_pickplace_dataset, get_doubledrawer_dataset from offlinerlkit.utils.logger import Logger, make_log_dirs from offlinerlkit.policy_trainer import RcslPolicyTrainer, DiffusionPolicyTrainer from offlinerlkit.utils.none_or_str import none_or_str from offlinerlkit.policy import SimpleDiffusionPolicy, AutoregressivePolicy
16163
def get_rollout_trajs(logger: Logger, threshold = 0.9) -> Tuple[Dict[str, np.ndarray], float]: ''' Rollout trajectories or load existing trajectories. If rollout, call `get_rollout_policy()` and `get_dynamics()` first to get rollout policy and dynamics Return: rollout trajectories ''' ''' diffusion behavior policy rollout - threshold: only keep trajs with ret > [threshold] (valid). Usually the max return in dataset - args.num_need_traj: number of valid trajectories needed. End rollout when get enough trajs - args.rollout_epoch: maximum rollout epoch. Should be large ''' device = args.device num_need_traj = args.num_need_traj rollout_data_all = None # Initialize rollout_dataset as nothing num_traj_all = 0 # Initialize total number of rollout trajs start_epoch = 0 # Default starting epoch returns_all = [] if args.rollout_ckpt_path is not None: print(f"Will save rollout trajectories to dir {args.rollout_ckpt_path}") os.makedirs(args.rollout_ckpt_path, exist_ok=True) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") if os.path.exists(data_path): # Load ckpt_data ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] returns_all = ckpt_dict['return'] start_epoch = ckpt_dict['epoch'] + 1 # trajs = ckpt_dict print(f"Loaded checkpoint. Already have {num_traj_all} valid trajectories, start from epoch {start_epoch}.") if num_traj_all >= num_need_traj: print(f"Checkpoint trajectories are enough. Skip rollout procedure.") return rollout_data_all, max(returns_all) # Still need training, get dynamics and rollout policy get_dynamics() get_rollout_policy() with torch.no_grad(): for epoch in range(start_epoch, args.rollout_epoch): batch_indexs = np.random.randint(0, init_obss_dataset.shape[0], size=args.rollout_batch) init_obss = init_obss_dataset[batch_indexs] rollout_data, rollout_info = rollout_simple(init_obss, dynamics, diffusion_policy, args.horizon) # print(pred_state) # Only keep trajs with returns > threshold returns = rollout_info['returns'] rewards_full = rollout_info['rewards_full'] min_last_rewards = np.min(rewards_full[:, -3:], axis = -1) # (B,), final steps must be large max_last_rewards = np.max(rewards_full[:, -3:], axis = -1) max_cond = np.logical_and(max_last_rewards > 0.9, max_last_rewards < 2) min_cond = min_last_rewards > 0.7 valid_cond = np.logical_and(max_cond, min_cond) valid_trajs = np.arange(args.rollout_batch)[valid_cond] # np.array, indexs of all valid trajs valid_data_idxs = [rollout_data['traj_idxs'][i] in valid_trajs for i in range(rollout_data['traj_idxs'].shape[0])] for k in rollout_data: rollout_data[k] = rollout_data[k][valid_data_idxs] # Add rollout_data to rollout_data_all if rollout_data_all is None: # No trajs collected rollout_data_all = deepcopy(rollout_data) else: for k in rollout_data: rollout_data_all[k] = np.concatenate([rollout_data_all[k], rollout_data[k]], axis=0) num_traj_all += len(valid_trajs) returns_all += list(returns[valid_trajs]) print(f"-----------\nEpoch {epoch}, get {len(valid_trajs)} new trajs") logger.logkv("Epoch", epoch) logger.logkv("num_new_trajs", len(valid_trajs)) logger.logkv("num_total_trajs", num_traj_all) logger.dumpkvs() save_path = os.path.join(logger.checkpoint_dir, "rollout.dat") pickle.dump({'epoch': epoch, 'data': rollout_data_all, 'num_traj': num_traj_all, 'return': returns_all}, open(save_path, "wb")) if num_traj_all >= num_need_traj: # Get enough trajs, quit rollout print(f"End rollout. 
Total epochs used: {epoch+1}") break return rollout_data_all, max(returns_all) rollout_save_dir = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="rollout") print(f"Logging diffusion rollout to {rollout_save_dir}") rollout_logger = Logger(rollout_save_dir, {"consoleout_backup": "stdout"}) rollout_logger.log_hyperparameters(vars(args)) rollout_dataset, max_offline_return = get_rollout_trajs(rollout_logger) # train rcsl_policy = AutoregressivePolicy( obs_dim=obs_dim, act_dim = action_dim, hidden_dims=args.rcsl_hidden_dims, lr = args.rcsl_lr, device = args.device ) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(rcsl_policy.rcsl_optim, args.rcsl_epoch) task_name = args.task rcsl_log_dirs = make_log_dirs(task_name, args.algo_name, exp_name, vars(args), part='rcsl') # key: output file name, value: output handler type print(f"Logging autoregressive gaussian rcsl to {rcsl_log_dirs}") rcsl_output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "tb": "tensorboard" } rcsl_logger = Logger(rcsl_log_dirs, rcsl_output_config) rcsl_logger.log_hyperparameters(vars(args))
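The `get_rollout_trajs` code in this field keeps a model rollout only when the rewards of its final three steps clear fixed thresholds (all above 0.7, peak between 0.9 and 2). A small NumPy sketch of that filter; the thresholds mirror the snippet, while the reward matrix is invented:

```python
import numpy as np

# Toy rewards for 3 rollouts over a horizon of 5 steps: shape (batch, horizon)
rewards_full = np.array([
    [0.0, 0.0, 1.0, 1.0, 1.0],   # succeeds and stays successful -> kept
    [0.0, 0.0, 0.0, 1.0, 0.0],   # reward collapses at the end  -> rejected
    [0.0, 0.0, 3.0, 3.0, 3.0],   # implausibly large reward     -> rejected
])

min_last_rewards = np.min(rewards_full[:, -3:], axis=-1)   # final steps must all be large
max_last_rewards = np.max(rewards_full[:, -3:], axis=-1)
max_cond = np.logical_and(max_last_rewards > 0.9, max_last_rewards < 2)
min_cond = min_last_rewards > 0.7
valid_cond = np.logical_and(max_cond, min_cond)

valid_trajs = np.arange(rewards_full.shape[0])[valid_cond]
print(valid_trajs)   # [0]
```

Only the surviving trajectory indices are then used to slice the flat transition arrays, which is why the snippet builds `valid_data_idxs` per transition (via `traj_idxs`) rather than per trajectory.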
''' Recommended hyperparameters: pickplace, horizon=40, behavior_epoch=30 doubledraweropen, horizon=50, behavior_epoch=40 doubledrawercloseopen, horizon=80, behavior_epoch=40 ''' def get_args(): parser = argparse.ArgumentParser() # general parser.add_argument("--algo-name", type=str, default="mbrcsl_mlpdyn") parser.add_argument("--task", type=str, default="pickplace", help="task name") parser.add_argument("--seed", type=int, default=0) parser.add_argument("--num_workers", type=int, default=1, help="Dataloader workers, align with cpu number") parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu") parser.add_argument("--last_eval", action="store_false") # env config parser.add_argument('--data_dir', type=str, required=True) parser.add_argument('--horizon', type=int, default=40, help="max path length for pickplace") # transformer_autoregressive dynamics parser.add_argument("--dynamics_lr", type=float, default=1e-3) parser.add_argument("--dynamics_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--dynamics_weight_decay", type=float, nargs='*', default=[2.5e-5, 5e-5, 7.5e-5, 7.5e-5, 1e-4]) parser.add_argument("--n_ensemble", type=int, default=7) parser.add_argument("--n_elites", type=int, default=5) parser.add_argument("--load_dynamics_path", type=none_or_str, default=None) # Behavior policy (diffusion) parser.add_argument("--behavior_epoch", type=int, default=30) parser.add_argument("--num_diffusion_iters", type=int, default=5, help="Number of diffusion steps") parser.add_argument('--behavior_batch', type=int, default=256) parser.add_argument('--load_diffusion_path', type=none_or_str, default=None) parser.add_argument('--task_weight', type=float, default=1.4, help="Weight on task data when training diffusion policy") parser.add_argument('--sample_ratio', type=float, default=0.8, help="Use (sample_ratio * num_total_data) data to train diffusion policy") # Rollout parser.add_argument('--rollout_ckpt_path', type=none_or_str, default=None, help="file dir, used to load/store rollout trajs" ) parser.add_argument('--rollout_epoch', type=int, default=200, help="Max number of epochs to rollout the policy") parser.add_argument('--num_need_traj', type=int, default=5000, help="Needed valid trajs in rollout") parser.add_argument("--rollout-batch", type=int, default=200, help="Number of trajs to be sampled at one time") # RCSL policy (mlp) parser.add_argument("--rcsl_hidden_dims", type=int, nargs='*', default=[200, 200, 200, 200]) parser.add_argument("--rcsl_lr", type=float, default=1e-3) parser.add_argument("--rcsl_batch", type=int, default=256) parser.add_argument("--rcsl_epoch", type=int, default=100) parser.add_argument("--eval_episodes", type=int, default=100) parser.add_argument("--holdout_ratio", type=float, default=0.2) return parser.parse_args() def rollout_simple( init_obss: np.ndarray, dynamics, rollout_policy: SimpleDiffusionPolicy, rollout_length: int ) -> Tuple[Dict[str, np.ndarray], Dict]: ''' Only serves for non-terminal cases Sample a batch of trajectories at the same time. 
Output rollout_transitions contain keys: obss, next_obss, actions rewards, (N,1) rtgs, (N,1) traj_idxs, (N) ''' num_transitions = 0 rewards_arr = np.array([]) rollout_transitions = defaultdict(list) batch_size = init_obss.shape[0] valid_idxs = np.arange(init_obss.shape[0]) # maintain current valid trajectory indexes returns = np.zeros(init_obss.shape[0]) # maintain return of each trajectory acc_returns = np.zeros(init_obss.shape[0]) # maintain accumulated return of each valid trajectory max_rewards = np.zeros(init_obss.shape[0]) # maintain max reward seen in trajectory rewards_full = np.zeros((init_obss.shape[0], rollout_length)) # full rewards (batch, H) # rollout observations = init_obss goal = np.zeros((init_obss.shape[0],1), dtype = np.float32) for t in range(rollout_length): actions = rollout_policy.select_action(observations, goal) next_observations, rewards, terminals, info = dynamics.step(observations, actions) rollout_transitions["observations"].append(observations) rollout_transitions["next_observations"].append(next_observations) rollout_transitions["actions"].append(actions) rollout_transitions["rewards"].append(rewards) rollout_transitions["terminals"].append(terminals) rollout_transitions["traj_idxs"].append(valid_idxs) rollout_transitions["acc_rets"].append(acc_returns) rewards = rewards.reshape(batch_size) # (B) rewards_full[:, t] = rewards num_transitions += len(observations) rewards_arr = np.append(rewards_arr, rewards.flatten()) returns = returns + rewards.flatten() # Update return (for valid idxs only) max_rewards = np.maximum(max_rewards, rewards.flatten()) # Update max reward acc_returns = acc_returns + rewards.flatten() observations = deepcopy(next_observations) for k, v in rollout_transitions.items(): rollout_transitions[k] = np.concatenate(v, axis=0) traj_idxs = rollout_transitions["traj_idxs"] rtgs = returns[traj_idxs] - rollout_transitions["acc_rets"] # rtgs = returns[traj_idxs] rollout_transitions["rtgs"] = rtgs[..., None] # (N,1) return rollout_transitions, \ {"num_transitions": num_transitions, "reward_mean": rewards_arr.mean(), "returns": returns, "max_rewards": max_rewards, "rewards_full": rewards_full} def train(args=get_args()): # seed random.seed(args.seed) np.random.seed(args.seed) torch.manual_seed(args.seed) torch.cuda.manual_seed_all(args.seed) torch.backends.cudnn.deterministic = True # create env and dataset if args.task == 'pickplace': env = roboverse.make('Widow250PickTray-v0') env = PickPlaceObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "pickplace_prior.npy") task_data_path = os.path.join(args.data_dir, "pickplace_task.npy") diff_dataset, _ = get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_pickplace_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledraweropen': env = roboverse.make('Widow250DoubleDrawerOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "closed_drawer_prior.npy") task_data_path = os.path.join(args.data_dir, 
"drawer_task.npy") diff_dataset, _ = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawercloseopen': env = roboverse.make('Widow250DoubleDrawerCloseOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_1_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") diff_dataset, _ = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) elif args.task == 'doubledrawerpickplaceopen': env = roboverse.make('Widow250DoubleDrawerPickPlaceOpenGraspNeutral-v0') env = DoubleDrawerObsWrapper(env) obs_space = env.observation_space args.obs_shape = obs_space.shape obs_dim = np.prod(args.obs_shape) args.action_shape = env.action_space.shape action_dim = np.prod(args.action_shape) prior_data_path = os.path.join(args.data_dir, "blocked_drawer_2_prior.npy") task_data_path = os.path.join(args.data_dir, "drawer_task.npy") diff_dataset, _ = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path, sample_ratio =args.sample_ratio, task_weight=args.task_weight) dyn_dataset, init_obss_dataset = get_doubledrawer_dataset( prior_data_path=prior_data_path, task_data_path=task_data_path) else: raise NotImplementedError env.reset(seed=args.seed) timestamp = datetime.datetime.now().strftime("%y-%m%d-%H%M%S") exp_name = f"timestamp_{timestamp}&{args.seed}" log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part = "dynamics") # key: output file name, value: output handler type print(f"Logging dynamics to {log_dirs}") output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "dynamics_training_progress": "csv", "tb": "tensorboard" } logger = Logger(log_dirs, output_config) logger.log_hyperparameters(vars(args)) dynamics_model = EnsembleDynamicsModel( obs_dim=obs_dim, action_dim=action_dim, hidden_dims=args.dynamics_hidden_dims, num_ensemble=args.n_ensemble, num_elites=args.n_elites, weight_decays=args.dynamics_weight_decay, device=args.device ) dynamics_optim = torch.optim.Adam( dynamics_model.parameters(), lr=args.dynamics_lr ) scaler = StandardScaler() termination_fn = get_termination_fn(task=args.task) dynamics = EnsembleDynamics( dynamics_model, dynamics_optim, scaler, termination_fn ) # create rollout policy diffusion_policy = SimpleDiffusionPolicy( obs_shape = args.obs_shape, act_shape= args.action_shape, feature_dim = 1, num_training_steps = args.behavior_epoch, num_diffusion_steps = args.num_diffusion_iters, device = args.device ) diff_lr_scheduler = diffusion_policy.get_lr_scheduler() diff_log_dirs = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="diffusion") print(f"Logging diffusion to {diff_log_dirs}") # key: output file name, value: output handler type diff_output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "dynamics_training_progress": "csv", "tb": 
"tensorboard" } diff_logger = Logger(diff_log_dirs, diff_output_config) diff_logger.log_hyperparameters(vars(args)) diff_policy_trainer = DiffusionPolicyTrainer( policy = diffusion_policy, offline_dataset = diff_dataset, logger = diff_logger, seed = args.seed, epoch = args.behavior_epoch, batch_size = args.behavior_batch, lr_scheduler = diff_lr_scheduler, horizon = args.horizon, num_workers = args.num_workers, has_terminal = False, ) # Training helper functions def get_dynamics(): ''' Load or train dynamics model ''' if args.load_dynamics_path: print(f"Load dynamics from {args.load_dynamics_path}") dynamics.load(args.load_dynamics_path) else: print(f"Train dynamics") dynamics.train(dyn_dataset, logger) def get_rollout_policy(): ''' Load or train rollout policy Return: rollout policy ''' if args.load_diffusion_path is not None: print(f"Load behavior policy from {args.load_diffusion_path}") with open(args.load_diffusion_path, 'rb') as f: state_dict = torch.load(f, map_location= args.device) diffusion_policy.load_state_dict(state_dict) else: print(f"Train diffusion behavior policy") diff_policy_trainer.train() # save checkpoint periodically def get_rollout_trajs(logger: Logger, threshold = 0.9) -> Tuple[Dict[str, np.ndarray], float]: ''' Rollout trajectories or load existing trajectories. If rollout, call `get_rollout_policy()` and `get_dynamics()` first to get rollout policy and dynamics Return: rollout trajectories ''' ''' diffusion behavior policy rollout - threshold: only keep trajs with ret > [threshold] (valid). Usually the max return in dataset - args.num_need_traj: number of valid trajectories needed. End rollout when get enough trajs - args.rollout_epoch: maximum rollout epoch. Should be large ''' device = args.device num_need_traj = args.num_need_traj rollout_data_all = None # Initialize rollout_dataset as nothing num_traj_all = 0 # Initialize total number of rollout trajs start_epoch = 0 # Default starting epoch returns_all = [] if args.rollout_ckpt_path is not None: print(f"Will save rollout trajectories to dir {args.rollout_ckpt_path}") os.makedirs(args.rollout_ckpt_path, exist_ok=True) data_path = os.path.join(args.rollout_ckpt_path, "rollout.dat") if os.path.exists(data_path): # Load ckpt_data ckpt_dict = pickle.load(open(data_path,"rb")) # checkpoint in dict type rollout_data_all = ckpt_dict['data'] # should be dict num_traj_all = ckpt_dict['num_traj'] returns_all = ckpt_dict['return'] start_epoch = ckpt_dict['epoch'] + 1 # trajs = ckpt_dict print(f"Loaded checkpoint. Already have {num_traj_all} valid trajectories, start from epoch {start_epoch}.") if num_traj_all >= num_need_traj: print(f"Checkpoint trajectories are enough. 
Skip rollout procedure.") return rollout_data_all, max(returns_all) # Still need training, get dynamics and rollout policy get_dynamics() get_rollout_policy() with torch.no_grad(): for epoch in range(start_epoch, args.rollout_epoch): batch_indexs = np.random.randint(0, init_obss_dataset.shape[0], size=args.rollout_batch) init_obss = init_obss_dataset[batch_indexs] rollout_data, rollout_info = rollout_simple(init_obss, dynamics, diffusion_policy, args.horizon) # print(pred_state) # Only keep trajs with returns > threshold returns = rollout_info['returns'] rewards_full = rollout_info['rewards_full'] min_last_rewards = np.min(rewards_full[:, -3:], axis = -1) # (B,), final steps must be large max_last_rewards = np.max(rewards_full[:, -3:], axis = -1) max_cond = np.logical_and(max_last_rewards > 0.9, max_last_rewards < 2) min_cond = min_last_rewards > 0.7 valid_cond = np.logical_and(max_cond, min_cond) valid_trajs = np.arange(args.rollout_batch)[valid_cond] # np.array, indexs of all valid trajs valid_data_idxs = [rollout_data['traj_idxs'][i] in valid_trajs for i in range(rollout_data['traj_idxs'].shape[0])] for k in rollout_data: rollout_data[k] = rollout_data[k][valid_data_idxs] # Add rollout_data to rollout_data_all if rollout_data_all is None: # No trajs collected rollout_data_all = deepcopy(rollout_data) else: for k in rollout_data: rollout_data_all[k] = np.concatenate([rollout_data_all[k], rollout_data[k]], axis=0) num_traj_all += len(valid_trajs) returns_all += list(returns[valid_trajs]) print(f"-----------\nEpoch {epoch}, get {len(valid_trajs)} new trajs") logger.logkv("Epoch", epoch) logger.logkv("num_new_trajs", len(valid_trajs)) logger.logkv("num_total_trajs", num_traj_all) logger.dumpkvs() save_path = os.path.join(logger.checkpoint_dir, "rollout.dat") pickle.dump({'epoch': epoch, 'data': rollout_data_all, 'num_traj': num_traj_all, 'return': returns_all}, open(save_path, "wb")) if num_traj_all >= num_need_traj: # Get enough trajs, quit rollout print(f"End rollout. Total epochs used: {epoch+1}") break return rollout_data_all, max(returns_all) rollout_save_dir = make_log_dirs(args.task, args.algo_name, exp_name, vars(args), part="rollout") print(f"Logging diffusion rollout to {rollout_save_dir}") rollout_logger = Logger(rollout_save_dir, {"consoleout_backup": "stdout"}) rollout_logger.log_hyperparameters(vars(args)) rollout_dataset, max_offline_return = get_rollout_trajs(rollout_logger) # train rcsl_policy = AutoregressivePolicy( obs_dim=obs_dim, act_dim = action_dim, hidden_dims=args.rcsl_hidden_dims, lr = args.rcsl_lr, device = args.device ) lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(rcsl_policy.rcsl_optim, args.rcsl_epoch) task_name = args.task rcsl_log_dirs = make_log_dirs(task_name, args.algo_name, exp_name, vars(args), part='rcsl') # key: output file name, value: output handler type print(f"Logging autoregressive gaussian rcsl to {rcsl_log_dirs}") rcsl_output_config = { "consoleout_backup": "stdout", "policy_training_progress": "csv", "tb": "tensorboard" } rcsl_logger = Logger(rcsl_log_dirs, rcsl_output_config) rcsl_logger.log_hyperparameters(vars(args))
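Inside `rollout_simple` above, every stored transition is tagged with a return-to-go: the trajectory's total return minus the return accumulated before that step (`rtgs = returns[traj_idxs] - acc_rets`). A minimal single-trajectory sketch of that bookkeeping with made-up rewards:

```python
import numpy as np

rewards = np.array([0.0, 0.5, 0.0, 1.0])                     # toy per-step rewards of one rollout
total_return = rewards.sum()                                 # 1.5

# Return accumulated *before* each step, matching the order of appends in rollout_simple
acc_rets = np.concatenate([[0.0], np.cumsum(rewards)[:-1]])  # [0.0, 0.0, 0.5, 0.5]

rtgs = total_return - acc_rets                               # return-to-go per transition
print(rtgs)   # -> 1.5, 1.5, 1.0, 1.0
```

The RCSL policy is later conditioned on these values; the evaluation loop in the trainer starts from the desired goal return and decrements it by the observed reward at every step.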
policy_trainer = RcslPolicyTrainer(
10
2023-10-11 08:36:06+00:00
24k
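The dynamics wrapper in the row above is built around the `StandardScaler` from the context list; a quick NumPy sketch of the arithmetic behind `fit`, `transform`, and `inverse_transform` (the data values are invented):

```python
import numpy as np

data = np.array([[0.0, 10.0],
                 [2.0, 20.0],
                 [4.0, 30.0]])               # toy batch of 2-D features

mu = np.mean(data, axis=0, keepdims=True)    # what fit() stores
std = np.std(data, axis=0, keepdims=True)
std[std < 1e-12] = 1.0                       # guard against constant features, as in the class

normalized = (data - mu) / std               # transform()
recovered = std * normalized + mu            # inverse_transform()

assert np.allclose(recovered, data)
print(normalized.mean(axis=0), normalized.std(axis=0))   # ~[0. 0.] and [1. 1.]
```

Persisting `mu` and `std` next to the model weights (as `save_scaler`/`load_scaler` do with `.npy` files) is what lets a reloaded dynamics checkpoint reproduce the same normalization.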
lmb-freiburg/ldce
ldm/models/diffusion/dpm_solver/sampler.py
[ { "identifier": "NoiseScheduleVP", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise\n schedule are the default settings in DDPM and improved-DDPM:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n cosine_s: A `float` number. The hyperparameter in the cosine schedule.\n cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 
'discrete' for discrete-time DPMs,\n 'linear' or 'cosine' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n \n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in ['discrete', 'linear', 'cosine']:\n raise ValueError(\"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'\".format(schedule))\n\n self.schedule = schedule\n if schedule == 'discrete':\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.total_N = len(log_alphas)\n self.T = 1.\n self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1))\n self.log_alpha_array = log_alphas.reshape((1, -1,))\n else:\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n self.cosine_s = 0.008\n self.cosine_beta_max = 999.\n self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s\n self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.))\n self.schedule = schedule\n if schedule == 'cosine':\n # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T.\n # Note that T = 0.9946 may be not the optimal setting. However, we find it works well.\n self.T = 0.9946\n else:\n self.T = 1.\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == 'discrete':\n return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1))\n elif self.schedule == 'linear':\n return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n elif self.schedule == 'cosine':\n log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.))\n log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0\n return log_alpha_t\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == 'linear':\n tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. 
* lamb, torch.zeros((1,)).to(lamb))\n Delta = self.beta_0**2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == 'discrete':\n log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)\n t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1]))\n return t.reshape((-1,))\n else:\n log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))\n t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s\n t = t_fn(log_alpha)\n return t" }, { "identifier": "model_wrapper", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. \"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n \n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n `` \n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n `` \n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. 
\"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n \n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs) \n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == 'discrete':\n return (t_continuous - 1. / noise_schedule.total_N) * 1000.\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n if t_continuous.reshape((-1,)).shape[0] == 1:\n t_continuous = t_continuous.expand((x.shape[0]))\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(x, t_input, **model_kwargs)\n else:\n output = model(x, t_input, cond, **model_kwargs)\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n dims = x.dim()\n return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims)\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n dims = x.dim()\n return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n dims = x.dim()\n return -expand_dims(sigma_t, dims) * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. 
nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if t_continuous.reshape((-1,)).shape[0] == 1:\n t_continuous = t_continuous.expand((x.shape[0]))\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1. or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "DPM_Solver", "path": "ldm/models/diffusion/dpm_solver/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.):\n \"\"\"Construct a DPM-Solver. \n\n We support both the noise prediction model (\"predicting epsilon\") and the data prediction model (\"predicting x0\").\n If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver).\n If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++).\n In such case, we further support the \"dynamic thresholding\" in [1] when `thresholding` is True.\n The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space DPMs with large guidance scales.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model.\n thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the \"dynamic thresholding\" in [1].\n max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding.\n \n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = model_fn\n self.noise_schedule = noise_schedule\n self.predict_x0 = predict_x0\n self.thresholding = thresholding\n self.max_val = max_val\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with thresholding).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n dims = x.dim()\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims)\n if self.thresholding:\n p = 0.995 # A hyperparameter in the paper of \"Imagen\" [1].\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims)\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model. \n \"\"\"\n if self.predict_x0:\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == 'logSNR':\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == 'time_uniform':\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == 'time_quadratic':\n t_order = 2\n t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device)\n return t\n else:\n raise ValueError(\"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(skip_type))\n\n def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). 
We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3,] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3,] * (K - 1) + [1]\n else:\n orders = [3,] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2,] * K\n else:\n K = steps // 2 + 1\n orders = [2,] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1,] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. \n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.predict_x0:\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.predict_x0:\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n expand_dims(sigma_s1 / sigma_s, dims) * x\n - expand_dims(alpha_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) 
/ h + 1.), dims) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x\n - expand_dims(sigma_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 1. / 3.\n if r2 is None:\n r2 = 2. 
/ 3.\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t)\n alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)\n\n if self.predict_x0:\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n expand_dims(sigma_s1 / sigma_s, dims) * x\n - expand_dims(alpha_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n expand_dims(sigma_s2 / sigma_s, dims) * x\n - expand_dims(alpha_s2 * phi_12, dims) * model_s\n + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n expand_dims(sigma_t / sigma_s, dims) * x\n - expand_dims(alpha_t * phi_1, dims) * model_s\n + expand_dims(alpha_t * phi_2, dims) * D1\n - expand_dims(alpha_t * phi_3, dims) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x\n - expand_dims(sigma_s1 * phi_11, dims) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x\n - expand_dims(sigma_s2 * phi_12, dims) * model_s\n - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x\n - expand_dims(sigma_t * phi_1, dims) * model_s\n - expand_dims(sigma_t * phi_2, dims) * D1\n - expand_dims(sigma_t * phi_3, dims) * D2\n )\n\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpm_solver\"):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpm_solver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpm_solver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n dims = x.dim()\n model_prev_1, model_prev_0 = model_prev_list\n t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)\n if self.predict_x0:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0\n )\n else:\n if solver_type == 'dpm_solver':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1)\n D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1)\n D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1)\n if self.predict_x0:\n x_t = (\n expand_dims(sigma_t / sigma_prev_0, dims) * x\n - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0\n + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1\n - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2\n )\n else:\n x_t = (\n expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x\n - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0\n - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1\n - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (x.shape[0],).\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1)\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. 
The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],)\n t: A pytorch tensor. The ending time, with the shape (x.shape[0],).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])\n elif order == 2:\n return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n elif order == 3:\n return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the \n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpm_solver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((x.shape[0],)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs)\n elif order == 3:\n r1, r2 = 1. / 3., 2. 
/ 3.\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs)\n else:\n raise ValueError(\"For adaptive step size solver, order must be 2 or 3, got {}\".format(order))\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))\n norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)\n nfe += order\n print('adaptive solver nfe', nfe)\n return x\n\n def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform',\n method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver',\n atol=0.0078, rtol=0.05,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. \"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver. \n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. 
DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. \"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False)\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True)\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. 
Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n device = x.device\n if method == 'adaptive':\n with torch.no_grad():\n x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type)\n elif method == 'multistep':\n assert steps >= order\n timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)\n assert timesteps.shape[0] - 1 == steps\n with torch.no_grad():\n vec_t = timesteps[0].expand((x.shape[0]))\n model_prev_list = [self.model_fn(x, vec_t)]\n t_prev_list = [vec_t]\n # Init the first `order` values by lower order multistep DPM-Solver.\n for init_order in range(1, order):\n vec_t = timesteps[init_order].expand(x.shape[0])\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type)\n model_prev_list.append(self.model_fn(x, vec_t))\n t_prev_list.append(vec_t)\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in range(order, steps + 1):\n vec_t = timesteps[step].expand(x.shape[0])\n if lower_order_final and steps < 15:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = model_prev_list[i + 1]\n t_prev_list[-1] = vec_t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, vec_t)\n elif method in ['singlestep', 'singlestep_fixed']:\n if method == 'singlestep':\n timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device)\n elif method == 'singlestep_fixed':\n K = steps // order\n orders = [order,] * K\n timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)\n for i, order in enumerate(orders):\n t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1]\n timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device)\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0])\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2)\n if denoise_to_zero:\n x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0)\n 
return x" } ]
import torch
from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver
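The NoiseScheduleVP class imported here (and quoted in full in the context above) defines alpha_t, sigma_t and the half-logSNR lambda_t = log(alpha_t) - log(sigma_t) so that the VP identity alpha_t^2 + sigma_t^2 = 1 holds. A minimal sanity-check sketch, assuming the module is importable under the repository path ldm/models/diffusion/dpm_solver/dpm_solver.py and using a toy linear-beta discrete schedule (the beta values below are illustrative, not taken from this record):

import torch
# Assumed absolute import path; inside the sampler module itself the relative
# form `from .dpm_solver import NoiseScheduleVP` shown above is used instead.
from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP

# Toy discrete schedule: cumulative product of (1 - beta_n) for a linear beta ramp.
betas = torch.linspace(1e-4, 2e-2, 1000)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
t = torch.tensor([0.1, 0.5, 1.0])          # continuous-time labels in (0, T]

alpha_t = ns.marginal_alpha(t)
sigma_t = ns.marginal_std(t)
lambda_t = ns.marginal_lambda(t)

# VP identity and the definition of the half-logSNR.
assert torch.allclose(alpha_t**2 + sigma_t**2, torch.ones_like(t), atol=1e-5)
assert torch.allclose(lambda_t, torch.log(alpha_t) - torch.log(sigma_t), atol=1e-5)
# inverse_lambda should (approximately) invert marginal_lambda on the time grid.
assert torch.allclose(ns.inverse_lambda(lambda_t), t, atol=1e-3)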
17,743
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
"""SAMPLING ONLY.""" class DPMSolverSampler(object): def __init__(self, model, **kwargs): super().__init__() self.model = model to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) def register_buffer(self, name, attr): if type(attr) == torch.Tensor: if attr.device != torch.device("cuda"): attr = attr.to(torch.device("cuda")) setattr(self, name, attr) @torch.no_grad() def sample(self, S, batch_size, shape, conditioning=None, callback=None, normals_sequence=None, img_callback=None, quantize_x0=False, eta=0., mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, verbose=True, x_T=None, log_every_t=100, unconditional_guidance_scale=1., unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... **kwargs ): if conditioning is not None: if isinstance(conditioning, dict): cbs = conditioning[list(conditioning.keys())[0]].shape[0] if cbs != batch_size: print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") else: if conditioning.shape[0] != batch_size: print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") # sampling C, H, W = shape size = (batch_size, C, H, W) # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') device = self.model.betas.device if x_T is None: img = torch.randn(size, device=device) else: img = x_T ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)
model_fn = model_wrapper(
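The gold next line opens a model_wrapper(...) call whose remaining arguments are truncated out of this record. Going only by the model_wrapper and DPM_Solver docstrings quoted in the context (classifier-free guidance; data-prediction multistep solver of order 2 recommended for large guidance scales), a typical completion of this pattern looks like the hedged sketch below. `denoise_fn` is a stand-in for the sampler's underlying noise-prediction network, and the sketch is not claimed to be the original file's verbatim continuation; `ns`, `img`, `S`, `conditioning`, `unconditional_conditioning` and `unconditional_guidance_scale` are the names already in scope in the cropped sampler code above.

# Hedged sketch of the documented usage pattern, not the file's verbatim code.
model_fn = model_wrapper(
    lambda x, t, c: denoise_fn(x, t, c),   # placeholder noise-prediction network
    ns,                                    # the NoiseScheduleVP built above
    model_type="noise",
    guidance_type="classifier-free",
    condition=conditioning,
    unconditional_condition=unconditional_conditioning,
    guidance_scale=unconditional_guidance_scale,
)

# Docstring recommendation for guided sampling with a large guidance scale:
# data-prediction solver (predict_x0=True), multistep, order 2.
dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True)
x = dpm_solver.sample(
    img,                                   # x_T, the initial Gaussian sample
    steps=S,
    skip_type="time_uniform",
    method="multistep",
    order=2,
    lower_order_final=True,
)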
1
2023-10-10 09:40:10+00:00
24k
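Taken together, the fields of this record (the context snippets, import_statement, cropped_code, next_line and gold_snippet_index) form one next-line-prediction example. A minimal sketch of how such a record might be consumed for evaluation, assuming it is loaded as a Python dict `record` and that `complete(prompt)` is a placeholder for whatever code model is under test; the exact prompt construction is an assumption, not part of the dataset:

def build_prompt(record: dict) -> str:
    """Concatenate the retrieved context snippets with the truncated file."""
    context_block = "\n\n".join(
        f"# --- {item['path']} :: {item['identifier']} ---\n{item['snippet']}"
        for item in record["context"]
    )
    return f"{context_block}\n\n{record['import_statement']}\n{record['cropped_code']}"

def evaluate_record(record: dict, complete) -> bool:
    """`complete(prompt)` stands in for the code-completion model being evaluated."""
    prediction = complete(build_prompt(record)).splitlines()[0].strip()
    return prediction == record["next_line"].strip()

# record["gold_snippet_index"] (1 for this record) marks which context entry —
# here model_wrapper — the gold next line actually depends on.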
spla-tam/SplaTAM
scripts/post_splatam_opt.py
[ { "identifier": "AzureKinectDataset", "path": "datasets/gradslam_datasets/azure.py", "snippet": "class AzureKinectDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n\n # # check if a file named 'poses_global_dvo.txt' exists in the basedir / sequence folder\n # if os.path.isfile(os.path.join(basedir, sequence, \"poses_global_dvo.txt\")):\n # self.pose_path = os.path.join(basedir, sequence, \"poses_global_dvo.txt\")\n\n if \"odomfile\" in kwargs.keys():\n self.pose_path = os.path.join(self.input_folder, kwargs[\"odomfile\"])\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n if self.pose_path is None:\n print(\"WARNING: Dataset does not contain poses. Returning identity transform.\")\n return [torch.eye(4).float() for _ in range(self.num_imgs)]\n else:\n # Determine whether the posefile ends in \".log\"\n # a .log file has the following format for each frame\n # frame_idx frame_idx+1\n # row 1 of 4x4 transform\n # row 2 of 4x4 transform\n # row 3 of 4x4 transform\n # row 4 of 4x4 transform\n # [repeat for all frames]\n #\n # on the other hand, the \"poses_o3d.txt\" or \"poses_dvo.txt\" files have the format\n # 16 entries of 4x4 transform\n # [repeat for all frames]\n if self.pose_path.endswith(\".log\"):\n # print(\"Loading poses from .log format\")\n poses = []\n lines = None\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n if len(lines) % 5 != 0:\n raise ValueError(\n \"Incorrect file format for .log odom file \" \"Number of non-empty lines must be a multiple of 5\"\n )\n num_lines = len(lines) // 5\n for i in range(0, num_lines):\n _curpose = []\n _curpose.append(list(map(float, lines[5 * i + 1].split())))\n _curpose.append(list(map(float, lines[5 * i + 2].split())))\n _curpose.append(list(map(float, lines[5 * i + 3].split())))\n _curpose.append(list(map(float, lines[5 * i + 4].split())))\n _curpose = np.array(_curpose).reshape(4, 4)\n poses.append(torch.from_numpy(_curpose))\n else:\n poses = []\n lines = None\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n for line in lines:\n if len(line.split()) == 0:\n continue\n c2w = np.array(list(map(float, line.split()))).reshape(4, 4)\n poses.append(torch.from_numpy(c2w))\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding # .permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "load_dataset_config", "path": "datasets/gradslam_datasets/dataconfig.py", "snippet": "def 
load_dataset_config(path, default_path=None):\n \"\"\"\n Loads config file.\n\n Args:\n path (str): path to config file.\n default_path (str, optional): whether to use default path. Defaults to None.\n\n Returns:\n cfg (dict): config dict.\n\n \"\"\"\n # load configuration from file itself\n with open(path, \"r\") as f:\n cfg_special = yaml.full_load(f)\n\n # check if we should inherit from a config\n inherit_from = cfg_special.get(\"inherit_from\")\n\n # if yes, load this config first as default\n # if no, use the default_path\n if inherit_from is not None:\n cfg = load_dataset_config(inherit_from, default_path)\n elif default_path is not None:\n with open(default_path, \"r\") as f:\n cfg = yaml.full_load(f)\n else:\n cfg = dict()\n\n # include main configuration\n update_recursive(cfg, cfg_special)\n\n return cfg" }, { "identifier": "ICLDataset", "path": "datasets/gradslam_datasets/icl.py", "snippet": "class ICLDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict: Dict,\n basedir: Union[Path, str],\n sequence: Union[Path, str],\n stride: Optional[int] = 1,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[Union[Path, str]] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n embedding_file_extension: Optional[str] = \"pt\",\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n # Attempt to find pose file (*.gt.sim)\n self.pose_path = glob.glob(os.path.join(self.input_folder, \"*.gt.sim\"))\n if self.pose_path == 0:\n raise ValueError(\"Need pose file ending in extension `*.gt.sim`\")\n self.pose_path = self.pose_path[0]\n self.embedding_file_extension = embedding_file_extension\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/rgb/*.png\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(\n glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.{self.embedding_file_extension}\")\n )\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n\n lines = []\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n\n _posearr = []\n for line in lines:\n line = line.strip().split()\n if len(line) == 0:\n continue\n _npvec = np.asarray([float(line[0]), float(line[1]), float(line[2]), float(line[3])])\n _posearr.append(_npvec)\n _posearr = np.stack(_posearr)\n\n for pose_line_idx in range(0, _posearr.shape[0], 3):\n _curpose = np.zeros((4, 4))\n _curpose[3, 3] = 3\n _curpose[0] = _posearr[pose_line_idx]\n _curpose[1] = _posearr[pose_line_idx + 1]\n _curpose[2] = _posearr[pose_line_idx + 2]\n poses.append(torch.from_numpy(_curpose).float())\n\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "ReplicaDataset", "path": "datasets/gradslam_datasets/replica.py", "snippet": "class ReplicaDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] 
= 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = os.path.join(self.input_folder, \"traj.txt\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/results/frame*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/results/depth*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n with open(self.pose_path, \"r\") as f:\n lines = f.readlines()\n for i in range(self.num_imgs):\n line = lines[i]\n c2w = np.array(list(map(float, line.split()))).reshape(4, 4)\n # c2w[:3, 1] *= -1\n # c2w[:3, 2] *= -1\n c2w = torch.from_numpy(c2w).float()\n poses.append(c2w)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "ScannetDataset", "path": "datasets/gradslam_datasets/scannet.py", "snippet": "class ScannetDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 968,\n desired_width: Optional[int] = 1296,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.jpg\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n posefiles = natsorted(glob.glob(f\"{self.input_folder}/pose/*.txt\"))\n for posefile in posefiles:\n _pose = torch.from_numpy(np.loadtxt(posefile))\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "Ai2thorDataset", "path": "datasets/gradslam_datasets/ai2thor.py", "snippet": "class Ai2thorDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 968,\n desired_width: Optional[int] = 
1296,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(f\"{self.input_folder}/color/*.png\"))\n depth_paths = natsorted(glob.glob(f\"{self.input_folder}/depth/*.png\"))\n embedding_paths = None\n if self.load_embeddings:\n if self.embedding_dir == \"embed_semseg\":\n # embed_semseg is stored as uint16 pngs\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.png\"))\n else:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n poses = []\n posefiles = natsorted(glob.glob(f\"{self.input_folder}/pose/*.txt\"))\n for posefile in posefiles:\n _pose = torch.from_numpy(np.loadtxt(posefile))\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n if self.embedding_dir == \"embed_semseg\":\n embedding = imageio.imread(embedding_file_path) # (H, W)\n embedding = cv2.resize(\n embedding, (self.desired_width, self.desired_height), interpolation=cv2.INTER_NEAREST\n )\n embedding = torch.from_numpy(embedding).long() # (H, W)\n embedding = F.one_hot(embedding, num_classes=self.embedding_dim) # (H, W, C)\n embedding = embedding.half() # (H, W, C)\n embedding = embedding.permute(2, 0, 1) # (C, H, W)\n embedding = embedding.unsqueeze(0) # (1, C, H, W)\n else:\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "RealsenseDataset", "path": "datasets/gradslam_datasets/realsense.py", "snippet": "class RealsenseDataset(GradSLAMDataset):\n \"\"\"\n Dataset class to process depth images captured by realsense camera on the tabletop manipulator\n \"\"\"\n\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n # only poses/images/depth corresponding to the realsense_camera_order are read/used\n self.pose_path = os.path.join(self.input_folder, \"poses\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"rgb\", \"*.jpg\")))\n depth_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"depth\", \"*.png\")))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n posefiles = natsorted(glob.glob(os.path.join(self.pose_path, 
\"*.npy\")))\n poses = []\n P = torch.tensor([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]).float()\n for posefile in posefiles:\n c2w = torch.from_numpy(np.load(posefile)).float()\n _R = c2w[:3, :3]\n _t = c2w[:3, 3]\n _pose = P @ c2w @ P.T\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "Record3DDataset", "path": "datasets/gradslam_datasets/record3d.py", "snippet": "class Record3DDataset(GradSLAMDataset):\n \"\"\"\n Dataset class to read in saved files from the structure created by our\n `save_record3d_stream.py` script\n \"\"\"\n\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = os.path.join(self.input_folder, \"poses\")\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def get_filepaths(self):\n color_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"rgb\", \"*.png\")))\n depth_paths = natsorted(glob.glob(os.path.join(self.input_folder, \"depth\", \"*.png\")))\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{self.input_folder}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n posefiles = natsorted(glob.glob(os.path.join(self.pose_path, \"*.npy\")))\n poses = []\n P = torch.tensor([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]).float()\n for posefile in posefiles:\n c2w = torch.from_numpy(np.load(posefile)).float()\n _R = c2w[:3, :3]\n _t = c2w[:3, 3]\n _pose = P @ c2w @ P.T\n poses.append(_pose)\n return poses\n\n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path)\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "TUMDataset", "path": "datasets/gradslam_datasets/tum.py", "snippet": "class TUMDataset(GradSLAMDataset):\n def __init__(\n self,\n config_dict,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 480,\n desired_width: Optional[int] = 640,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n self.pose_path = None\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n )\n\n def parse_list(self, filepath, skiprows=0):\n \"\"\" read list data \"\"\"\n data = np.loadtxt(filepath, delimiter=' ',\n dtype=np.unicode_, skiprows=skiprows)\n return data\n\n def associate_frames(self, tstamp_image, tstamp_depth, tstamp_pose, max_dt=0.08):\n \"\"\" pair 
images, depths, and poses \"\"\"\n associations = []\n for i, t in enumerate(tstamp_image):\n if tstamp_pose is None:\n j = np.argmin(np.abs(tstamp_depth - t))\n if (np.abs(tstamp_depth[j] - t) < max_dt):\n associations.append((i, j))\n\n else:\n j = np.argmin(np.abs(tstamp_depth - t))\n k = np.argmin(np.abs(tstamp_pose - t))\n\n if (np.abs(tstamp_depth[j] - t) < max_dt) and \\\n (np.abs(tstamp_pose[k] - t) < max_dt):\n associations.append((i, j, k))\n\n return associations\n\n def pose_matrix_from_quaternion(self, pvec):\n \"\"\" convert 4x4 pose matrix to (t, q) \"\"\"\n from scipy.spatial.transform import Rotation\n\n pose = np.eye(4)\n pose[:3, :3] = Rotation.from_quat(pvec[3:]).as_matrix()\n pose[:3, 3] = pvec[:3]\n return pose\n\n def get_filepaths(self):\n\n frame_rate = 32\n \"\"\" read video data in tum-rgbd format \"\"\"\n if os.path.isfile(os.path.join(self.input_folder, 'groundtruth.txt')):\n pose_list = os.path.join(self.input_folder, 'groundtruth.txt')\n elif os.path.isfile(os.path.join(self.input_folder, 'pose.txt')):\n pose_list = os.path.join(self.input_folder, 'pose.txt')\n\n image_list = os.path.join(self.input_folder, 'rgb.txt')\n depth_list = os.path.join(self.input_folder, 'depth.txt')\n\n image_data = self.parse_list(image_list)\n depth_data = self.parse_list(depth_list)\n pose_data = self.parse_list(pose_list, skiprows=1)\n pose_vecs = pose_data[:, 1:].astype(np.float64)\n\n tstamp_image = image_data[:, 0].astype(np.float64)\n tstamp_depth = depth_data[:, 0].astype(np.float64)\n tstamp_pose = pose_data[:, 0].astype(np.float64)\n associations = self.associate_frames(\n tstamp_image, tstamp_depth, tstamp_pose)\n\n indicies = [0]\n for i in range(1, len(associations)):\n t0 = tstamp_image[associations[indicies[-1]][0]]\n t1 = tstamp_image[associations[i][0]]\n if t1 - t0 > 1.0 / frame_rate:\n indicies += [i]\n\n color_paths, depth_paths = [], []\n for ix in indicies:\n (i, j, k) = associations[ix]\n color_paths += [os.path.join(self.input_folder, image_data[i, 1])]\n depth_paths += [os.path.join(self.input_folder, depth_data[j, 1])]\n\n embedding_paths = None\n\n return color_paths, depth_paths, embedding_paths\n \n def load_poses(self):\n \n frame_rate = 32\n \"\"\" read video data in tum-rgbd format \"\"\"\n if os.path.isfile(os.path.join(self.input_folder, 'groundtruth.txt')):\n pose_list = os.path.join(self.input_folder, 'groundtruth.txt')\n elif os.path.isfile(os.path.join(self.input_folder, 'pose.txt')):\n pose_list = os.path.join(self.input_folder, 'pose.txt')\n\n image_list = os.path.join(self.input_folder, 'rgb.txt')\n depth_list = os.path.join(self.input_folder, 'depth.txt')\n\n image_data = self.parse_list(image_list)\n depth_data = self.parse_list(depth_list)\n pose_data = self.parse_list(pose_list, skiprows=1)\n pose_vecs = pose_data[:, 1:].astype(np.float64)\n\n tstamp_image = image_data[:, 0].astype(np.float64)\n tstamp_depth = depth_data[:, 0].astype(np.float64)\n tstamp_pose = pose_data[:, 0].astype(np.float64)\n associations = self.associate_frames(\n tstamp_image, tstamp_depth, tstamp_pose)\n\n indicies = [0]\n for i in range(1, len(associations)):\n t0 = tstamp_image[associations[indicies[-1]][0]]\n t1 = tstamp_image[associations[i][0]]\n if t1 - t0 > 1.0 / frame_rate:\n indicies += [i]\n\n color_paths, poses, depth_paths, intrinsics = [], [], [], []\n inv_pose = None\n for ix in indicies:\n (i, j, k) = associations[ix]\n color_paths += [os.path.join(self.input_folder, image_data[i, 1])]\n depth_paths += [os.path.join(self.input_folder, 
depth_data[j, 1])]\n c2w = self.pose_matrix_from_quaternion(pose_vecs[k])\n c2w = torch.from_numpy(c2w).float()\n poses += [c2w]\n\n return poses\n \n def read_embedding_from_file(self, embedding_file_path):\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1)" }, { "identifier": "ScannetPPDataset", "path": "datasets/gradslam_datasets/scannetpp.py", "snippet": "class ScannetPPDataset(GradSLAMDataset):\n def __init__(\n self,\n basedir,\n sequence,\n ignore_bad: Optional[bool] = False,\n use_train_split: Optional[bool] = True,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 1168,\n desired_width: Optional[int] = 1752,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n config_dict = {}\n config_dict[\"dataset_name\"] = \"scannetpp\"\n self.pose_path = None\n self.ignore_bad = ignore_bad\n self.use_train_split = use_train_split\n\n # Load Train & Test Split\n self.train_test_split = json.load(open(f\"{self.input_folder}/dslr/train_test_lists.json\", \"r\"))\n if self.use_train_split:\n self.image_names = self.train_test_split[\"train\"]\n else:\n self.image_names = self.train_test_split[\"test\"]\n self.train_image_names = self.train_test_split[\"train\"]\n \n # Load NeRFStudio format camera & poses data\n self.cams_metadata = self.load_cams_metadata()\n if self.use_train_split:\n self.frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n else:\n self.frames_metadata = self.cams_metadata[\"test_frames\"]\n self.train_frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n self.train_filepath_index_mapping = create_filepath_index_mapping(self.train_frames_metadata) \n\n # Init Intrinsics\n config_dict[\"camera_params\"] = {}\n config_dict[\"camera_params\"][\"png_depth_scale\"] = 1000.0 # Depth is in mm\n config_dict[\"camera_params\"][\"image_height\"] = self.cams_metadata[\"h\"]\n config_dict[\"camera_params\"][\"image_width\"] = self.cams_metadata[\"w\"]\n config_dict[\"camera_params\"][\"fx\"] = self.cams_metadata[\"fl_x\"]\n config_dict[\"camera_params\"][\"fy\"] = self.cams_metadata[\"fl_y\"]\n config_dict[\"camera_params\"][\"cx\"] = self.cams_metadata[\"cx\"]\n config_dict[\"camera_params\"][\"cy\"] = self.cams_metadata[\"cy\"]\n\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n ) \n\n def load_cams_metadata(self):\n cams_metadata_path = f\"{self.input_folder}/dslr/nerfstudio/transforms_undistorted.json\"\n cams_metadata = json.load(open(cams_metadata_path, \"r\"))\n return cams_metadata\n \n def get_filepaths(self):\n base_path = f\"{self.input_folder}/dslr\"\n color_paths = []\n depth_paths = []\n self.tmp_poses = []\n P = torch.tensor(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ]\n ).float()\n if not self.use_train_split:\n self.first_train_image_name = self.train_image_names[0]\n self.first_train_image_index = self.train_filepath_index_mapping.get(self.first_train_image_name)\n self.first_train_frame_metadata = 
self.train_frames_metadata[self.first_train_image_index]\n # Get path of undistorted image and depth\n color_path = f\"{base_path}/undistorted_images/{self.first_train_image_name}\"\n depth_path = f\"{base_path}/undistorted_depths/{self.first_train_image_name.replace('.JPG', '.png')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of first train frame in GradSLAM format\n c2w = torch.from_numpy(np.array(self.first_train_frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n for image_name in self.image_names:\n # Search for image name in frames_metadata\n frame_metadata = self.frames_metadata[self.filepath_index_mapping.get(image_name)]\n # Check if frame is blurry and if it needs to be ignored\n if self.ignore_bad and frame_metadata['is_bad']:\n continue\n # Get path of undistorted image and depth\n color_path = f\"{base_path}/undistorted_images/{image_name}\"\n depth_path = f\"{base_path}/undistorted_depths/{image_name.replace('.JPG', '.png')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of undistorted image in GradSLAM format\n c2w = torch.from_numpy(np.array(frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{base_path}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n return self.tmp_poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "NeRFCaptureDataset", "path": "datasets/gradslam_datasets/nerfcapture.py", "snippet": "class NeRFCaptureDataset(GradSLAMDataset):\n def __init__(\n self,\n basedir,\n sequence,\n stride: Optional[int] = None,\n start: Optional[int] = 0,\n end: Optional[int] = -1,\n desired_height: Optional[int] = 1440,\n desired_width: Optional[int] = 1920,\n load_embeddings: Optional[bool] = False,\n embedding_dir: Optional[str] = \"embeddings\",\n embedding_dim: Optional[int] = 512,\n **kwargs,\n ):\n self.input_folder = os.path.join(basedir, sequence)\n config_dict = {}\n config_dict[\"dataset_name\"] = \"nerfcapture\"\n self.pose_path = None\n \n # Load NeRFStudio format camera & poses data\n self.cams_metadata = self.load_cams_metadata()\n self.frames_metadata = self.cams_metadata[\"frames\"]\n self.filepath_index_mapping = create_filepath_index_mapping(self.frames_metadata)\n\n # Load RGB & Depth filepaths\n self.image_names = natsorted(os.listdir(f\"{self.input_folder}/rgb\"))\n self.image_names = [f'rgb/{image_name}' for image_name in self.image_names]\n\n # Init Intrinsics\n config_dict[\"camera_params\"] = {}\n config_dict[\"camera_params\"][\"png_depth_scale\"] = 6553.5 # Depth is in mm\n config_dict[\"camera_params\"][\"image_height\"] = self.cams_metadata[\"h\"]\n config_dict[\"camera_params\"][\"image_width\"] = self.cams_metadata[\"w\"]\n config_dict[\"camera_params\"][\"fx\"] = self.cams_metadata[\"fl_x\"]\n config_dict[\"camera_params\"][\"fy\"] = self.cams_metadata[\"fl_y\"]\n config_dict[\"camera_params\"][\"cx\"] = self.cams_metadata[\"cx\"]\n config_dict[\"camera_params\"][\"cy\"] = self.cams_metadata[\"cy\"]\n\n super().__init__(\n config_dict,\n stride=stride,\n start=start,\n end=end,\n desired_height=desired_height,\n 
desired_width=desired_width,\n load_embeddings=load_embeddings,\n embedding_dir=embedding_dir,\n embedding_dim=embedding_dim,\n **kwargs,\n ) \n\n def load_cams_metadata(self):\n cams_metadata_path = f\"{self.input_folder}/transforms.json\"\n cams_metadata = json.load(open(cams_metadata_path, \"r\"))\n return cams_metadata\n \n def get_filepaths(self):\n base_path = f\"{self.input_folder}\"\n color_paths = []\n depth_paths = []\n self.tmp_poses = []\n P = torch.tensor(\n [\n [1, 0, 0, 0],\n [0, -1, 0, 0],\n [0, 0, -1, 0],\n [0, 0, 0, 1]\n ]\n ).float()\n for image_name in self.image_names:\n # Search for image name in frames_metadata\n frame_metadata = self.frames_metadata[self.filepath_index_mapping.get(image_name)]\n # Get path of image and depth\n color_path = f\"{base_path}/{image_name}\"\n depth_path = f\"{base_path}/{image_name.replace('rgb', 'depth')}\"\n color_paths.append(color_path)\n depth_paths.append(depth_path)\n # Get pose of image in GradSLAM format\n c2w = torch.from_numpy(np.array(frame_metadata[\"transform_matrix\"])).float()\n _pose = P @ c2w @ P.T\n self.tmp_poses.append(_pose)\n embedding_paths = None\n if self.load_embeddings:\n embedding_paths = natsorted(glob.glob(f\"{base_path}/{self.embedding_dir}/*.pt\"))\n return color_paths, depth_paths, embedding_paths\n\n def load_poses(self):\n return self.tmp_poses\n\n def read_embedding_from_file(self, embedding_file_path):\n print(embedding_file_path)\n embedding = torch.load(embedding_file_path, map_location=\"cpu\")\n return embedding.permute(0, 2, 3, 1) # (1, H, W, embedding_dim)" }, { "identifier": "seed_everything", "path": "utils/common_utils.py", "snippet": "def seed_everything(seed=42):\n \"\"\"\n Set the `seed` value for torch and numpy seeds. Also turns on\n deterministic execution for cudnn.\n \n Parameters:\n - seed: A hashable seed value\n \"\"\"\n random.seed(seed)\n os.environ[\"PYTHONHASHSEED\"] = str(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False\n print(f\"Seed set to: {seed} (type: {type(seed)})\")" }, { "identifier": "save_seq_params", "path": "utils/common_utils.py", "snippet": "def save_seq_params(all_params, output_dir):\n params_to_save = {}\n for frame_idx, params in enumerate(all_params):\n params_to_save[f\"frame_{frame_idx}\"] = params2cpu(params)\n # Save the Parameters containing the Sequence of Gaussians\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **params_to_save)" }, { "identifier": "save_params", "path": "utils/common_utils.py", "snippet": "def save_params(output_params, output_dir):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params.npz\")\n np.savez(save_path, **to_save)" }, { "identifier": "save_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_params_ckpt(output_params, output_dir, time_idx):\n # Convert to CPU Numpy Arrays\n to_save = params2cpu(output_params)\n # Save the Parameters containing the Gaussian Trajectories\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **to_save)" }, { "identifier": 
"save_seq_params_ckpt", "path": "utils/common_utils.py", "snippet": "def save_seq_params_ckpt(all_params, output_dir,time_idx):\n params_to_save = {}\n for frame_idx, params in enumerate(all_params):\n params_to_save[f\"frame_{frame_idx}\"] = params2cpu(params)\n # Save the Parameters containing the Sequence of Gaussians\n os.makedirs(output_dir, exist_ok=True)\n print(f\"Saving parameters to: {output_dir}\")\n save_path = os.path.join(output_dir, \"params\"+str(time_idx)+\".npz\")\n np.savez(save_path, **params_to_save)" }, { "identifier": "setup_camera", "path": "utils/recon_helpers.py", "snippet": "def setup_camera(w, h, k, w2c, near=0.01, far=100):\n fx, fy, cx, cy = k[0][0], k[1][1], k[0][2], k[1][2]\n w2c = torch.tensor(w2c).cuda().float()\n cam_center = torch.inverse(w2c)[:3, 3]\n w2c = w2c.unsqueeze(0).transpose(1, 2)\n opengl_proj = torch.tensor([[2 * fx / w, 0.0, -(w - 2 * cx) / w, 0.0],\n [0.0, 2 * fy / h, -(h - 2 * cy) / h, 0.0],\n [0.0, 0.0, far / (far - near), -(far * near) / (far - near)],\n [0.0, 0.0, 1.0, 0.0]]).cuda().float().unsqueeze(0).transpose(1, 2)\n full_proj = w2c.bmm(opengl_proj)\n cam = Camera(\n image_height=h,\n image_width=w,\n tanfovx=w / (2 * fx),\n tanfovy=h / (2 * fy),\n bg=torch.tensor([0, 0, 0], dtype=torch.float32, device=\"cuda\"),\n scale_modifier=1.0,\n viewmatrix=w2c,\n projmatrix=full_proj,\n sh_degree=0,\n campos=cam_center,\n prefiltered=False\n )\n return cam" }, { "identifier": "params2rendervar", "path": "utils/gs_helpers.py", "snippet": "def params2rendervar(params):\n rendervar = {\n 'means3D': params['means3D'],\n 'colors_precomp': params['rgb_colors'],\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "params2depthplussilhouette", "path": "utils/gs_helpers.py", "snippet": "def params2depthplussilhouette(params, w2c):\n rendervar = {\n 'means3D': params['means3D'],\n 'colors_precomp': get_depth_and_silhouette(params['means3D'], w2c),\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "transformed_params2depthplussilhouette", "path": "utils/gs_helpers.py", "snippet": "def transformed_params2depthplussilhouette(params, w2c, transformed_pts):\n rendervar = {\n 'means3D': transformed_pts,\n 'colors_precomp': get_depth_and_silhouette(transformed_pts, w2c),\n 'rotations': F.normalize(params['unnorm_rotations']),\n 'opacities': torch.sigmoid(params['logit_opacities']),\n 'scales': torch.exp(torch.tile(params['log_scales'], (1, 3))),\n 'means2D': torch.zeros_like(params['means3D'], requires_grad=True, device=\"cuda\") + 0\n }\n return rendervar" }, { "identifier": "transform_to_frame", "path": "utils/gs_helpers.py", "snippet": "def transform_to_frame(params, time_idx, gaussians_grad, camera_grad):\n \"\"\"\n Function to transform Isotropic Gaussians from world frame to camera frame.\n \n Args:\n params: dict of parameters\n time_idx: time index to transform to\n gaussians_grad: enable gradients for Gaussians\n camera_grad: enable gradients for camera pose\n \n Returns:\n transformed_pts: Transformed Centers of Gaussians\n \"\"\"\n # Get 
Frame Camera Pose\n if camera_grad:\n cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx])\n cam_tran = params['cam_trans'][..., time_idx]\n else:\n cam_rot = F.normalize(params['cam_unnorm_rots'][..., time_idx].detach())\n cam_tran = params['cam_trans'][..., time_idx].detach()\n rel_w2c = torch.eye(4).cuda().float()\n rel_w2c[:3, :3] = build_rotation(cam_rot)\n rel_w2c[:3, 3] = cam_tran\n\n # Get Centers and norm Rots of Gaussians in World Frame\n if gaussians_grad:\n pts = params['means3D']\n else:\n pts = params['means3D'].detach()\n \n # Transform Centers and Unnorm Rots of Gaussians to Camera Frame\n pts_ones = torch.ones(pts.shape[0], 1).cuda().float()\n pts4 = torch.cat((pts, pts_ones), dim=1)\n transformed_pts = (rel_w2c @ pts4.T).T[:, :3]\n\n return transformed_pts" }, { "identifier": "report_progress", "path": "utils/gs_helpers.py", "snippet": "def report_progress(params, data, i, progress_bar, iter_time_idx, sil_thres, every_i=1, qual_every_i=1, \n tracking=False, mapping=False, wandb_run=None, wandb_step=None, wandb_save_qual=False, online_time_idx=None):\n if i % every_i == 0 or i == 1:\n if wandb_run is not None:\n if tracking:\n stage = \"Tracking\"\n elif mapping:\n stage = \"Mapping\"\n else:\n stage = \"Current Frame Optimization\"\n\n # Initialize Render Variables\n rendervar = params2rendervar(params)\n depth_sil_rendervar = params2depthplussilhouette(params, data['w2c'])\n\n # Initialize Render Variables\n depth_sil, _, _, = Renderer(raster_settings=data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n\n im, _, _, = Renderer(raster_settings=data['cam'])(**rendervar)\n if tracking:\n psnr = calc_psnr(im * presence_sil_mask, data['im'] * presence_sil_mask).mean()\n else:\n psnr = calc_psnr(im, data['im']).mean()\n\n if tracking:\n diff_depth_rmse = torch.sqrt((((rastered_depth - data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt(((rastered_depth - data['depth']) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n\n if not mapping:\n progress_bar.set_postfix({f\"Time-Step: {iter_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | RMSE\": f\"{rmse:.{7}}\"})\n progress_bar.update(every_i)\n else:\n progress_bar.set_postfix({f\"Time-Step: {online_time_idx} | Frame {data['id']} | PSNR: {psnr:.{7}} | RMSE\": f\"{rmse:.{7}}\"})\n progress_bar.update(every_i)\n \n if wandb_run is not None:\n wandb_run.log({f\"{stage} PSNR\": psnr, f\"{stage} RMSE\": rmse}, step=wandb_step)\n \n if wandb_save_qual and (i % qual_every_i == 0 or i == 1):\n # Silhouette Mask\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n\n # Log plot to wandb\n if not mapping:\n fig_title = f\"Time-Step: {iter_time_idx} | Iter: {i} | Frame: {data['id']}\"\n else:\n fig_title = f\"Time-Step: {online_time_idx} | Iter: {i} | Frame: {data['id']}\"\n plot_rgbd_silhouette(data['im'], data['depth'], im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, wandb_run=wandb_run, wandb_step=wandb_step, \n wandb_title=f\"{stage} Qual Viz\")" }, { "identifier": "eval", "path": "utils/gs_helpers.py", "snippet": "def eval(dataset, final_params, num_frames, eval_dir, sil_thres, mapping_iters, add_new_gaussians, 
wandb_run=None, wandb_save_qual=False):\n print(\"Evaluating Final Parameters ...\")\n psnr_list = []\n rmse_list = []\n lpips_list = []\n ssim_list = []\n plot_dir = os.path.join(eval_dir, \"plots\")\n os.makedirs(plot_dir, exist_ok=True)\n\n gt_w2c_list = []\n for time_idx in tqdm(range(num_frames)):\n # Get RGB-D Data & Camera Parameters\n color, depth, intrinsics, pose = dataset[time_idx]\n gt_w2c = torch.linalg.inv(pose)\n gt_w2c_list.append(gt_w2c)\n intrinsics = intrinsics[:3, :3]\n\n # Process RGB-D Data\n color = color.permute(2, 0, 1) / 255 # (H, W, C) -> (C, H, W)\n depth = depth.permute(2, 0, 1) # (H, W, C) -> (C, H, W)\n\n # Process Camera Parameters\n w2c = torch.linalg.inv(pose)\n if time_idx == 0:\n first_frame_w2c = w2c\n # Setup Camera\n cam = setup_camera(color.shape[2], color.shape[1], intrinsics.cpu().numpy(), w2c.detach().cpu().numpy())\n \n # Define current frame data\n curr_data = {'cam': cam, 'im': color, 'depth': depth, 'id': time_idx, 'intrinsics': intrinsics, 'w2c': w2c}\n\n # Initialize Render Variables\n rendervar = params2rendervar(final_params)\n depth_sil_rendervar = params2depthplussilhouette(final_params, w2c)\n\n # Render Depth & Silhouette\n depth_sil, _, _, = Renderer(raster_settings=curr_data['cam'])(**depth_sil_rendervar)\n rastered_depth = depth_sil[0, :, :].unsqueeze(0)\n valid_depth_mask = (curr_data['depth'] > 0)\n silhouette = depth_sil[1, :, :]\n presence_sil_mask = (silhouette > sil_thres)\n \n # Render RGB and Calculate PSNR\n im, radius, _, = Renderer(raster_settings=curr_data['cam'])(**rendervar)\n if mapping_iters==0 and not add_new_gaussians:\n weighted_im = im * presence_sil_mask\n weighted_gt_im = curr_data['im'] * presence_sil_mask\n psnr = calc_psnr(weighted_im, weighted_gt_im).mean()\n ssim = ms_ssim(weighted_im.unsqueeze(0).cpu(), weighted_gt_im.unsqueeze(0).cpu(), \n data_range=1.0, size_average=True)\n lpips_score = loss_fn_alex(torch.clamp(weighted_im.unsqueeze(0), 0.0, 1.0),\n torch.clamp(weighted_gt_im.unsqueeze(0), 0.0, 1.0)).item()\n else:\n psnr = calc_psnr(im, curr_data['im']).mean()\n ssim = ms_ssim(im.unsqueeze(0).cpu(), curr_data['im'].unsqueeze(0).cpu(), \n data_range=1.0, size_average=True)\n lpips_score = loss_fn_alex(torch.clamp(im.unsqueeze(0), 0.0, 1.0),\n torch.clamp(curr_data['im'].unsqueeze(0), 0.0, 1.0)).item()\n\n psnr_list.append(psnr.cpu().numpy())\n ssim_list.append(ssim.cpu().numpy())\n lpips_list.append(lpips_score)\n\n # Compute Depth RMSE\n if mapping_iters==0 and not add_new_gaussians:\n diff_depth_rmse = torch.sqrt((((rastered_depth - curr_data['depth']) * presence_sil_mask) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n else:\n diff_depth_rmse = torch.sqrt(((rastered_depth - curr_data['depth']) ** 2))\n diff_depth_rmse = diff_depth_rmse * valid_depth_mask\n rmse = diff_depth_rmse.sum() / valid_depth_mask.sum()\n rmse_list.append(rmse.cpu().numpy())\n\n # Plot the Ground Truth and Rasterized RGB & Depth, along with Silhouette\n fig_title = \"Time Step: {}\".format(time_idx)\n plot_name = \"%04d\" % time_idx\n presence_sil_mask = presence_sil_mask.detach().cpu().numpy()\n if wandb_run is None:\n plot_rgbd_silhouette(color, depth, im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, plot_dir, \n plot_name=plot_name, save_plot=True)\n elif wandb_save_qual:\n plot_rgbd_silhouette(color, depth, im, rastered_depth, presence_sil_mask, diff_depth_rmse,\n psnr, rmse, fig_title, plot_dir, \n plot_name=plot_name, 
save_plot=True,\n wandb_run=wandb_run, wandb_step=None, \n wandb_title=\"Eval Qual Viz\")\n\n # Compute Average Metrics\n psnr_list = np.array(psnr_list)\n rmse_list = np.array(rmse_list)\n ssim_list = np.array(ssim_list)\n lpips_list = np.array(lpips_list)\n avg_psnr = psnr_list.mean()\n avg_rmse = rmse_list.mean()\n avg_ssim = ssim_list.mean()\n avg_lpips = lpips_list.mean()\n print(\"Average PSNR: {:.2f}\".format(avg_psnr))\n print(\"Average Depth RMSE: {:.2f}\".format(avg_rmse))\n print(\"Average MS-SSIM: {:.2f}\".format(avg_ssim))\n print(\"Average LPIPS: {:.2f}\".format(avg_lpips))\n\n if wandb_run is not None:\n wandb_run.log({\"Average PSNR\": avg_psnr, \"Average Depth RMSE\": avg_rmse, \"Average MS-SSIM\": avg_ssim, \"Average LPIPS\": avg_lpips})\n\n # # Save metric lists as text files\n # np.savetxt(os.path.join(eval_dir, \"psnr.txt\"), psnr_list)\n # np.savetxt(os.path.join(eval_dir, \"rmse.txt\"), rmse_list)\n # np.savetxt(os.path.join(eval_dir, \"ssim.txt\"), ssim_list)\n # np.savetxt(os.path.join(eval_dir, \"lpips.txt\"), lpips_list)\n\n # # Plot PSNR & RMSE as line plots\n # fig, axs = plt.subplots(1, 2, figsize=(12, 4))\n # axs[0].plot(np.arange(num_frames), psnr_list)\n # axs[0].set_title(\"RGB PSNR\")\n # axs[0].set_xlabel(\"Time Step\")\n # axs[0].set_ylabel(\"PSNR\")\n # axs[1].plot(np.arange(num_frames), rmse_list)\n # axs[1].set_title(\"Depth RMSE\")\n # axs[1].set_xlabel(\"Time Step\")\n # axs[1].set_ylabel(\"RMSE\")\n # fig.suptitle(\"Average PSNR: {:.2f}, Average Depth RMSE: {:.2f}\".format(avg_psnr, avg_rmse), y=1.05, fontsize=16)\n # plt.savefig(os.path.join(eval_dir, \"metrics.png\"), bbox_inches='tight')\n # if wandb_run is not None:\n # wandb_run.log({\"Eval Metrics\": fig})\n # plt.close()" }, { "identifier": "l1_loss_v1", "path": "utils/gs_helpers.py", "snippet": "def l1_loss_v1(x, y):\n return torch.abs((x - y)).mean()" }, { "identifier": "matrix_to_quaternion", "path": "utils/gs_helpers.py", "snippet": "def matrix_to_quaternion(matrix: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Convert rotations given as rotation matrices to quaternions.\n\n Args:\n matrix: Rotation matrices as tensor of shape (..., 3, 3).\n\n Returns:\n quaternions with real part first, as tensor of shape (..., 4).\n Source: https://pytorch3d.readthedocs.io/en/latest/_modules/pytorch3d/transforms/rotation_conversions.html#matrix_to_quaternion\n \"\"\"\n if matrix.size(-1) != 3 or matrix.size(-2) != 3:\n raise ValueError(f\"Invalid rotation matrix shape {matrix.shape}.\")\n\n batch_dim = matrix.shape[:-2]\n m00, m01, m02, m10, m11, m12, m20, m21, m22 = torch.unbind(\n matrix.reshape(batch_dim + (9,)), dim=-1\n )\n\n q_abs = _sqrt_positive_part(\n torch.stack(\n [\n 1.0 + m00 + m11 + m22,\n 1.0 + m00 - m11 - m22,\n 1.0 - m00 + m11 - m22,\n 1.0 - m00 - m11 + m22,\n ],\n dim=-1,\n )\n )\n\n # we produce the desired quaternion multiplied by each of r, i, j, k\n quat_by_rijk = torch.stack(\n [\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([q_abs[..., 0] ** 2, m21 - m12, m02 - m20, m10 - m01], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m21 - m12, q_abs[..., 1] ** 2, m10 + m01, m02 + m20], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m02 - m20, m10 + m01, q_abs[..., 2] ** 2, m12 + m21], dim=-1),\n # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and\n # `int`.\n torch.stack([m10 - m01, m20 + m02, m21 + 
m12, q_abs[..., 3] ** 2], dim=-1),\n ],\n dim=-2,\n )\n\n # We floor here at 0.1 but the exact level is not important; if q_abs is small,\n # the candidate won't be picked.\n flr = torch.tensor(0.1).to(dtype=q_abs.dtype, device=q_abs.device)\n quat_candidates = quat_by_rijk / (2.0 * q_abs[..., None].max(flr))\n\n # if not for numerical problems, quat_candidates[i] should be same (up to a sign),\n # forall i; we pick the best-conditioned one (with the largest denominator)\n\n return quat_candidates[\n F.one_hot(q_abs.argmax(dim=-1), num_classes=4) > 0.5, :\n ].reshape(batch_dim + (4,))" }, { "identifier": "calc_ssim", "path": "utils/gs_external.py", "snippet": "def calc_ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" }, { "identifier": "build_rotation", "path": "utils/gs_external.py", "snippet": "def build_rotation(q):\n norm = torch.sqrt(q[:, 0] * q[:, 0] + q[:, 1] * q[:, 1] + q[:, 2] * q[:, 2] + q[:, 3] * q[:, 3])\n q = q / norm[:, None]\n rot = torch.zeros((q.size(0), 3, 3), device='cuda')\n r = q[:, 0]\n x = q[:, 1]\n y = q[:, 2]\n z = q[:, 3]\n rot[:, 0, 0] = 1 - 2 * (y * y + z * z)\n rot[:, 0, 1] = 2 * (x * y - r * z)\n rot[:, 0, 2] = 2 * (x * z + r * y)\n rot[:, 1, 0] = 2 * (x * y + r * z)\n rot[:, 1, 1] = 1 - 2 * (x * x + z * z)\n rot[:, 1, 2] = 2 * (y * z - r * x)\n rot[:, 2, 0] = 2 * (x * z - r * y)\n rot[:, 2, 1] = 2 * (y * z + r * x)\n rot[:, 2, 2] = 1 - 2 * (x * x + y * y)\n return rot" }, { "identifier": "densify", "path": "utils/gs_external.py", "snippet": "def densify(params, variables, optimizer, iter, densify_dict):\n if iter <= densify_dict['stop_after']:\n variables = accumulate_mean2d_gradient(variables)\n grad_thresh = densify_dict['grad_thresh']\n if (iter >= densify_dict['start_after']) and (iter % densify_dict['densify_every'] == 0):\n grads = variables['means2D_gradient_accum'] / variables['denom']\n grads[grads.isnan()] = 0.0\n to_clone = torch.logical_and(grads >= grad_thresh, (\n torch.max(torch.exp(params['log_scales']), dim=1).values <= 0.01 * variables['scene_radius']))\n new_params = {k: v[to_clone] for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n\n new_timestep_vars = torch.zeros(new_params['means3D'].shape[0], device=\"cuda\")\n new_timestep_vars = variables['timestep'][to_clone] \n variables['timestep'] = torch.cat((variables['timestep'], new_timestep_vars), dim=0)\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n\n padded_grad = torch.zeros(num_pts, device=\"cuda\")\n padded_grad[:grads.shape[0]] = grads\n to_split = torch.logical_and(padded_grad >= grad_thresh,\n torch.max(torch.exp(params['log_scales']), dim=1).values > 0.01 * variables[\n 'scene_radius'])\n n = densify_dict['num_to_split_into'] # number to split into\n new_params = {k: v[to_split].repeat(n, 1) for k, v in params.items() if k not in ['cam_unnorm_rots', 'cam_trans']}\n #track new variables for new formed points\n new_timestep_vars = torch.zeros(new_params['means3D'].shape[0], device=\"cuda\")\n new_timestep_vars = variables['timestep'][to_split].repeat(n)\n variables['timestep'] = torch.cat((variables['timestep'], new_timestep_vars), dim=0)\n\n stds = torch.exp(params['log_scales'])[to_split].repeat(n, 3)\n means = torch.zeros((stds.size(0), 3), 
device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = build_rotation(params['unnorm_rotations'][to_split]).repeat(n, 1, 1)\n new_params['means3D'] += torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1)\n new_params['log_scales'] = torch.log(torch.exp(new_params['log_scales']) / (0.8 * n))\n params = cat_params_to_optimizer(new_params, params, optimizer)\n num_pts = params['means3D'].shape[0]\n \n variables['means2D_gradient_accum'] = torch.zeros(num_pts, device=\"cuda\")\n variables['denom'] = torch.zeros(num_pts, device=\"cuda\")\n variables['max_2D_radius'] = torch.zeros(num_pts, device=\"cuda\")\n\n to_remove = torch.cat((to_split, torch.zeros(n * to_split.sum(), dtype=torch.bool, device=\"cuda\")))\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n if iter == densify_dict['stop_after']:\n remove_threshold = densify_dict['final_removal_opacity_threshold']\n else:\n remove_threshold = densify_dict['removal_opacity_threshold']\n to_remove = (torch.sigmoid(params['logit_opacities']) < remove_threshold).squeeze()\n if iter >= densify_dict['remove_big_after']:\n big_points_ws = torch.exp(params['log_scales']).max(dim=1).values > 0.1 * variables['scene_radius']\n to_remove = torch.logical_or(to_remove, big_points_ws)\n params, variables = remove_points(to_remove, params, variables, optimizer)\n\n torch.cuda.empty_cache()\n\n # Reset Opacities for all Gaussians (This is not desired for mapping on only current frame)\n if iter > 0 and iter % densify_dict['reset_opacities_every'] == 0 and densify_dict['reset_opacities']:\n new_params = {'logit_opacities': inverse_sigmoid(torch.ones_like(params['logit_opacities']) * 0.01)}\n params = update_params_and_optimizer(new_params, params, optimizer)\n\n return params, variables" }, { "identifier": "get_expon_lr_func", "path": "utils/gs_external.py", "snippet": "def get_expon_lr_func(\n lr_init, lr_final, lr_delay_steps=0, lr_delay_mult=1.0, max_steps=1000000\n):\n \"\"\"\n Copied from Plenoxels\n\n Continuous learning rate decay function. Adapted from JaxNeRF\n The returned rate is lr_init when step=0 and lr_final when step=max_steps, and\n is log-linearly interpolated elsewhere (equivalent to exponential decay).\n If lr_delay_steps>0 then the learning rate will be scaled by some smooth\n function of lr_delay_mult, such that the initial learning rate is\n lr_init*lr_delay_mult at the beginning of optimization but will be eased back\n to the normal learning rate when steps>lr_delay_steps.\n :param conf: config subtree 'lr' or similar\n :param max_steps: int, the number of steps during optimization.\n :return HoF which takes step as input\n \"\"\"\n\n def helper(step):\n if step < 0 or (lr_init == 0.0 and lr_final == 0.0):\n # Disable this parameter\n return 0.0\n if lr_delay_steps > 0:\n # A kind of reverse cosine decay.\n delay_rate = lr_delay_mult + (1 - lr_delay_mult) * np.sin(\n 0.5 * np.pi * np.clip(step / lr_delay_steps, 0, 1)\n )\n else:\n delay_rate = 1.0\n t = np.clip(step / max_steps, 0, 1)\n log_lerp = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)\n return delay_rate * log_lerp\n\n return helper" }, { "identifier": "update_learning_rate", "path": "utils/gs_external.py", "snippet": "def update_learning_rate(optimizer, means3D_scheduler, iteration):\n ''' Learning rate scheduling per step '''\n for param_group in optimizer.param_groups:\n if param_group[\"name\"] == \"means3D\":\n lr = means3D_scheduler(iteration)\n param_group['lr'] = lr\n return lr" } ]
import argparse
import os
import random
import sys
import shutil

import cv2
import numpy as np
import torch
import wandb
from importlib.machinery import SourceFileLoader
from tqdm import tqdm

from datasets.gradslam_datasets import (
    load_dataset_config,
    ICLDataset,
    ReplicaDataset,
    AzureKinectDataset,
    ScannetDataset,
    Ai2thorDataset,
    Record3DDataset,
    RealsenseDataset,
    TUMDataset,
    ScannetPPDataset,
    NeRFCaptureDataset
)
from utils.common_utils import seed_everything, save_seq_params, save_params, save_params_ckpt, save_seq_params_ckpt
from utils.recon_helpers import setup_camera
from utils.gs_helpers import (
    params2rendervar, params2depthplussilhouette,
    transformed_params2depthplussilhouette,
    transform_to_frame, report_progress, eval,
    l1_loss_v1, matrix_to_quaternion
)
from utils.gs_external import (
    calc_ssim, build_rotation,
    densify, get_expon_lr_func, update_learning_rate
)

from diff_gaussian_rasterization import GaussianRasterizer as Renderer
17,050
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _BASE_DIR)

print("System Paths:")
for p in sys.path:
    print(p)


def get_dataset(config_dict, basedir, sequence, **kwargs):
    if config_dict["dataset_name"].lower() in ["icl"]:
        return ICLDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["replica"]:
        return ReplicaDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]:
        return AzureKinectDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannet"]:
        return ScannetDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["ai2thor"]:
        return Ai2thorDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["record3d"]:
_BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _BASE_DIR)

print("System Paths:")
for p in sys.path:
    print(p)


def get_dataset(config_dict, basedir, sequence, **kwargs):
    if config_dict["dataset_name"].lower() in ["icl"]:
        return ICLDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["replica"]:
        return ReplicaDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["azure", "azurekinect"]:
        return AzureKinectDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["scannet"]:
        return ScannetDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["ai2thor"]:
        return Ai2thorDataset(config_dict, basedir, sequence, **kwargs)
    elif config_dict["dataset_name"].lower() in ["record3d"]:
return Record3DDataset(config_dict, basedir, sequence, **kwargs)
7
2023-11-30 20:26:47+00:00
24k
zhyever/PatchFusion
zoedepth/trainers/zoedepth_custom_trainer.py
[ { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forward(self, input, target, mask=None):\n input = extract_key(input, KEY_OUTPUT)\n \n if mask is not None:\n input_filtered = input[mask]\n target_filtered = target[mask]\n\n with amp.autocast(enabled=False): # amp causes NaNs in this loss function\n alpha = 1e-7\n g = torch.log(input_filtered + alpha) - torch.log(target_filtered + alpha)\n Dg = torch.var(g) + self.beta * torch.pow(torch.mean(g), 2)\n loss = 10 * torch.sqrt(Dg)\n\n if torch.isnan(loss):\n print(\"Nan SILog loss\")\n print(\"input:\", input.shape)\n print(\"target:\", target.shape)\n print(\"G\", torch.sum(torch.isnan(g)))\n print(\"Input min max\", torch.min(input), torch.max(input))\n print(\"Target min max\", torch.min(target), torch.max(target))\n print(\"Dg\", torch.isnan(Dg))\n print(\"loss\", torch.isnan(loss))\n\n return loss" }, { "identifier": "DistributionLoss", "path": "zoedepth/trainers/loss_sample.py", "snippet": "class DistributionLoss(nn.Module):\n def __init__(self, max_depth):\n super(DistributionLoss, self).__init__()\n self.name = 'DistributionLoss'\n self.max_depth = max_depth\n\n def forward(self, input, target, mask=None, dist='biLaplacian'):\n \n \n mu0 = input['mu0']\n mu1 = input['mu1']\n sigma0 = input['sigma0']\n sigma1 = input['sigma1']\n pi0 = input['pi0']\n pi1 = input['pi1']\n \n pred_mask = (pi0 / sigma0 > pi1 / sigma1).float()\n pred_depth = (mu0 * pred_mask + mu1 * (1. - pred_mask))\n pred_metric_depth = (1 - pred_depth) * self.max_depth\n\n\n if mask is not None:\n mu0 = mu0[mask]\n mu1 = mu1[mask]\n sigma0 = sigma0[mask]\n sigma1 = sigma1[mask]\n pi0 = pi0[mask]\n pi1 = pi1[mask]\n\n # real_input = real_depth[mask]\n \n real_input = mu0\n pred_metric_depth = pred_metric_depth[mask]\n record_target = target[mask]\n\n\n target_filtered = 1 - target[mask] / self.max_depth\n bi_loss = bimodal_loss(mu0, mu1, sigma0, sigma1, pi0, pi1, target_filtered, dist=dist).mean()\n # print(bi_loss) \n\n alpha = 1e-7\n beta = 0.15\n g = torch.log(real_input + alpha) - torch.log(record_target + alpha)\n Dg = torch.var(g) + beta * torch.pow(torch.mean(g), 2)\n sig_loss = 10 * torch.sqrt(Dg)\n # print(sig_loss)\n \n return bi_loss, sig_loss" }, { "identifier": "SILogLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class SILogLoss(nn.Module):\n \"\"\"SILog loss (pixel-wise)\"\"\"\n def __init__(self, beta=0.15):\n super(SILogLoss, self).__init__()\n self.name = 'SILog'\n self.beta = beta\n\n def forward(self, input, target, mask=None, interpolate=True, return_interpolated=False):\n hack_input = input\n\n input = extract_key(input, KEY_OUTPUT)\n if input.shape[-1] != target.shape[-1] and interpolate:\n input = nn.functional.interpolate(\n input, target.shape[-2:], mode='bilinear', align_corners=True)\n intr_input = input\n else:\n intr_input = input\n\n if target.ndim == 3:\n target = target.unsqueeze(1)\n\n if mask is not None:\n if mask.ndim == 3:\n mask = mask.unsqueeze(1)\n\n input = input[mask]\n target = target[mask]\n\n with amp.autocast(enabled=False): # amp causes NaNs in this loss function\n alpha = 1e-7\n g = torch.log(input + alpha) - torch.log(target + alpha)\n\n # n, c, h, w = g.shape\n # norm = 1/(h*w)\n # Dg = norm * torch.sum(g**2) - (0.85/(norm**2)) * (torch.sum(g))**2\n\n Dg = torch.var(g) + self.beta * 
torch.pow(torch.mean(g), 2)\n\n loss = 10 * torch.sqrt(Dg)\n\n if torch.isnan(loss):\n if input.numel() == 0:\n loss = torch.mean(hack_input) * 0\n if not return_interpolated:\n return loss\n return loss, intr_input\n \n print(\"Nan SILog loss\")\n print(\"input:\", input.shape)\n print(\"target:\", target.shape)\n print(\"G\", torch.sum(torch.isnan(g)))\n print(\"Input min max\", torch.min(input), torch.max(input))\n print(\"Target min max\", torch.min(target), torch.max(target))\n print(\"Dg\", torch.isnan(Dg))\n print(\"loss\", torch.isnan(loss))\n\n if not return_interpolated:\n return loss\n\n return loss, intr_input" }, { "identifier": "BudgetConstraint", "path": "zoedepth/trainers/loss.py", "snippet": "class BudgetConstraint(nn.Module):\n \"\"\"\n Given budget constraint to reduce expected inference FLOPs in the Dynamic Network.\n \"\"\"\n def __init__(self, loss_mu, flops_all, warm_up=True):\n super().__init__()\n self.loss_mu = loss_mu\n self.flops_all = flops_all\n self.warm_up = warm_up\n\n def forward(self, flops_expt, warm_up_rate=1.0):\n if self.warm_up:\n warm_up_rate = min(1.0, warm_up_rate)\n else:\n warm_up_rate = 1.0\n losses = warm_up_rate * ((flops_expt / self.flops_all - self.loss_mu)**2)\n return losses" }, { "identifier": "HistogramMatchingLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class HistogramMatchingLoss(nn.Module):\n def __init__(self, min_depth, max_depth, bins=512):\n super(HistogramMatchingLoss, self).__init__()\n self.name = 'HistogramMatchingLoss'\n self.min_depth = min_depth\n self.max_depth = max_depth\n self.bins = bins\n\n def forward(self, input, target, mask, interpolate=True):\n if input.shape[-1] != mask.shape[-1] and interpolate:\n input = nn.functional.interpolate(\n input, mask.shape[-2:], mode='bilinear', align_corners=True)\n \n if target.shape[-1] != mask.shape[-1] and interpolate:\n target = nn.functional.interpolate(\n target, mask.shape[-2:], mode='bilinear', align_corners=True)\n\n input[~mask] = 0\n target[~mask] = 0\n\n\n pred_hist = torch.histc(input, bins=self.bins, min=self.min_depth, max=self.max_depth)\n gt_hist = torch.histc(target, bins=self.bins, min=self.min_depth, max=self.max_depth)\n\n pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # print(pred_hist.shape)\n # print(pred_hist)\n # _pred_hist = pred_hist.detach().cpu().numpy()\n # _gt_hist = gt_hist.detach().cpu().numpy()\n # plt.subplot(2, 1, 1)\n # plt.bar(range(len(_pred_hist)), _pred_hist)\n # plt.subplot(2, 1, 2)\n # plt.bar(range(len(_gt_hist)), _gt_hist)\n # plt.savefig('./debug_scale.png')\n\n # Compute cumulative histograms (CDF)\n cdf_pred = torch.cumsum(pred_hist, dim=0)\n cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # Compute Earth Mover's Distance (EMD) between the CDFs\n loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n # loss = torch.mean(torch.sqrt((pred_hist - gt_hist)**2))\n # loss = F.kl_div(torch.log(pred_hist + 1e-10), gt_hist, reduction='mean')\n \n return loss" }, { "identifier": "SSIM", "path": "zoedepth/trainers/loss.py", "snippet": "class SSIM(torch.nn.Module):\n def __init__(self, window_size = 11, size_average = True):\n super(SSIM, self).__init__()\n self.window_size = window_size\n self.size_average = size_average\n self.channel = 1\n self.window = create_window(window_size, self.channel)\n\n def forward(self, img1, img2, mask, interpolate=True):\n if img1.shape[-1] != mask.shape[-1] and interpolate:\n img1 = nn.functional.interpolate(\n img1, mask.shape[-2:], mode='bilinear', 
align_corners=True)\n \n if img2.shape[-1] != mask.shape[-1] and interpolate:\n img2 = nn.functional.interpolate(\n img2, mask.shape[-2:], mode='bilinear', align_corners=True)\n\n img1[~mask] = 0\n img2[~mask] = 0\n\n (_, channel, _, _) = img1.size()\n\n if channel == self.channel and self.window.data.type() == img1.data.type():\n window = self.window\n else:\n window = create_window(self.window_size, channel)\n \n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n \n self.window = window\n self.channel = channel\n\n\n loss = _ssim(img1, img2, window, self.window_size, channel, self.size_average)\n return loss" }, { "identifier": "ConsistencyLoss", "path": "zoedepth/trainers/loss.py", "snippet": "class ConsistencyLoss(nn.Module):\n def __init__(self, target, focus_flatten=False, wp=1) -> None:\n super().__init__()\n self.name = 'Consistency'\n self.target = target\n self.mode = 'no-resize'\n # self.mode = 'resize'\n self.focus_flatten = focus_flatten\n self.wp = wp\n\n def gradient_y(self, img):\n # gy = torch.cat([F.conv2d(img[:, i, :, :].unsqueeze(0), torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1) for i in range(img.shape[1])], 1)\n gy = F.conv2d(img, torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1)\n return gy\n\n def gradient_x(self, img):\n # gx = torch.cat([F.conv2d(img[:, i, :, :].unsqueeze(0), torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1) for i in range(img.shape[1])], 1)\n gx = F.conv2d(img, torch.Tensor([[1, 0, -1], [2, 0, -2], [1, 0, -1]]).view((1, 1, 3, 3)).to(img.device), padding=1)\n return gx\n\n def forward(self, depth_preds, shifts, mask, temp_features, pred_f=None):\n\n common_area_1_list = []\n common_area_2_list = []\n\n if self.focus_flatten:\n # only consider flatten place\n grad = kornia.filters.spatial_gradient(pred_f.detach())\n grad_x, grad_y = grad[:, :, 0, :, :], grad[:, :, 1, :, :]\n grad = torch.sqrt(grad_x ** 2 + grad_y ** 2)\n grad_ext = grad > 0.05 # over 5cm\n grad_ext = grad_ext.float()\n grad_blur = kornia.filters.gaussian_blur2d(grad_ext, (11, 11), (3, 3))\n grad_ext = grad_blur > 0 # over 5cm\n grad_ext = grad_blur == 0 \n mask = torch.logical_and(mask, grad_ext)\n\n\n if self.target == \"mix\":\n ## for feature\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(F.interpolate(mask.float(), (384, 512)).bool(), bs//2, dim=0)\n\n feat_ori_list = []\n feat_shift_list = []\n multi_level_mask = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n split_feat = torch.split(feature, bs//2, dim=0)\n\n _, _, h, w = split_feat[0].shape\n feat_ori_list.append(split_feat[0])\n feat_shift_list.append(split_feat[1])\n\n mask_ori_cur_scale = F.interpolate(split_mask[0].float(), (h, w)).bool()\n multi_level_mask.append(mask_ori_cur_scale)\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list, multi_level_mask)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n _, _, cur_scale_h, cur_scale_w = feat_ori_cur_level.shape\n scale_factor = int(384 / cur_scale_h)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level, shifts)): # iter bs (paired feat)\n c, _, _ = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = 
int(shift_bs[0] * (384/540) / scale_factor), int(shift_bs[1]* (512/960) / scale_factor)\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n consistency_loss_feat = consistency_loss\n\n \n common_area_1_list = []\n common_area_2_list = []\n\n ## for pred\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n for shift, depth_ori, depth_shift, mask_ori, mask_shift in zip(shifts, split_depth[0], split_depth[1], split_mask[0], split_mask[1]):\n shift_h, shift_w = shift[0], shift[1]\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, shift_h:, shift_w:]\n common_area_2 = depth_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:]\n # mask_debug = mask_shift[:, :-shift_h, :-shift_w]\n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = depth_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n # mask_debug = mask_shift[:, :-shift_h, abs(shift_w):]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = depth_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n # mask_debug = mask_shift[:, abs(shift_h):, abs(shift_w):]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = depth_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n # mask_debug = mask_shift[:, abs(shift_h):, :-shift_w]\n else:\n print(\"can you really reach here?\")\n \n common_area_1 = common_area_1[mask_common].flatten()\n common_area_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_1)\n common_area_2_list.append(common_area_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n # pred_hist = torch.histc(common_area_1, bins=512, min=0, max=80)\n # gt_hist = torch.histc(common_area_2, bins=512, min=0, 
max=80)\n\n # pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n # gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # # Compute cumulative histograms (CDF)\n # cdf_pred = torch.cumsum(pred_hist, dim=0)\n # cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # # Compute Earth Mover's Distance (EMD) between the CDFs\n # consistency_loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n consistency_loss = F.mse_loss(common_area_1, common_area_2) \n consistency_loss_pred = consistency_loss\n\n consistency_loss = consistency_loss_pred * self.wp + consistency_loss_feat\n return consistency_loss\n \n elif 'feat' in self.target:\n if self.mode == 'resize':\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n feat_ori_list = []\n feat_shift_list = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n if idx < 4:\n continue\n \n split_feat = torch.split(feature, bs//2, dim=0)\n f = F.interpolate(split_feat[0], (h, w), mode='bilinear', align_corners=True)\n feat_ori_list.append(f)\n f = F.interpolate(split_feat[1], (h, w), mode='bilinear', align_corners=True)\n feat_shift_list.append(f)\n\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, split_mask[0], shifts)): # iter bs (paired feat)\n c, h, w = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = shift_bs[0], shift_bs[1]\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n # common_area_masked_1 = common_area_1.flatten()\n # common_area_masked_2 = common_area_2.flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n\n return consistency_loss\n \n\n else:\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n mask = F.interpolate(mask.float(), (384, 512)).bool() # back to 384, 512\n split_mask = torch.split(mask, bs//2, dim=0)\n\n feat_ori_list = []\n feat_shift_list = []\n multi_level_mask = []\n\n for idx, feature in enumerate(temp_features): # multi-level\n split_feat = torch.split(feature, bs//2, dim=0)\n\n _, _, 
h, w = split_feat[0].shape\n feat_ori_list.append(split_feat[0])\n feat_shift_list.append(split_feat[1])\n\n mask_ori_cur_scale = F.interpolate(split_mask[0].float(), (h, w)).bool()\n multi_level_mask.append(mask_ori_cur_scale)\n\n for idx_out, (feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level) in enumerate(zip(feat_ori_list, feat_shift_list, multi_level_mask)): # iter multi-scale\n scale_factor = 2 ** (5 - idx_out)\n _, _, cur_scale_h, cur_scale_w = feat_ori_cur_level.shape\n scale_factor = int(384 / cur_scale_h)\n\n for idx_in, (feat_ori, feat_shift, mask_ori, shift_bs) in enumerate(zip(feat_ori_cur_level, feat_shift_cur_level, mask_ori_cur_level, shifts)): # iter bs (paired feat)\n c, _, _ = feat_ori.shape\n mask_ori = mask_ori.repeat(c, 1, 1)\n shift_h, shift_w = int(shift_bs[0] * (384/540) / scale_factor), int(shift_bs[1]* (512/960) / scale_factor)\n\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, shift_h:, shift_w:]\n common_area_2 = feat_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:] \n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = feat_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = feat_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = feat_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = feat_shift[:, abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n else:\n print(\"can you really reach here?\")\n\n common_area_masked_1 = common_area_1[mask_common].flatten()\n common_area_masked_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_masked_1)\n common_area_2_list.append(common_area_masked_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n return consistency_loss\n \n elif self.target == 'pred':\n bs, c, h, w = depth_preds.shape\n split_depth = torch.split(depth_preds, bs//2, dim=0)\n split_mask = torch.split(mask, bs//2, dim=0)\n \n for shift, depth_ori, depth_shift, mask_ori, mask_shift in zip(shifts, split_depth[0], split_depth[1], split_mask[0], split_mask[1]):\n shift_h, shift_w = shift[0], shift[1]\n if shift_h >= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, shift_h:, shift_w:]\n common_area_2 = depth_shift[:, :-shift_h, :-shift_w]\n mask_common = mask_ori[:, shift_h:, shift_w:]\n # mask_debug = mask_shift[:, :-shift_h, :-shift_w]\n elif shift_h >= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, shift_h:, :-abs(shift_w)]\n common_area_2 = depth_shift[:, :-shift_h, abs(shift_w):]\n mask_common = mask_ori[:, shift_h:, :-abs(shift_w)]\n # mask_debug = mask_shift[:, :-shift_h, abs(shift_w):]\n elif shift_h <= 0 and shift_w <= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h), :-abs(shift_w)]\n common_area_2 = depth_shift[:, abs(shift_h):, abs(shift_w):]\n mask_common = mask_ori[:, :-abs(shift_h), :-abs(shift_w)]\n # mask_debug = mask_shift[:, abs(shift_h):, abs(shift_w):]\n elif shift_h <= 0 and shift_w >= 0:\n common_area_1 = depth_ori[:, :-abs(shift_h):, shift_w:]\n common_area_2 = depth_shift[:, 
abs(shift_h):, :-shift_w]\n mask_common = mask_ori[:, :-abs(shift_h):, shift_w:]\n # mask_debug = mask_shift[:, abs(shift_h):, :-shift_w]\n else:\n print(\"can you really reach here?\")\n \n common_area_1 = common_area_1[mask_common].flatten()\n common_area_2 = common_area_2[mask_common].flatten()\n common_area_1_list.append(common_area_1)\n common_area_2_list.append(common_area_2)\n\n common_area_1 = torch.cat(common_area_1_list)\n common_area_2 = torch.cat(common_area_2_list)\n if common_area_1.numel() == 0 or common_area_2.numel() == 0:\n consistency_loss = torch.Tensor([0]).squeeze()\n else:\n # pred_hist = torch.histc(common_area_1, bins=512, min=0, max=80)\n # gt_hist = torch.histc(common_area_2, bins=512, min=0, max=80)\n\n # pred_hist /= pred_hist.sum(dim=0, keepdim=True)\n # gt_hist /= gt_hist.sum(dim=0, keepdim=True)\n\n # # Compute cumulative histograms (CDF)\n # cdf_pred = torch.cumsum(pred_hist, dim=0)\n # cdf_gt = torch.cumsum(gt_hist, dim=0)\n\n # # Compute Earth Mover's Distance (EMD) between the CDFs\n # consistency_loss = torch.mean(torch.abs(cdf_pred - cdf_gt))\n consistency_loss = F.mse_loss(common_area_1, common_area_2)\n \n return consistency_loss\n \n else:\n raise NotImplementedError" }, { "identifier": "DATASETS_CONFIG", "path": "zoedepth/utils/config.py", "snippet": "DATASETS_CONFIG = {\n \"kitti\": {\n \"dataset\": \"kitti\",\n \"min_depth\": 0.001,\n \"max_depth\": 80,\n \"data_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file\": \"./train_test_inputs/kitti_eigen_train_files_with_gt.txt\",\n \"input_height\": 352,\n \"input_width\": 1216, # 704\n \"data_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file_eval\": \"./train_test_inputs/kitti_eigen_test_files_with_gt.txt\",\n\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": True,\n \"garg_crop\": True,\n \"eigen_crop\": False,\n \"use_right\": False\n },\n \"kitti_test\": {\n \"dataset\": \"kitti\",\n \"min_depth\": 0.001,\n \"max_depth\": 80,\n \"data_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file\": \"./train_test_inputs/kitti_eigen_train_files_with_gt.txt\",\n \"input_height\": 352,\n \"input_width\": 1216,\n \"data_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/raw\"),\n \"gt_path_eval\": os.path.join(HOME_DIR, \"shortcuts/datasets/kitti/gts\"),\n \"filenames_file_eval\": \"./train_test_inputs/kitti_eigen_test_files_with_gt.txt\",\n\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n\n \"do_random_rotate\": False,\n \"degree\": 1.0,\n \"do_kb_crop\": True,\n \"garg_crop\": True,\n \"eigen_crop\": False,\n \"use_right\": False\n },\n \"nyu\": {\n \"dataset\": \"nyu\",\n \"avoid_boundary\": False,\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 10,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"gt_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"filenames_file\": \"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder/nyu_train.txt\",\n \"input_height\": 480,\n \"input_width\": 640,\n \"data_path_eval\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"gt_path_eval\": 
os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder\"),\n \"filenames_file_eval\": \"/ibex/ai/home/liz0l/codes/datasets/nyu/data_folder/nyu_test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n },\n \"u4k\": {\n \"dataset\": \"u4k\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 80,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/u4k\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/val.txt\",\n # \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"mid\": {\n \"dataset\": \"mid\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 10,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/middlebury\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/val.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/middlebury/splits/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"gta\": {\n \"dataset\": \"gta\",\n \"min_depth\": 1e-3, # originally 0.1\n \"max_depth\": 80,\n \"data_path\": os.path.join(\"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080\"),\n \"filenames_train\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/train.txt\",\n \"input_height\": 480, # ? will not be used (random crop)\n \"input_width\": 640, # ? 
will not be used (random crop)\n \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/val.txt\",\n # \"filenames_val\": \"/ibex/ai/home/liz0l/codes/datasets/u4k/splits/test.txt\",\n \"filenames_test\": \"/ibex/ai/home/liz0l/codes/datasets/gta/GTAV_1080/test.txt\",\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth_diff\": -10,\n \"max_depth_diff\": 10,\n\n \"do_random_rotate\": True,\n \"degree\": 1.0,\n \"do_kb_crop\": False,\n \"garg_crop\": False,\n \"eigen_crop\": False,\n \n \"num_sample_inout\": 50000,\n # \"num_sample_inout\": 40000,\n \"sampling_strategy\": 'random',\n # \"sampling_strategy\": 'dda',\n \"dilation_factor\": 10,\n\n \"use_rgb\": False,\n \"do_normalize\": True, # do normalize in dataloader\n \"do_input_resize\": True\n },\n \"ibims\": {\n \"dataset\": \"ibims\",\n \"ibims_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/ibims/ibims1_core_raw/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"sunrgbd\": {\n \"dataset\": \"sunrgbd\",\n \"sunrgbd_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/SUNRGBD/test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 8,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diml_indoor\": {\n \"dataset\": \"diml_indoor\",\n \"diml_indoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diml_indoor_test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 0,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diml_outdoor\": {\n \"dataset\": \"diml_outdoor\",\n \"diml_outdoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diml_outdoor_test/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 2,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"diode_indoor\": {\n \"dataset\": \"diode_indoor\",\n \"diode_indoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diode_indoor/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 10,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"diode_outdoor\": {\n \"dataset\": \"diode_outdoor\",\n \"diode_outdoor_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/diode_outdoor/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"hypersim_test\": {\n \"dataset\": \"hypersim_test\",\n \"hypersim_test_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/hypersim_test/\"),\n \"eigen_crop\": True,\n \"garg_crop\": False,\n \"do_kb_crop\": False,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 10\n },\n \"vkitti\": {\n \"dataset\": \"vkitti\",\n \"vkitti_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/vkitti_test/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80\n },\n \"vkitti2\": {\n \"dataset\": \"vkitti2\",\n \"vkitti2_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/vkitti2/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n 
\"max_depth\": 80,\n },\n \"ddad\": {\n \"dataset\": \"ddad\",\n \"ddad_root\": os.path.join(HOME_DIR, \"shortcuts/datasets/ddad/ddad_val/\"),\n \"eigen_crop\": False,\n \"garg_crop\": True,\n \"do_kb_crop\": True,\n \"min_depth_eval\": 1e-3,\n \"max_depth_eval\": 80,\n \"min_depth\": 1e-3,\n \"max_depth\": 80,\n },\n}" }, { "identifier": "compute_metrics", "path": "zoedepth/utils/misc.py", "snippet": "def compute_metrics(gt, pred, interpolate=True, garg_crop=False, eigen_crop=True, dataset='nyu', min_depth_eval=0.1, max_depth_eval=10, disp_gt_edges=None, pred_depths=None, **kwargs):\n \"\"\"Compute metrics of predicted depth maps. Applies cropping and masking as necessary or specified via arguments. Refer to compute_errors for more details on metrics.\n \"\"\"\n if 'config' in kwargs:\n config = kwargs['config']\n garg_crop = config.garg_crop\n eigen_crop = config.eigen_crop\n min_depth_eval = config.min_depth_eval\n max_depth_eval = config.max_depth_eval\n\n if gt.shape[-2:] != pred.shape[-2:] and interpolate:\n pred = nn.functional.interpolate(\n pred.unsqueeze(dim=0).unsqueeze(dim=0), gt.shape[-2:], mode='bilinear', align_corners=True).squeeze()\n\n pred = pred.squeeze().cpu().numpy()\n pred[pred < min_depth_eval] = min_depth_eval\n pred[pred > max_depth_eval] = max_depth_eval\n pred[np.isinf(pred)] = max_depth_eval\n pred[np.isnan(pred)] = min_depth_eval\n\n gt_depth = gt.squeeze().cpu().numpy()\n valid_mask = np.logical_and(\n gt_depth > min_depth_eval, gt_depth < max_depth_eval)\n\n eval_mask = np.ones(valid_mask.shape)\n if garg_crop or eigen_crop:\n gt_height, gt_width = gt_depth.shape\n eval_mask = np.zeros(valid_mask.shape)\n\n if garg_crop:\n eval_mask[int(0.40810811 * gt_height):int(0.99189189 * gt_height),\n int(0.03594771 * gt_width):int(0.96405229 * gt_width)] = 1\n\n elif eigen_crop:\n # print(\"-\"*10, \" EIGEN CROP \", \"-\"*10)\n if dataset == 'kitti':\n eval_mask[int(0.3324324 * gt_height):int(0.91351351 * gt_height),\n int(0.0359477 * gt_width):int(0.96405229 * gt_width)] = 1\n else:\n # assert gt_depth.shape == (480, 640), \"Error: Eigen crop is currently only valid for (480, 640) images\"\n eval_mask[45:471, 41:601] = 1\n else:\n eval_mask = np.ones(valid_mask.shape)\n valid_mask = np.logical_and(valid_mask, eval_mask)\n\n # if dataset == 'nyu':\n # # pred = scale_shift_linear(torch.tensor(pred_depths), torch.tensor(pred), torch.tensor(valid_mask), fuse=False).numpy()\n # pred = scale_shift_linear(torch.tensor(gt), torch.tensor(pred), torch.tensor(valid_mask), fuse=False).numpy()\n \n metrics = compute_errors(gt_depth[valid_mask], pred[valid_mask])\n\n mask = valid_mask.squeeze() # squeeze\n gt = gt_depth\n pred = pred\n see_depth = 0\n if disp_gt_edges is None:\n print(\"Maybe we need edge maps from origin disp!\")\n edges = get_boundaries(gt, th=0.08, dilation=0)\n else:\n edges = disp_gt_edges\n \n mask = np.logical_and(mask, edges)\n import matplotlib.pyplot as plt\n if mask.sum() > 0:\n see_depth = soft_edge_error(pred, gt)[mask].mean()\n metrics['see'] = see_depth\n \n return metrics" }, { "identifier": "get_black_border", "path": "zoedepth/data/preprocess.py", "snippet": "def get_black_border(rgb_image, **kwargs) -> CropParams:\n \"\"\"Crops the black border of the RGB.\n\n Args:\n rgb: RGB image, shape (H, W, 3).\n\n Returns:\n Crop parameters.\n \"\"\"\n\n return get_border_params(rgb_image, value=0, **kwargs)" }, { "identifier": "BaseTrainer", "path": "zoedepth/trainers/base_trainer.py", "snippet": "def is_rank_zero(args):\n def __init__(self, config, 
model, train_loader, test_loader=None, device=None):\n def resize_to_target(self, prediction, target):\n def load_ckpt(self, checkpoint_dir=\"./checkpoints\", ckpt_type=\"best\"):\n def init_optimizer(self):\n def init_scheduler(self):\n def train_on_batch(self, batch, train_step):\n def validate_on_batch(self, batch, val_step):\n def raise_if_nan(self, losses):\n def iters_per_epoch(self):\n def total_iters(self):\n def should_early_stop(self):\n def train(self):\n def stringify_losses(L): return \"; \".join(map(\n def validate(self):\n def save_checkpoint(self, filename):\n def log_images(self, rgb: Dict[str, list] = {}, depth: Dict[str, list] = {}, scalar_field: Dict[str, list] = {}, prefix=\"\", scalar_cmap=\"turbo_r\", min_depth=None, max_depth=None):\n def log_line_plot(self, data):\n def log_bar_plot(self, title, labels, values):\nclass BaseTrainer:" }, { "identifier": "generatemask", "path": "zoedepth/utils/misc.py", "snippet": "def generatemask(size, k_size=-1, sigma=-1, h_factor=0.03, w_factor=0.02):\n # Generates a Guassian mask\n mask = np.zeros(size, dtype=np.float32)\n if sigma == -1:\n sigma = int(size[0]/16)\n if k_size == -1:\n k_size = int(2 * np.ceil(2 * int(size[0]/16)) + 1)\n # mask[int(0.02*size[0]):size[0] - int(0.02*size[0]), int(0.015*size[1]): size[1] - int(0.015*size[1])] = 1\n mask[int(h_factor*size[0]):size[0] - int(h_factor*size[0]), int(w_factor*size[1]): size[1] - int(w_factor*size[1])] = 1\n mask = cv2.GaussianBlur(mask, (int(k_size), int(k_size)), sigma)\n mask = (mask - mask.min()) / (mask.max() - mask.min())\n mask = mask.astype(np.float32)\n return mask" } ]
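For reference, here is a minimal sketch of how one of the context entries above might be looked up once the record is parsed — assuming the context field is loaded as a Python list of dicts with "identifier", "path" and "snippet" keys; find_snippet and the trimmed example list are illustrative stand-ins, not part of the dataset's tooling.

from typing import Dict, List, Optional

def find_snippet(context: List[Dict[str, str]], identifier: str) -> Optional[str]:
    # Return the stored source snippet for the requested identifier, if any.
    for entry in context:
        if entry["identifier"] == identifier:
            return entry["snippet"]
    return None

# Tiny stand-in list mirroring the structure of the context field above.
context = [
    {"identifier": "get_black_border",
     "path": "zoedepth/data/preprocess.py",
     "snippet": "def get_black_border(rgb_image, **kwargs) -> CropParams: ..."},
    {"identifier": "generatemask",
     "path": "zoedepth/utils/misc.py",
     "snippet": "def generatemask(size, k_size=-1, sigma=-1, h_factor=0.03, w_factor=0.02): ..."},
]

print(find_snippet(context, "get_black_border"))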
import os
import torch
import torch.cuda.amp as amp
import torch.nn as nn
import numpy as np
import wandb
import uuid
import torch.distributed as dist
import copy
import torch.optim as optim
import matplotlib.pyplot as plt
from zoedepth.trainers.loss_sample import SILogLoss, DistributionLoss
from zoedepth.trainers.loss import SILogLoss as DenseSILogLoss
from zoedepth.trainers.loss import BudgetConstraint, HistogramMatchingLoss, SSIM, ConsistencyLoss
from zoedepth.utils.config import DATASETS_CONFIG
from zoedepth.utils.misc import compute_metrics
from zoedepth.data.preprocess import get_black_border
from .base_trainer import BaseTrainer, is_rank_zero, colors, flatten
from torchvision import transforms
from PIL import Image
from tqdm import tqdm
from datetime import datetime as dt
from zoedepth.utils.misc import generatemask
token_num: 15,571
plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = 
pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8)
# MIT License # Copyright (c) 2022 Intelligent Systems Lab Org # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # File author: Zhenyu Li # This file is partly inspired from ZoeDepth (https://github.com/isl-org/ZoeDepth/blob/main/zoedepth/trainers/zoedepth_trainer.py); author: Shariq Farooq Bhat class Trainer(BaseTrainer): def __init__(self, config, model, train_loader, test_loader=None, device=None): self.addf = config.get("addf", False) self.lazy_epoch = -1 self.boostingdepth = config.get("boostingdepth", False) super().__init__(config, model, train_loader, test_loader=test_loader, device=device) self.device = device self.silog_loss = SILogLoss(beta=config.get("beta", 0.15)) self.dense_silog_loss = DenseSILogLoss(beta=config.get("beta", 0.15)) print("sigloss's beta is set to {}".format(config.get("beta", 0.15))) self.scaler = amp.GradScaler(enabled=self.config.use_amp) self.distribution_loss = DistributionLoss(max_depth=self.config.max_depth) self.sampled_training = config.get("sampled_training", False) self.sec_stage = config.get("sec_stage", False) self.multi_consistency = config.get("multi_consistency", False) self.use_blur = config.get("use_blur", False) self.dynamic = config.get("dynamic", False) if self.dynamic: self.dynamic_unupdate_rate = config.get("dynamic_unupdate_rate", 0.0) self.budget_loss = BudgetConstraint(loss_mu=0.0, flops_all=21552.5684, warm_up=True) self.use_scale_loss = config.get("use_scale_loss", False) if self.use_scale_loss: if config.get("scale_type", "ssim"): self.scale_loss = SSIM(window_size=config.get("window_size", int(11))) else: self.scale_loss = HistogramMatchingLoss(min_depth=self.config.min_depth, max_depth=self.config.max_depth) self.scale_target = config.get("scale_target", None) self.consistency_training = config.get("consistency_training", False) if self.consistency_training: self.consistency_target = config.get("consistency_target", None) self.consistency_loss = ConsistencyLoss(self.consistency_target, config.get("focus_flatten", False), config.get("w_p", 1.0)) print("current weight for consistency loss is {}. focus_flatten is {}. 
w_p is {}".format(self.config.w_consistency, config.get("focus_flatten", False), config.get("w_p", 1.0))) def train_on_batch(self, batch, train_step, step_rate): """ Expects a batch of images and depth as input batch["image"].shape : batch_size, c, h, w batch["depth"].shape : batch_size, 1, h, w """ images, depths_gt = batch['image'].to(self.device), batch['depth'].to(self.device) image_raw = batch.get("image_raw", None) if image_raw is not None: image_raw = image_raw.to(self.device) sample_points = None if self.sampled_training: sample_points = batch['sample_points'].to(self.device) bbox = batch.get("bbox", None) if bbox is not None: bbox = bbox.to(self.device) bbox_raw = batch.get("bbox_raw", None) if bbox_raw is not None: bbox_raw = bbox_raw.to(self.device) depth_raw = batch.get("depth_raw", None) if depth_raw is not None: depth_raw = depth_raw.to(self.device) crop_area = batch.get("crop_area", None) if crop_area is not None: crop_area = crop_area.to(self.device) shift = batch.get("shift", None) if shift is not None: shift = shift.to(self.device) dataset = batch['dataset'][0] b, c, h, w = images.size() mask = batch["mask"].to(self.device).to(torch.bool) sample_mask = batch.get("sample_mask", None) if sample_mask is not None: sample_mask = sample_mask.to(self.device).to(torch.bool) mask_raw = batch.get("mask_raw", None) if mask_raw is not None: mask_raw = mask_raw.to(self.device).to(torch.bool) losses = {} with amp.autocast(enabled=self.config.use_amp): if self.sampled_training: output = self.model(images, sample_points, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) else: output = self.model(images, None, mode='train', image_raw=image_raw, bbox=bbox, depth_raw=depth_raw, crop_area=crop_area, shift=shift, bbox_raw=bbox_raw) if self.boostingdepth: if self.lazy_epoch < self.epoch: output.update_learning_rate() self.lazy_epoch = self.epoch input_dict = dict() input_dict['data_gtfake'] = depths_gt output.set_input_train_gt(input_dict) output.optimize_parameters() pred_depths = output.fake_B pred = output.fake_B # print(torch.min(pred), torch.max(pred)) losses = output.get_current_losses() else: pred_depths = output['metric_depth'] if self.sampled_training: sampled_depth_gt = sample_points[:, :, -1].float().unsqueeze(dim=-1) sampled_depth_gt = sampled_depth_gt.permute(0, 2, 1) if self.config.get("representation", "") == 'biLaplacian': # only for sampled training for now l_dist, l_si = self.distribution_loss(output, sampled_depth_gt, mask=sample_mask) loss = self.config.w_dist * l_dist + self.config.w_si * l_si losses['distribution_loss'] = l_dist losses['sigloss'] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.sampled_training: l_si = self.silog_loss( pred_depths, sampled_depth_gt, mask=sample_mask) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.multi_consistency: coarse, fine = output['coarse_depth_pred'], output['fine_depth_pred'] l_si_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=False) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, 
interpolate=True, return_interpolated=False) losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c loss += self.config.w_si * (l_si_f + l_si_c) else: if self.multi_consistency: #### here here here pred_depths, coarse, fine = output['metric_depth'], output['coarse_depth_pred'], output['fine_depth_pred'] if self.consistency_training: depths_gt = torch.split(depths_gt, 1, dim=1) depths_gt = torch.cat(depths_gt, dim=0) mask = torch.split(mask, 1, dim=-1) mask = torch.cat(mask, dim=0).permute(0, 3, 1, 2) mask_raw = torch.cat([mask_raw, mask_raw], dim=0) depth_raw = torch.cat([depth_raw, depth_raw], dim=0) temp_features = output.get('temp_features', None) l_si_1, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_f, pred_f = self.dense_silog_loss( fine, depths_gt, mask=mask, interpolate=True, return_interpolated=True) l_si_c = self.dense_silog_loss( coarse, depth_raw, mask=mask_raw, interpolate=True, return_interpolated=False) losses[self.silog_loss.name] = l_si_1 losses['sigloss_f'] = l_si_f losses['l_si_c'] = l_si_c # loss = l_si_1 + l_si_f + l_si_c loss = l_si_1 if self.consistency_training: try: # depths_gt? pred_f? l_consistency = self.consistency_loss(pred, shift, mask, temp_features, pred_f=depths_gt) # use the resized pred except RuntimeError as e: print(e) print("some runtime error here! Hack with 0") l_consistency = torch.Tensor([0]).squeeze() losses[self.consistency_loss.name] = l_consistency loss += l_consistency * self.config.w_consistency else: l_si, pred = self.dense_silog_loss( pred_depths, depths_gt, mask=mask, interpolate=True, return_interpolated=True) loss = self.config.w_si * l_si losses[self.silog_loss.name] = l_si if self.dynamic: if step_rate > self.dynamic_unupdate_rate: warm_up_rate = min(1.0, (step_rate - self.dynamic_unupdate_rate) / 0.02) flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=warm_up_rate) loss += self.config.w_flop * flop_cost losses['flop_loss'] = flop_cost else: flop_cost = self.budget_loss(output['all_cell_flops'], warm_up_rate=1) loss += 0 * flop_cost losses['flop_loss'] = flop_cost if self.use_scale_loss: if self.scale_target == 'coarse': h_loss = self.scale_loss(pred_depths, output['coarse_depth_pred_roi'], mask, interpolate=True) else: h_loss = self.scale_loss(pred_depths, depths_gt, mask, interpolate=True) loss += self.config.w_scale * h_loss losses['scale_loss'] = h_loss # self.scaler.scale(loss).backward() # if self.config.clip_grad > 0: # self.scaler.unscale_(self.optimizer) # nn.utils.clip_grad_norm_( # self.model.parameters(), self.config.clip_grad) # self.scaler.step(self.optimizer) # self.scaler.update() # self.optimizer.zero_grad() self.scaler.scale(loss).backward() if self.config.clip_grad > 0: self.scaler.unscale_(self.optimizer) nn.utils.clip_grad_norm_( self.model.parameters(), self.config.clip_grad) self.scaler.step(self.optimizer) self.scaler.update() self.optimizer.zero_grad() if self.should_log and (self.step % int(self.config.log_images_every * self.iters_per_epoch)) == 0: if self.config.get("debug", False): pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] plt.imshow(pred.squeeze().detach().cpu().numpy()) plt.savefig('debug.png') pass else: pred = nn.functional.interpolate( pred[0:1], depths_gt.shape[-2:], mode='bilinear', align_corners=True)[0] depths_gt[torch.logical_not(mask)] = DATASETS_CONFIG[dataset]['max_depth'] if self.consistency_training: split_images = torch.split(images, 3, dim=1) 
images = torch.cat(split_images, dim=0) self.log_images(rgb={"Input": images[0, ...]}, depth={"GT": depths_gt[0], "PredictedMono": pred}, prefix="Train", min_depth=DATASETS_CONFIG[dataset]['min_depth'], max_depth=DATASETS_CONFIG[dataset]['max_depth']) return losses @torch.no_grad() def eval_infer(self, x, image_raw, bboxs=None, crop_area=None, dataset='u4k', bbox_raw=None): m = self.model.module if self.config.multigpu else self.model if dataset == 'u4k': base_h = 540 base_w = 960 elif dataset == 'gta': base_h = 270 base_w = 480 elif dataset == 'nyu': base_h = 120 * 2 base_w = 160 * 2 else: raise NotImplementedError if dataset == 'nyu': if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, mode='eval', image_raw=image_raw)['metric_depth'] else: if self.sec_stage: images_crops = torch.split(x, 3, dim=1) bboxs_list = torch.split(bboxs, 1, dim=1) crop_areas = torch.split(crop_area, 1, dim=1) pred_depth_crops = [] for i, (img, bbox, crop_area) in enumerate(zip(images_crops, bboxs_list, crop_areas)): with amp.autocast(enabled=self.config.use_amp): if i == 0: out_dict = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None) # whole_depth_pred = out_dict['coarse_depth_pred'] pred_depth_crop = out_dict['metric_depth'] else: pred_depth_crop = m(img, mode='eval', image_raw=image_raw, bbox=bbox[0], crop_area=crop_area, bbox_raw=bbox_raw[:, i, :] if bbox_raw is not None else None)['metric_depth'] pred_depth_crop = nn.functional.interpolate( pred_depth_crop, (base_h, base_w), mode='bilinear', align_corners=True) pred_depth_crops.append(pred_depth_crop) x_start, y_start = [0, base_h], [0, base_w] pred_depth = torch.zeros((base_h*2, base_w*2)).cuda() inner_idx = 0 for ii, x in enumerate(x_start): for jj, y in enumerate(y_start): if self.use_blur: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() # do not care about boundry during validation else: pred_depth[x: x+base_h, y: y+base_w] = pred_depth_crops[inner_idx].squeeze() inner_idx += 1 pred_depth = pred_depth.squeeze(dim=0) else: with amp.autocast(enabled=self.config.use_amp): pred_depth = m(x, 
mode='eval', image_raw=image_raw)['metric_depth'] return pred_depth @torch.no_grad() def crop_aware_infer(self, x, image_raw): # if we are not avoiding the black border, we can just use the normal inference if not self.config.get("avoid_boundary", False): return self.eval_infer(x) # otherwise, we need to crop the image to avoid the black border # For now, this may be a bit slow due to converting to numpy and back # We assume no normalization is done on the input image # get the black border assert x.shape[0] == 1, "Only batch size 1 is supported for now" x_pil = transforms.ToPILImage()(x[0].cpu()) x_np = np.array(x_pil, dtype=np.uint8)
next_line: black_border_params = get_black_border(x_np)
gold_snippet_index: 9
created_at: 2023-12-04 08:43:15+00:00
level: 24k
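Taken together, the fields of this record form one next-line completion sample: the model is given the imports plus cropped_code, may additionally be given the context entry selected by gold_snippet_index (here the get_black_border helper), and is expected to produce next_line. Below is a minimal sketch of that assembly, assuming the record has been loaded as a Python dict keyed by the field names used in this dump; build_prompt is a hypothetical helper, not an official loader.

def build_prompt(record: dict) -> tuple:
    # Pick the gold context entry and prepend it to the in-file prefix.
    gold = record["context"][record["gold_snippet_index"]]
    prompt = (
        "# Retrieved from " + gold["path"] + "\n"
        + gold["snippet"] + "\n\n"
        + record["import_statement"] + "\n\n"
        + record["cropped_code"]
    )
    # For the record above, the expected completion is:
    # "black_border_params = get_black_border(x_np)"
    return prompt, record["next_line"]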
baaivision/GeoDream
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" }, { "identifier": "GeodreamGeometryVolume", "path": "threestudio/models/geometry/geodream_geometry_volume.py", "snippet": "class GeodreamGeometryVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n init_volume_path: str = \"con_volume_lod0.pth\"\n one2345_weight: str = \"pretrain.pth\"\n sdf_network_grad: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n \n \n self.sdf_layers = SdfLayer()\n self.deviation_network = SingleVarianceNetwork(self.cfg.one2345_weight)\n\n # sdf_layers weight\n sdf_layers_weight = torch.load(self.cfg.one2345_weight)['sdf_network_lod0']\n selected_state_dict = {}\n prefix = 'sdf_layer'\n for key, value in sdf_layers_weight.items():\n if key.startswith(prefix):\n selected_state_dict[key[10:]] = value# key need remove sdf_layer prefix\n self.sdf_layers.load_state_dict(selected_state_dict)\n print(\"sdf_layers is loading weight at \" + self.cfg.one2345_weight)\n \n # sdf_layers freeze \n if self.cfg.sdf_network_grad:\n print(\"sdf_layers network is training\")\n else:\n for p in self.sdf_layers.parameters():\n p.requires_grad_(False)\n print(\"sdf_layers network is freezeing\")\n\n # volume weight\n volume_weight = torch.load(self.cfg.init_volume_path)\n\n self.volume = nn.Parameter(volume_weight, requires_grad=True)\n print(\"volume network is loading weight at \" + self.cfg.init_volume_path)\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n 
* (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], viewdirs, dists, output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n \n sdf, feature_vector = self.sdf(points.view(-1, self.cfg.n_input_dims))\n\n output = {\n \"density\": sdf,\n }\n \n g = self.gradient(points.view(-1, self.cfg.n_input_dims))\n alphas = self.get_alpha(points.view(-1, self.cfg.n_input_dims), viewdirs, dists, feature_vector, sdf, g)\n output.update({\"ALPHA\": alphas})\n\n \n points_norm = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n \n enc = self.encoding(points_norm.view(-1, self.cfg.n_input_dims))\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n \n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n density, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))\n density = density.reshape(*points.shape[:-1], 1)\n return density\n \n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n sdf, _ = self.sdf(points.view(-1, self.cfg.n_input_dims))\n sdf = sdf.reshape(*points.shape[:-1], 1)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n \n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"GeodreamGeometryVolume\":\n if isinstance(other, GeodreamGeometryVolume):\n instance = GeodreamGeometryVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n 
other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {GeodreamGeometryVolume.__name__} from {other.__class__.__name__}\"\n )\n \n def forward_sdf(self, pts):\n sdf, _ = self.sdf(pts)\n return sdf\n \n def sdf(self, pts, lod=0):\n conditional_volume = self.volume\n num_pts = pts.shape[0]\n device = pts.device\n pts_ = pts.clone()\n pts = pts.view(1, 1, 1, num_pts, 3) # - should be in range (-1, 1)\n\n pts = torch.flip(pts, dims=[-1])\n sampled_feature = grid_sample_3d(conditional_volume, pts) # [1, c, 1, 1, num_pts]\n sampled_feature = sampled_feature.view(-1, num_pts).permute(1, 0).contiguous().to(device)\n\n sdf_pts = self.sdf_layers(pts_, sampled_feature)\n\n return sdf_pts[:, :1], sdf_pts[:, 1:]\n \n def get_alpha(self, ray_samples, rays_d, dists, feature_vector, sdf=None, gradients=None):\n \"\"\"compute alpha from sdf as in NeuS\"\"\"\n inv_variance = self.deviation_network(feature_vector)[:, :1].clip(1e-6, 1e6) # Single parameter\n\n \n #gradients = torch.ones_like(rays_d, requires_grad=False, device=rays_d.device)\n true_dot_val = (rays_d * gradients).sum(-1, keepdim=True) # * calculate\n alpha_inter_ratio = 0.0 \n iter_cos = -(F.relu(-true_dot_val * 0.5 + 0.5) * (1.0 - alpha_inter_ratio) + F.relu(\n -true_dot_val) * alpha_inter_ratio) # always non-positive\n\n true_estimate_sdf_half_next = sdf + iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5\n true_estimate_sdf_half_prev = sdf - iter_cos.clip(-10.0, 10.0) * dists.reshape(-1, 1) * 0.5\n\n prev_cdf = torch.sigmoid(true_estimate_sdf_half_prev * inv_variance)\n next_cdf = torch.sigmoid(true_estimate_sdf_half_next * inv_variance)\n\n p = prev_cdf - next_cdf\n c = prev_cdf\n\n alpha = ((p + 1e-5) / (c + 1e-5)).clip(0.0, 1.0)\n\n return alpha\n \n def gradient(self, x):\n \n x.requires_grad_(True)\n with torch.enable_grad():\n sdf, _ = self.sdf(x)\n y = sdf\n\n d_output = torch.ones_like(y, requires_grad=False, device=y.device)\n # ! Distributed Data Parallel doesn’t work with torch.autograd.grad()\n # ! (i.e. it will only work if gradients are to be accumulated in .grad attributes of parameters).\n gradients = torch.autograd.grad(\n outputs=y,\n inputs=x,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradients" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.models.geometry.geodream_geometry_volume import GeodreamGeometryVolume from threestudio.utils.typing import * from pysdf import SDF
16,631
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, )
self.mesh: Optional[Mesh] = None
6
2023-12-01 01:59:42+00:00
24k
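The ImplicitVolume snippet in the record above derives shading normals by finite differences of the density field, with either three forward offsets or six offsets in the "finite_difference_laplacian" variant. A minimal standalone sketch of the six-offset (central-difference) variant, assuming a generic `density_fn` callable and the toy sphere density below — both hypothetical stand-ins, not taken from the source:

```python
import torch
import torch.nn.functional as F


def finite_difference_normal(density_fn, points: torch.Tensor, eps: float = 1e-2) -> torch.Tensor:
    """Estimate normals of a density field via central differences (sketch only)."""
    # Six offset points per query point: +/- eps along each axis.
    offsets = torch.tensor(
        [[eps, 0.0, 0.0], [-eps, 0.0, 0.0],
         [0.0, eps, 0.0], [0.0, -eps, 0.0],
         [0.0, 0.0, eps], [0.0, 0.0, -eps]],
        dtype=points.dtype, device=points.device,
    )
    d = density_fn(points[..., None, :] + offsets)          # (..., 6, 1)
    grad = 0.5 * (d[..., 0::2, 0] - d[..., 1::2, 0]) / eps  # (..., 3) gradient estimate
    # Normals point against the density gradient (out of the object), hence the sign flip.
    return F.normalize(-grad, dim=-1)


if __name__ == "__main__":
    # Toy density: positive inside a sphere of radius 0.5, negative outside.
    density_fn = lambda p: (0.5 - p.norm(dim=-1, keepdim=True)) * 10.0
    pts = F.normalize(torch.randn(4, 3), dim=-1) * 0.5      # points on the sphere surface
    print(finite_difference_normal(density_fn, pts))        # roughly outward radial directions
```

The repository code additionally clamps the offset points to the scene bounding box (`.clamp(-self.cfg.radius, self.cfg.radius)`) and offers a three-offset forward-difference mode; the sketch keeps only the core differencing step.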
horseee/DeepCache
DeepCache/sd/pipeline_text_to_video_zero.py
[ { "identifier": "UNet2DConditionModel", "path": "DeepCache/sd/unet_2d_condition.py", "snippet": "class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):\n r\"\"\"\n A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample\n shaped output.\n\n This model inherits from [`ModelMixin`]. Check the superclass documentation for it's generic methods implemented\n for all models (such as downloading or saving).\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.\n center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.\n flip_sin_to_cos (`bool`, *optional*, defaults to `False`):\n Whether to flip the sin to cos in the time embedding.\n freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n mid_block_type (`str`, *optional*, defaults to `\"UNetMidBlock2DCrossAttn\"`):\n Block type for middle of UNet, it can be either `UNetMidBlock2DCrossAttn` or\n `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\")`):\n The tuple of upsample blocks to use.\n only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):\n Whether to include self-attention in the basic transformer blocks, see\n [`~models.attention.BasicTransformerBlock`].\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, normalization and activation layers is skipped in post-processing.\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):\n The dimension of the cross attention features.\n transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1):\n The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. 
Only relevant for\n [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],\n [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].\n encoder_hid_dim (`int`, *optional*, defaults to None):\n If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`\n dimension to `cross_attention_dim`.\n encoder_hid_dim_type (`str`, *optional*, defaults to `None`):\n If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text\n embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n num_attention_heads (`int`, *optional*):\n The number of attention heads. If not defined, defaults to `attention_head_dim`\n resnet_time_scale_shift (`str`, *optional*, defaults to `\"default\"`): Time scale shift config\n for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.\n class_embed_type (`str`, *optional*, defaults to `None`):\n The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,\n `\"timestep\"`, `\"identity\"`, `\"projection\"`, or `\"simple_projection\"`.\n addition_embed_type (`str`, *optional*, defaults to `None`):\n Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or\n \"text\". \"text\" will use the `TextTimeEmbedding` layer.\n addition_time_embed_dim: (`int`, *optional*, defaults to `None`):\n Dimension for the timestep embeddings.\n num_class_embeds (`int`, *optional*, defaults to `None`):\n Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing\n class conditioning with `class_embed_type` equal to `None`.\n time_embedding_type (`str`, *optional*, defaults to `positional`):\n The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.\n time_embedding_dim (`int`, *optional*, defaults to `None`):\n An optional override for the dimension of the projected time embedding.\n time_embedding_act_fn (`str`, *optional*, defaults to `None`):\n Optional activation function to use only once on the time embeddings before they are passed to the rest of\n the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.\n timestep_post_act (`str`, *optional*, defaults to `None`):\n The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.\n time_cond_proj_dim (`int`, *optional*, defaults to `None`):\n The dimension of `cond_proj` layer in the timestep embedding.\n conv_in_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_in` layer.\n conv_out_kernel (`int`, *optional*, default to `3`): The kernel size of `conv_out` layer.\n projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when\n `class_embed_type=\"projection\"`. Required when `class_embed_type=\"projection\"`.\n class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time\n embeddings with the class embeddings.\n mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):\n Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. 
If\n `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the\n `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`\n otherwise.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n mid_block_type: Optional[str] = \"UNetMidBlock2DCrossAttn\",\n up_block_types: Tuple[str] = (\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\"),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: Union[int, Tuple[int]] = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n dropout: float = 0.0,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: Union[int, Tuple[int]] = 1280,\n transformer_layers_per_block: Union[int, Tuple[int]] = 1,\n encoder_hid_dim: Optional[int] = None,\n encoder_hid_dim_type: Optional[str] = None,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n num_attention_heads: Optional[Union[int, Tuple[int]]] = None,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n addition_embed_type: Optional[str] = None,\n addition_time_embed_dim: Optional[int] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n resnet_skip_time_act: bool = False,\n resnet_out_scale_factor: int = 1.0,\n time_embedding_type: str = \"positional\",\n time_embedding_dim: Optional[int] = None,\n time_embedding_act_fn: Optional[str] = None,\n timestep_post_act: Optional[str] = None,\n time_cond_proj_dim: Optional[int] = None,\n conv_in_kernel: int = 3,\n conv_out_kernel: int = 3,\n projection_class_embeddings_input_dim: Optional[int] = None,\n attention_type: str = \"default\",\n class_embeddings_concat: bool = False,\n mid_block_only_cross_attention: Optional[bool] = None,\n cross_attention_norm: Optional[str] = None,\n addition_embed_type_num_heads=64,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n\n if num_attention_heads is not None:\n raise ValueError(\n \"At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.\"\n )\n\n # If `num_attention_heads` is not defined (which is the case for most models)\n # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.\n # The reason for this behavior is to correct for incorrectly named variables that were introduced\n # when this library was created. 
The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131\n # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking\n # which is why we correct for the naming here.\n num_attention_heads = num_attention_heads or attention_head_dim\n\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n if time_embedding_type == \"fourier\":\n time_embed_dim = time_embedding_dim or block_out_channels[0] * 2\n if time_embed_dim % 2 != 0:\n raise ValueError(f\"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.\")\n self.time_proj = GaussianFourierProjection(\n time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos\n )\n timestep_input_dim = time_embed_dim\n elif time_embedding_type == \"positional\":\n time_embed_dim = time_embedding_dim or block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n else:\n raise ValueError(\n f\"{time_embedding_type} does not exist. 
Please make sure to use one of `fourier` or `positional`.\"\n )\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n post_act_fn=timestep_post_act,\n cond_proj_dim=time_cond_proj_dim,\n )\n\n if encoder_hid_dim_type is None and encoder_hid_dim is not None:\n encoder_hid_dim_type = \"text_proj\"\n self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)\n logger.info(\"encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.\")\n\n if encoder_hid_dim is None and encoder_hid_dim_type is not None:\n raise ValueError(\n f\"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}.\"\n )\n\n if encoder_hid_dim_type == \"text_proj\":\n self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)\n elif encoder_hid_dim_type == \"text_image_proj\":\n # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much\n # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use\n # case when `addition_embed_type == \"text_image_proj\"` (Kadinsky 2.1)`\n self.encoder_hid_proj = TextImageProjection(\n text_embed_dim=encoder_hid_dim,\n image_embed_dim=cross_attention_dim,\n cross_attention_dim=cross_attention_dim,\n )\n elif encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2\n self.encoder_hid_proj = ImageProjection(\n image_embed_dim=encoder_hid_dim,\n cross_attention_dim=cross_attention_dim,\n )\n elif encoder_hid_dim_type is not None:\n raise ValueError(\n f\"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'.\"\n )\n else:\n self.encoder_hid_proj = None\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. 
it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n elif class_embed_type == \"simple_projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n if addition_embed_type == \"text\":\n if encoder_hid_dim is not None:\n text_time_embedding_from_dim = encoder_hid_dim\n else:\n text_time_embedding_from_dim = cross_attention_dim\n\n self.add_embedding = TextTimeEmbedding(\n text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads\n )\n elif addition_embed_type == \"text_image\":\n # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much\n # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use\n # case when `addition_embed_type == \"text_image\"` (Kadinsky 2.1)`\n self.add_embedding = TextImageTimeEmbedding(\n text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim\n )\n elif addition_embed_type == \"text_time\":\n self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)\n self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n elif addition_embed_type == \"image\":\n # Kandinsky 2.2\n self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)\n elif addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 ControlNet\n self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)\n elif addition_embed_type is not None:\n raise ValueError(f\"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.\")\n\n if time_embedding_act_fn is None:\n self.time_embed_act = None\n else:\n self.time_embed_act = get_activation(time_embedding_act_fn)\n\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n if mid_block_only_cross_attention is None:\n mid_block_only_cross_attention = only_cross_attention\n\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if mid_block_only_cross_attention is None:\n mid_block_only_cross_attention = False\n\n if isinstance(num_attention_heads, int):\n num_attention_heads = (num_attention_heads,) * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n if isinstance(cross_attention_dim, int):\n cross_attention_dim = (cross_attention_dim,) * len(down_block_types)\n\n if isinstance(layers_per_block, int):\n layers_per_block = [layers_per_block] * len(down_block_types)\n\n if isinstance(transformer_layers_per_block, int):\n transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)\n\n if class_embeddings_concat:\n # The time embeddings are concatenated with the class embeddings. 
The dimension of the\n # time embeddings passed to the down, middle, and up blocks is twice the dimension of the\n # regular time embeddings\n blocks_time_embed_dim = time_embed_dim * 2\n else:\n blocks_time_embed_dim = time_embed_dim\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block[i],\n transformer_layers_per_block=transformer_layers_per_block[i],\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=blocks_time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim[i],\n num_attention_heads=num_attention_heads[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n resnet_skip_time_act=resnet_skip_time_act,\n resnet_out_scale_factor=resnet_out_scale_factor,\n cross_attention_norm=cross_attention_norm,\n attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,\n dropout=dropout,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock2DCrossAttn\":\n self.mid_block = UNetMidBlock2DCrossAttn(\n transformer_layers_per_block=transformer_layers_per_block[-1],\n in_channels=block_out_channels[-1],\n temb_channels=blocks_time_embed_dim,\n dropout=dropout,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim[-1],\n num_attention_heads=num_attention_heads[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n attention_type=attention_type,\n )\n elif mid_block_type == \"UNetMidBlock2DSimpleCrossAttn\":\n self.mid_block = UNetMidBlock2DSimpleCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=blocks_time_embed_dim,\n dropout=dropout,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim[-1],\n attention_head_dim=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n resnet_time_scale_shift=resnet_time_scale_shift,\n skip_time_act=resnet_skip_time_act,\n only_cross_attention=mid_block_only_cross_attention,\n cross_attention_norm=cross_attention_norm,\n )\n elif mid_block_type is None:\n self.mid_block = None\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_num_attention_heads = list(reversed(num_attention_heads))\n reversed_layers_per_block = list(reversed(layers_per_block))\n reversed_cross_attention_dim = list(reversed(cross_attention_dim))\n reversed_transformer_layers_per_block = list(reversed(transformer_layers_per_block))\n only_cross_attention = list(reversed(only_cross_attention))\n\n output_channel = reversed_block_out_channels[0]\n for i, 
up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=reversed_layers_per_block[i] + 1,\n transformer_layers_per_block=reversed_transformer_layers_per_block[i],\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=blocks_time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=reversed_cross_attention_dim[i],\n num_attention_heads=reversed_num_attention_heads[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n attention_type=attention_type,\n resnet_skip_time_act=resnet_skip_time_act,\n resnet_out_scale_factor=resnet_out_scale_factor,\n cross_attention_norm=cross_attention_norm,\n attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,\n dropout=dropout,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n\n self.conv_act = get_activation(act_fn)\n\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n if attention_type in [\"gated\", \"gated-text-image\"]:\n positive_len = 768\n if isinstance(cross_attention_dim, int):\n positive_len = cross_attention_dim\n elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):\n positive_len = cross_attention_dim[0]\n\n feature_type = \"text-only\" if attention_type == \"gated\" else \"text-image\"\n self.position_net = PositionNet(\n positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type\n )\n\n @property\n def attn_processors(self) -> Dict[str, AttentionProcessor]:\n r\"\"\"\n Returns:\n `dict` of attention processors: A dictionary containing all attention processors used in the model with\n indexed by its weight name.\n \"\"\"\n # set recursively\n processors = {}\n\n def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n if hasattr(module, \"get_processor\"):\n processors[f\"{name}.processor\"] = module.get_processor(return_deprecated_lora=True)\n\n for sub_name, child in module.named_children():\n fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n return processors\n\n for name, module in self.named_children():\n fn_recursive_add_processors(name, module, processors)\n\n return processors\n\n def set_attn_processor(\n self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False\n ):\n r\"\"\"\n Sets the attention processor to use to compute attention.\n\n Parameters:\n processor (`dict` of `AttentionProcessor` or 
only `AttentionProcessor`):\n The instantiated processor class or a dictionary of processor classes that will be set as the processor\n for **all** `Attention` layers.\n\n If `processor` is a dict, the key needs to define the path to the corresponding cross attention\n processor. This is strongly recommended when setting trainable attention processors.\n\n \"\"\"\n count = len(self.attn_processors.keys())\n\n if isinstance(processor, dict) and len(processor) != count:\n raise ValueError(\n f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n )\n\n def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n if hasattr(module, \"set_processor\"):\n if not isinstance(processor, dict):\n module.set_processor(processor, _remove_lora=_remove_lora)\n else:\n module.set_processor(processor.pop(f\"{name}.processor\"), _remove_lora=_remove_lora)\n\n for sub_name, child in module.named_children():\n fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n for name, module in self.named_children():\n fn_recursive_attn_processor(name, module, processor)\n\n def set_default_attn_processor(self):\n \"\"\"\n Disables custom attention processors and sets the default attention implementation.\n \"\"\"\n if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):\n processor = AttnAddedKVProcessor()\n elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):\n processor = AttnProcessor()\n else:\n raise ValueError(\n f\"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}\"\n )\n\n self.set_attn_processor(processor, _remove_lora=True)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module splits the input tensor in slices to compute attention in\n several steps. This is useful for saving some memory in exchange for a small decrease in speed.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, input to the attention heads is halved, so attention is computed in two steps. If\n `\"max\"`, maximum amount of memory is saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if hasattr(module, \"gradient_checkpointing\"):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n quick_replicate: bool = False,\n replicate_prv_feature: Optional[List[torch.Tensor]] = None,\n cache_layer_id: Optional[int] = None,\n cache_block_id: Optional[int] = None,\n return_dict: bool = True,\n ) -> Union[UNet2DConditionOutput, Tuple]:\n r\"\"\"\n The [`UNet2DConditionModel`] forward method.\n\n Args:\n sample (`torch.FloatTensor`):\n The noisy input tensor with the following shape `(batch, channel, height, width)`.\n timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.\n encoder_hidden_states (`torch.FloatTensor`):\n The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.\n encoder_attention_mask (`torch.Tensor`):\n A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. 
If\n `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,\n which adds large negative values to the attention scores corresponding to \"discard\" tokens.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the [`AttnProcessor`].\n added_cond_kwargs: (`dict`, *optional*):\n A kwargs dictionary containin additional embeddings that if specified are added to the embeddings that\n are passed along to the UNet blocks.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise\n a `tuple` is returned where the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None:\n encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # 0. center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=sample.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n aug_emb = None\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # there might be better ways to encapsulate this.\n class_labels = class_labels.to(dtype=sample.dtype)\n\n class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)\n\n if self.config.class_embeddings_concat:\n emb = torch.cat([emb, class_emb], dim=-1)\n else:\n emb = emb + class_emb\n\n if self.config.addition_embed_type == \"text\":\n aug_emb = self.add_embedding(encoder_hidden_states)\n elif self.config.addition_embed_type == \"text_image\":\n # Kandinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n text_embs = added_cond_kwargs.get(\"text_embeds\", encoder_hidden_states)\n aug_emb = self.add_embedding(text_embs, image_embs)\n elif self.config.addition_embed_type == \"text_time\":\n # SDXL - style\n if \"text_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`\"\n )\n text_embeds = added_cond_kwargs.get(\"text_embeds\")\n if \"time_ids\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`\"\n )\n time_ids = added_cond_kwargs.get(\"time_ids\")\n time_embeds = self.add_time_proj(time_ids.flatten())\n time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))\n\n add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)\n add_embeds = add_embeds.to(emb.dtype)\n aug_emb = self.add_embedding(add_embeds)\n elif self.config.addition_embed_type == \"image\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n aug_emb = self.add_embedding(image_embs)\n elif 
self.config.addition_embed_type == \"image_hint\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs or \"hint\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`\"\n )\n image_embs = added_cond_kwargs.get(\"image_embeds\")\n hint = added_cond_kwargs.get(\"hint\")\n aug_emb, hint = self.add_embedding(image_embs, hint)\n sample = torch.cat([sample, hint], dim=1)\n\n emb = emb + aug_emb if aug_emb is not None else emb\n\n if self.time_embed_act is not None:\n emb = self.time_embed_act(emb)\n\n if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_proj\":\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"text_image_proj\":\n # Kadinsky 2.1 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)\n elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == \"image_proj\":\n # Kandinsky 2.2 - style\n if \"image_embeds\" not in added_cond_kwargs:\n raise ValueError(\n f\"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`\"\n )\n image_embeds = added_cond_kwargs.get(\"image_embeds\")\n encoder_hidden_states = self.encoder_hid_proj(image_embeds)\n # 2. pre-process\n sample = self.conv_in(sample)\n\n # 2.5 GLIGEN position net\n if cross_attention_kwargs is not None and cross_attention_kwargs.get(\"gligen\", None) is not None:\n cross_attention_kwargs = cross_attention_kwargs.copy()\n gligen_args = cross_attention_kwargs.pop(\"gligen\")\n cross_attention_kwargs[\"gligen\"] = {\"objs\": self.position_net(**gligen_args)}\n\n # 3. 
down\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n is_adapter = mid_block_additional_residual is None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n if quick_replicate and replicate_prv_feature is not None:\n # Down\n for i, downsample_block in enumerate(self.down_blocks):\n if i > cache_layer_id:\n break\n\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n exist_block_number=cache_block_id if i == cache_layer_id else None,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n # No Middle\n # Up\n #print(\"down_block_res_samples:\", [res_sample.shape for res_sample in down_block_res_samples])\n sample = replicate_prv_feature\n #down_block_res_samples = down_block_res_samples[:-1]\n if cache_block_id == len(self.down_blocks[cache_layer_id].attentions) :\n cache_block_id = 0\n cache_layer_id += 1\n else:\n cache_block_id += 1\n\n for i, upsample_block in enumerate(self.up_blocks):\n if i < len(self.up_blocks) - 1 - cache_layer_id:\n continue\n\n if i == len(self.up_blocks) - 1 - cache_layer_id:\n trunc_upsample_block = cache_block_id + 1\n else:\n trunc_upsample_block = len(upsample_block.resnets)\n\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-trunc_upsample_block:]\n down_block_res_samples = down_block_res_samples[: -trunc_upsample_block]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n #print(sample.shape, [res_sample.shape for res_sample in res_samples])\n sample, _ = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n enter_block_number=cache_block_id if i == len(self.up_blocks) - 1 - cache_layer_id else None,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n scale=lora_scale,\n )\n \n prv_f = replicate_prv_feature\n else:\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n # For t2i-adapter CrossAttnDownBlock2D\n additional_residuals = {}\n if is_adapter and 
len(down_block_additional_residuals) > 0:\n additional_residuals[\"additional_residuals\"] = down_block_additional_residuals.pop(0)\n\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n **additional_residuals,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)\n\n if is_adapter and len(down_block_additional_residuals) > 0:\n sample += down_block_additional_residuals.pop(0)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n cross_attention_kwargs=cross_attention_kwargs,\n encoder_attention_mask=encoder_attention_mask,\n )\n # To support T2I-Adapter-XL\n if (\n is_adapter\n and len(down_block_additional_residuals) > 0\n and sample.shape == down_block_additional_residuals[0].shape\n ):\n sample += down_block_additional_residuals.pop(0)\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n if cache_block_id is not None:\n if cache_block_id == len(self.down_blocks[cache_layer_id].attentions) :\n cache_block_id = 0\n cache_layer_id += 1\n else:\n cache_block_id += 1\n #print(\"down_block_res_samples:\", [res_sample.shape for res_sample in down_block_res_samples])\n #print(cache_block_id, cache_layer_id)\n prv_f = None\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n #print(sample.shape, [res_sample.shape for res_sample in res_samples])\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample, current_record_f = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n scale=lora_scale,\n )\n current_record_f = None\n\n #print(\"Append prv_feature with shape:\", sample.shape)\n if cache_layer_id is not None and current_record_f is not None and i == len(self.up_blocks) - cache_layer_id - 1:\n prv_f = current_record_f[-cache_block_id-1]\n \n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n if not return_dict:\n return (sample, prv_f,)\n \n return UNet2DConditionOutput(sample=sample)" }, { "identifier": "StableDiffusionPipeline", "path": "DeepCache/sd/pipeline_stable_diffusion.py", "snippet": "EXAMPLE_DOC_STRING = \"\"\"\n Examples:\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPipeline\n\n >>> pipe = StableDiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\", torch_dtype=torch.float16)\n >>> pipe = pipe.to(\"cuda\")\n\n >>> prompt = \"a photo of an astronaut riding a horse on mars\"\n >>> image = pipe(prompt).images[0]\n ```\n\"\"\"\ndef sample_gaussian_centered(n=1000, sample_size=100, std_dev=100):\ndef sample_from_quad(total_numbers, n_samples, pow=1.2):\ndef sample_from_quad_center(total_numbers, n_samples, center, pow=1.2):\ndef rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n def enable_vae_slicing(self):\n def disable_vae_slicing(self):\n def enable_vae_tiling(self):\n def disable_vae_tiling(self):\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n ):\n def encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n lora_scale: Optional[float] = None,\n ):\n def run_safety_checker(self, image, device, dtype):\n def decode_latents(self, latents):\n def prepare_extra_step_kwargs(self, generator, eta):\n def check_inputs(\n self,\n prompt,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n ):\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n cache_interval: int = 1,\n cache_layer_id: int = None,\n cache_block_id: int = None,\n uniform: bool = True,\n pow: float = None,\n center: int = None,\n output_all_sequence: bool = False,\n ):\nclass StableDiffusionPipeline(DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, 
FromSingleFileMixin):" } ]
import copy import numpy as np import PIL.Image import torch import torch.nn.functional as F from dataclasses import dataclass from typing import Callable, List, Optional, Union from torch.nn.functional import grid_sample from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer from diffusers.models import AutoencoderKL from .unet_2d_condition import UNet2DConditionModel from .pipeline_stable_diffusion import StableDiffusionPipeline, StableDiffusionSafetyChecker from diffusers.schedulers import KarrasDiffusionSchedulers from diffusers.utils import BaseOutput from diffusers.utils.torch_utils import randn_tensor
17099
images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
def sample_gaussian_centered(n=1000, sample_size=100, std_dev=100): samples = [] while len(samples) < sample_size: # Sample from a Gaussian centered at n/2 sample = int(np.random.normal(loc=n/2, scale=std_dev)) # Check if the sample is in bounds if 1 <= sample < n and sample not in samples: samples.append(sample) return samples def sample_from_quad(total_numbers, n_samples, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace(0, total_numbers**(1/pow), n_samples+1) # Raise these values to the power of 1.5 to get a non-linear distribution indices = np.unique(np.int32(x_values**pow))[:-1] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def sample_from_quad_center(total_numbers, n_samples, center, pow=1.2): while pow > 1: # Generate linearly spaced values between 0 and a max value x_values = np.linspace((-center)**(1/pow), (total_numbers-center)**(1/pow), n_samples+1) indices = [0] + [x+center for x in np.unique(np.int32(x_values**pow))[1:-1]] if len(indices) == n_samples: break pow -=0.02 if pow <= 1: raise ValueError("Cannot find suitable pow. Please adjust n_samples or decrease center.") return indices, pow def rearrange_0(tensor, f): F, C, H, W = tensor.size() tensor = torch.permute(torch.reshape(tensor, (F // f, f, C, H, W)), (0, 2, 1, 3, 4)) return tensor def rearrange_1(tensor): B, C, F, H, W = tensor.size() return torch.reshape(torch.permute(tensor, (0, 2, 1, 3, 4)), (B * F, C, H, W)) def rearrange_3(tensor, f): F, D, C = tensor.size() return torch.reshape(tensor, (F // f, f, D, C)) def rearrange_4(tensor): B, F, D, C = tensor.size() return torch.reshape(tensor, (B * F, D, C)) class CrossFrameAttnProcessor: """ Cross frame attention processor. Each frame attends the first frame. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = hidden_states.shape attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = key.size()[0] // self.batch_size first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) query = attn.head_to_batch_dim(query) key = attn.head_to_batch_dim(key) value = attn.head_to_batch_dim(value) attention_probs = attn.get_attention_scores(query, key, attention_mask) hidden_states = torch.bmm(attention_probs, value) hidden_states = attn.batch_to_head_dim(hidden_states) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states class CrossFrameAttnProcessor2_0: """ Cross frame attention processor with scaled_dot_product attention of Pytorch 2.0. Args: batch_size: The number that represents actual batch size, other than the frames. For example, calling unet with a single prompt and num_images_per_prompt=1, batch_size should be equal to 2, due to classifier-free guidance. 
""" def __init__(self, batch_size=2): if not hasattr(F, "scaled_dot_product_attention"): raise ImportError("AttnProcessor2_0 requires PyTorch 2.0, to use it, please upgrade PyTorch to 2.0.") self.batch_size = batch_size def __call__(self, attn, hidden_states, encoder_hidden_states=None, attention_mask=None): batch_size, sequence_length, _ = ( hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape ) inner_dim = hidden_states.shape[-1] if attention_mask is not None: attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size) # scaled_dot_product_attention expects attention_mask shape to be # (batch, heads, source_length, target_length) attention_mask = attention_mask.view(batch_size, attn.heads, -1, attention_mask.shape[-1]) query = attn.to_q(hidden_states) is_cross_attention = encoder_hidden_states is not None if encoder_hidden_states is None: encoder_hidden_states = hidden_states elif attn.norm_cross: encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states) key = attn.to_k(encoder_hidden_states) value = attn.to_v(encoder_hidden_states) # Cross Frame Attention if not is_cross_attention: video_length = max(1, key.size()[0] // self.batch_size) first_frame_index = [0] * video_length # rearrange keys to have batch and frames in the 1st and 2nd dims respectively key = rearrange_3(key, video_length) key = key[:, first_frame_index] # rearrange values to have batch and frames in the 1st and 2nd dims respectively value = rearrange_3(value, video_length) value = value[:, first_frame_index] # rearrange back to original shape key = rearrange_4(key) value = rearrange_4(value) head_dim = inner_dim // attn.heads query = query.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) key = key.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) value = value.view(batch_size, -1, attn.heads, head_dim).transpose(1, 2) # the output of sdp = (batch, num_heads, seq_len, head_dim) # TODO: add support for attn.scale when we move to Torch 2.1 hidden_states = F.scaled_dot_product_attention( query, key, value, attn_mask=attention_mask, dropout_p=0.0, is_causal=False ) hidden_states = hidden_states.transpose(1, 2).reshape(batch_size, -1, attn.heads * head_dim) hidden_states = hidden_states.to(query.dtype) # linear proj hidden_states = attn.to_out[0](hidden_states) # dropout hidden_states = attn.to_out[1](hidden_states) return hidden_states @dataclass class TextToVideoPipelineOutput(BaseOutput): r""" Output class for zero-shot text-to-video pipeline. Args: images (`[List[PIL.Image.Image]`, `np.ndarray`]): List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width, num_channels)`. nsfw_content_detected (`[List[bool]]`): List indicating whether the corresponding generated image contains "not-safe-for-work" (nsfw) content or `None` if safety checking could not be performed. 
""" images: Union[List[PIL.Image.Image], np.ndarray] nsfw_content_detected: Optional[List[bool]] def coords_grid(batch, ht, wd, device): # Adapted from https://github.com/princeton-vl/RAFT/blob/master/core/utils/utils.py coords = torch.meshgrid(torch.arange(ht, device=device), torch.arange(wd, device=device)) coords = torch.stack(coords[::-1], dim=0).float() return coords[None].repeat(batch, 1, 1, 1) def warp_single_latent(latent, reference_flow): """ Warp latent of a single frame with given flow Args: latent: latent code of a single frame reference_flow: flow which to warp the latent with Returns: warped: warped latent """ _, _, H, W = reference_flow.size() _, _, h, w = latent.size() coords0 = coords_grid(1, H, W, device=latent.device).to(latent.dtype) coords_t0 = coords0 + reference_flow coords_t0[:, 0] /= W coords_t0[:, 1] /= H coords_t0 = coords_t0 * 2.0 - 1.0 coords_t0 = F.interpolate(coords_t0, size=(h, w), mode="bilinear") coords_t0 = torch.permute(coords_t0, (0, 2, 3, 1)) warped = grid_sample(latent, coords_t0, mode="nearest", padding_mode="reflection") return warped def create_motion_field(motion_field_strength_x, motion_field_strength_y, frame_ids, device, dtype): """ Create translation motion field Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference device: device dtype: dtype Returns: """ seq_length = len(frame_ids) reference_flow = torch.zeros((seq_length, 2, 512, 512), device=device, dtype=dtype) for fr_idx in range(seq_length): reference_flow[fr_idx, 0, :, :] = motion_field_strength_x * (frame_ids[fr_idx]) reference_flow[fr_idx, 1, :, :] = motion_field_strength_y * (frame_ids[fr_idx]) return reference_flow def create_motion_field_and_warp_latents(motion_field_strength_x, motion_field_strength_y, frame_ids, latents): """ Creates translation motion and warps the latents accordingly Args: motion_field_strength_x: motion strength along x-axis motion_field_strength_y: motion strength along y-axis frame_ids: indexes of the frames the latents of which are being processed. This is needed when we perform chunk-by-chunk inference latents: latent codes of frames Returns: warped_latents: warped latents """ motion_field = create_motion_field( motion_field_strength_x=motion_field_strength_x, motion_field_strength_y=motion_field_strength_y, frame_ids=frame_ids, device=latents.device, dtype=latents.dtype, ) warped_latents = latents.clone().detach() for i in range(len(warped_latents)): warped_latents[i] = warp_single_latent(latents[i][None], motion_field[i][None]) return warped_latents class TextToVideoZeroPipeline(StableDiffusionPipeline): r""" Pipeline for zero-shot text-to-video generation using Stable Diffusion. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Args: vae ([`AutoencoderKL`]): Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. text_encoder ([`CLIPTextModel`]): Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)). tokenizer (`CLIPTokenizer`): A [`~transformers.CLIPTokenizer`] to tokenize text. unet ([`UNet2DConditionModel`]): A [`UNet3DConditionModel`] to denoise the encoded video latents. 
scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. safety_checker ([`StableDiffusionSafetyChecker`]): Classification module that estimates whether generated images could be considered offensive or harmful. Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details about a model's potential harms. feature_extractor ([`CLIPImageProcessor`]): A [`CLIPImageProcessor`] to extract features from generated images; used as inputs to the `safety_checker`. """ def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer,
unet: UNet2DConditionModel,
0
2023-12-01 10:54:04+00:00
24k
alvinliu0/HumanGaussian
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = 
self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15,020
).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from(
other: BaseGeometry,
1
2023-11-27 02:39:39+00:00
24k
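A note on how one of these records fits together: the context list holds retrieved cross-file snippets (each with an identifier, path, and snippet, as seen above), the import statement and cropped code give the in-file prefix, the next_line value is the completion target, and the gold snippet index presumably singles out the context entry most relevant to that completion. The sketch below shows one plausible way to assemble such a record into a prompt/target pair for next-line evaluation; the field names mirror the columns of this dump, and the helper functions and exact-match check are illustrative assumptions rather than part of any released tooling.

# Illustrative sketch only: build_prompt / is_correct are hypothetical helpers;
# field names (repo_name, file_path, context, import_statement, cropped_code,
# next_line) are assumed to match the columns of this dump.
from typing import Any, Dict, List


def build_prompt(record: Dict[str, Any]) -> str:
    """Assemble a repo-level completion prompt from one record.

    Cross-file context comes from the retrieved snippets stored in `context`
    (each a dict with "identifier", "path", "snippet"); the in-file prefix is
    the import statement plus the cropped code.
    """
    snippets: List[Dict[str, str]] = record["context"]
    cross_file = "\n\n".join(
        f"# From: {s['path']}\n{s['snippet']}" for s in snippets
    )
    return (
        f"# Repo: {record['repo_name']} | File: {record['file_path']}\n"
        f"{cross_file}\n\n"
        f"{record['import_statement']}\n\n"
        f"{record['cropped_code']}"
    )


def is_correct(prediction: str, record: Dict[str, Any]) -> bool:
    """Exact-match check of a model's predicted line against `next_line`."""
    return prediction.strip() == record["next_line"].strip()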
EricGuo5513/momask-codes
eval_t2m_trans_res.py
[ { "identifier": "MaskTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class MaskTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8,\n num_heads=4, dropout=0.1, clip_dim=512, cond_drop_prob=0.1,\n clip_version=None, opt=None, **kargs):\n super(MaskTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n self.code_dim = code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n elif self.cond_mode == 'uncond':\n self.cond_emb = nn.Identity()\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 2 # two dummy tokens, one for masking, one for padding\n self.mask_id = opt.num_tokens\n self.pad_id = opt.num_tokens + 1\n\n self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n\n self.token_emb = nn.Embedding(_num_tokens, self.code_dim)\n\n self.apply(self.__init_weights)\n\n '''\n Preparing frozen weights\n '''\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n self.noise_schedule = cosine_schedule\n\n def load_and_freeze_token_emb(self, codebook):\n '''\n :param codebook: (c, d)\n :return:\n '''\n assert self.training, 'Only necessary in training mode'\n c, d = codebook.shape\n self.token_emb.weight = nn.Parameter(torch.cat([codebook, torch.zeros(size=(2, d), device=codebook.device)], dim=0)) #add two dummy tokens, 0 vectors\n self.token_emb.requires_grad_(False)\n # self.token_emb.weight.requires_grad = False\n # self.token_emb_ready = True\n print(\"Token embedding initialized!\")\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only 
unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def trans_forward(self, motion_ids, cond, padding_mask, force_mask=False):\n '''\n :param motion_ids: (b, seqlen)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :param force_mask: boolean\n :return:\n -logits: (b, num_token, seqlen)\n '''\n\n cond = self.mask_cond(cond, force_mask=force_mask)\n\n # print(motion_ids.shape)\n x = self.token_emb(motion_ids)\n # print(x.shape)\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(x)\n\n cond = self.cond_emb(cond).unsqueeze(0) #(1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, x], dim=0) #(seqlen+1, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:1]), padding_mask], dim=1) #(b, seqlen+1)\n # print(xseq.shape, padding_mask.shape)\n\n # print(padding_mask.shape, xseq.shape)\n\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[1:] #(seqlen, b, e)\n logits = self.output_process(output) #(seqlen, b, e) -> (b, ntoken, seqlen)\n return logits\n\n def forward(self, ids, y, m_lens):\n '''\n :param ids: (b, n)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n bs, ntokens = ids.shape\n device = ids.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) #(b, n)\n ids = torch.where(non_pad_mask, ids, self.pad_id)\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n\n '''\n Prepare mask\n '''\n rand_time = uniform((bs,), device=device)\n rand_mask_probs = self.noise_schedule(rand_time)\n num_token_masked = (ntokens * rand_mask_probs).round().clamp(min=1)\n\n batch_randperm = torch.rand((bs, ntokens), device=device).argsort(dim=-1)\n # Positions to be MASKED are ALL TRUE\n mask = batch_randperm < num_token_masked.unsqueeze(-1)\n\n # Positions to be MASKED must also be NON-PADDED\n mask &= non_pad_mask\n\n # Note this is our training target, not input\n labels = torch.where(mask, ids, self.mask_id)\n\n x_ids = ids.clone()\n\n # Further Apply Bert Masking Scheme\n # Step 1: 10% replace with an incorrect token\n mask_rid = get_mask_subset_prob(mask, 0.1)\n rand_id = torch.randint_like(x_ids, high=self.opt.num_tokens)\n x_ids = torch.where(mask_rid, rand_id, x_ids)\n # Step 2: 90% x 10% replace with correct token, and 90% x 88% replace with mask token\n mask_mid = get_mask_subset_prob(mask & ~mask_rid, 0.88)\n\n 
# mask_mid = mask\n\n x_ids = torch.where(mask_mid, self.mask_id, x_ids)\n\n logits = self.trans_forward(x_ids, cond_vector, ~non_pad_mask, force_mask)\n ce_loss, pred_id, acc = cal_performance(logits, labels, ignore_index=self.mask_id)\n\n return ce_loss, pred_id, acc\n\n def forward_with_cond_scale(self,\n motion_ids,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n # bs = motion_ids.shape[0]\n # if cond_scale == 1:\n if force_mask:\n return self.trans_forward(motion_ids, cond_vector, padding_mask, force_mask=True)\n\n logits = self.trans_forward(motion_ids, cond_vector, padding_mask)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_ids, cond_vector, padding_mask, force_mask=True)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n @torch.no_grad()\n @eval_decorator\n def generate(self,\n conds,\n m_lens,\n timesteps: int,\n cond_scale: int,\n temperature=1,\n topk_filter_thres=0.9,\n gsample=False,\n force_mask=False\n ):\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n\n device = next(self.parameters()).device\n seq_len = max(m_lens)\n batch_size = len(m_lens)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, )\n\n # Start from all tokens being masked\n ids = torch.where(padding_mask, self.pad_id, self.mask_id)\n scores = torch.where(padding_mask, 1e5, 0.)\n starting_temperature = temperature\n\n for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))):\n # 0 < timestep < 1\n rand_mask_prob = self.noise_schedule(timestep) # Tensor\n\n '''\n Maskout, and cope with variable length\n '''\n # fix: the ratio regarding lengths, instead of seq_len\n num_token_masked = torch.round(rand_mask_prob * m_lens).clamp(min=1) # (b, )\n\n # select num_token_masked tokens with lowest scores to be masked\n sorted_indices = scores.argsort(\n dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1\n ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1\n is_mask = (ranks < num_token_masked.unsqueeze(-1))\n ids = torch.where(is_mask, self.mask_id, ids)\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # print(logits.shape, self.opt.num_tokens)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n '''\n Update ids\n '''\n # if force_mask:\n temperature = starting_temperature\n # else:\n # temperature = starting_temperature * (steps_until_x0 / timesteps)\n # temperature = max(temperature, 1e-4)\n # print(filtered_logits.shape)\n # temperature is annealed, gradually reducing temperature as well as randomness\n if gsample: # use gumbel_softmax sampling\n # print(\"1111\")\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n else: # use 
multinomial sampling\n # print(\"2222\")\n probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # print(probs / temperature)\n pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n # print(pred_ids.max(), pred_ids.min())\n # if pred_ids.\n ids = torch.where(is_mask, pred_ids, ids)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1)\n scores = scores.squeeze(-1) # (b, seqlen)\n\n # We do not want to re-mask the previously kept tokens, or pad tokens\n scores = scores.masked_fill(~is_mask, 1e5)\n\n ids = torch.where(padding_mask, -1, ids)\n # print(\"Final\", ids.max(), ids.min())\n return ids\n\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n conds,\n tokens,\n m_lens,\n timesteps: int,\n cond_scale: int,\n temperature=1,\n topk_filter_thres=0.9,\n gsample=False,\n force_mask=False,\n edit_mask=None,\n padding_mask=None,\n ):\n\n assert edit_mask.shape == tokens.shape if edit_mask is not None else True\n device = next(self.parameters()).device\n seq_len = tokens.shape[1]\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(1, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n if padding_mask == None:\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n\n # Start from all tokens being masked\n if edit_mask == None:\n mask_free = True\n ids = torch.where(padding_mask, self.pad_id, tokens)\n edit_mask = torch.ones_like(padding_mask)\n edit_mask = edit_mask & ~padding_mask\n edit_len = edit_mask.sum(dim=-1)\n scores = torch.where(edit_mask, 0., 1e5)\n else:\n mask_free = False\n edit_mask = edit_mask & ~padding_mask\n edit_len = edit_mask.sum(dim=-1)\n ids = torch.where(edit_mask, self.mask_id, tokens)\n scores = torch.where(edit_mask, 0., 1e5)\n starting_temperature = temperature\n\n for timestep, steps_until_x0 in zip(torch.linspace(0, 1, timesteps, device=device), reversed(range(timesteps))):\n # 0 < timestep < 1\n rand_mask_prob = 0.16 if mask_free else self.noise_schedule(timestep) # Tensor\n\n '''\n Maskout, and cope with variable length\n '''\n # fix: the ratio regarding lengths, instead of seq_len\n num_token_masked = torch.round(rand_mask_prob * edit_len).clamp(min=1) # (b, )\n\n # select num_token_masked tokens with lowest scores to be masked\n sorted_indices = scores.argsort(\n dim=1) # (b, k), sorted_indices[i, j] = the index of j-th lowest element in scores on dim=1\n ranks = sorted_indices.argsort(dim=1) # (b, k), rank[i, j] = the rank (0: lowest) of scores[i, j] on dim=1\n is_mask = (ranks < num_token_masked.unsqueeze(-1))\n # is_mask = (torch.rand_like(scores) < 0.8) * ~padding_mask if mask_free else is_mask\n ids = torch.where(is_mask, self.mask_id, ids)\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids, cond_vector=cond_vector,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # print(logits.shape, self.opt.num_tokens)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n '''\n Update ids\n '''\n # if 
force_mask:\n temperature = starting_temperature\n # else:\n # temperature = starting_temperature * (steps_until_x0 / timesteps)\n # temperature = max(temperature, 1e-4)\n # print(filtered_logits.shape)\n # temperature is annealed, gradually reducing temperature as well as randomness\n if gsample: # use gumbel_softmax sampling\n # print(\"1111\")\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n else: # use multinomial sampling\n # print(\"2222\")\n probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # print(probs / temperature)\n pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n # print(pred_ids.max(), pred_ids.min())\n # if pred_ids.\n ids = torch.where(is_mask, pred_ids, ids)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n scores = probs_without_temperature.gather(2, pred_ids.unsqueeze(dim=-1)) # (b, seqlen, 1)\n scores = scores.squeeze(-1) # (b, seqlen)\n\n # We do not want to re-mask the previously kept tokens, or pad tokens\n scores = scores.masked_fill(~edit_mask, 1e5) if mask_free else scores.masked_fill(~is_mask, 1e5)\n\n ids = torch.where(padding_mask, -1, ids)\n # print(\"Final\", ids.max(), ids.min())\n return ids\n\n @torch.no_grad()\n @eval_decorator\n def edit_beta(self,\n conds,\n conds_og,\n tokens,\n m_lens,\n cond_scale: int,\n force_mask=False,\n ):\n\n device = next(self.parameters()).device\n seq_len = tokens.shape[1]\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n if conds_og is not None:\n cond_vector_og = self.encode_text(conds_og)\n else:\n cond_vector_og = None\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n if conds_og is not None:\n cond_vector_og = self.enc_action(conds_og).to(device)\n else:\n cond_vector_og = None\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n\n # Start from all tokens being masked\n ids = torch.where(padding_mask, self.pad_id, tokens) # Do not mask anything\n\n '''\n Preparing input\n '''\n # (b, num_token, seqlen)\n logits = self.forward_with_cond_scale(ids,\n cond_vector=cond_vector,\n cond_vector_neg=cond_vector_og,\n padding_mask=padding_mask,\n cond_scale=cond_scale,\n force_mask=force_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n\n '''\n Updating scores\n '''\n probs_without_temperature = logits.softmax(dim=-1) # (b, seqlen, ntoken)\n tokens[tokens == -1] = 0 # just to get through an error when index = -1 using gather\n og_tokens_scores = probs_without_temperature.gather(2, tokens.unsqueeze(dim=-1)) # (b, seqlen, 1)\n og_tokens_scores = og_tokens_scores.squeeze(-1) # (b, seqlen)\n\n return og_tokens_scores" }, { "identifier": "ResidualTransformer", "path": "models/mask_transformer/transformer.py", "snippet": "class ResidualTransformer(nn.Module):\n def __init__(self, code_dim, cond_mode, latent_dim=256, ff_size=1024, num_layers=8, cond_drop_prob=0.1,\n num_heads=4, dropout=0.1, clip_dim=512, shared_codebook=False, share_weight=False,\n clip_version=None, opt=None, **kargs):\n super(ResidualTransformer, self).__init__()\n print(f'latent_dim: {latent_dim}, ff_size: {ff_size}, nlayers: {num_layers}, nheads: {num_heads}, dropout: {dropout}')\n\n # assert shared_codebook == True, \"Only support shared codebook right now!\"\n\n self.code_dim = 
code_dim\n self.latent_dim = latent_dim\n self.clip_dim = clip_dim\n self.dropout = dropout\n self.opt = opt\n\n self.cond_mode = cond_mode\n # self.cond_drop_prob = cond_drop_prob\n\n if self.cond_mode == 'action':\n assert 'num_actions' in kargs\n self.num_actions = kargs.get('num_actions', 1)\n self.cond_drop_prob = cond_drop_prob\n\n '''\n Preparing Networks\n '''\n self.input_process = InputProcess(self.code_dim, self.latent_dim)\n self.position_enc = PositionalEncoding(self.latent_dim, self.dropout)\n\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=num_heads,\n dim_feedforward=ff_size,\n dropout=dropout,\n activation='gelu')\n\n self.seqTransEncoder = nn.TransformerEncoder(seqTransEncoderLayer,\n num_layers=num_layers)\n\n self.encode_quant = partial(F.one_hot, num_classes=self.opt.num_quantizers)\n self.encode_action = partial(F.one_hot, num_classes=self.num_actions)\n\n self.quant_emb = nn.Linear(self.opt.num_quantizers, self.latent_dim)\n # if self.cond_mode != 'no_cond':\n if self.cond_mode == 'text':\n self.cond_emb = nn.Linear(self.clip_dim, self.latent_dim)\n elif self.cond_mode == 'action':\n self.cond_emb = nn.Linear(self.num_actions, self.latent_dim)\n else:\n raise KeyError(\"Unsupported condition mode!!!\")\n\n\n _num_tokens = opt.num_tokens + 1 # one dummy tokens for padding\n self.pad_id = opt.num_tokens\n\n # self.output_process = OutputProcess_Bert(out_feats=opt.num_tokens, latent_dim=latent_dim)\n self.output_process = OutputProcess(out_feats=code_dim, latent_dim=latent_dim)\n\n if shared_codebook:\n token_embed = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n self.token_embed_weight = token_embed.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n if share_weight:\n self.output_proj_weight = self.token_embed_weight\n self.output_proj_bias = None\n else:\n output_proj = nn.Parameter(torch.normal(mean=0, std=0.02, size=(_num_tokens, code_dim)))\n output_bias = nn.Parameter(torch.zeros(size=(_num_tokens,)))\n # self.output_proj_bias = 0\n self.output_proj_weight = output_proj.expand(opt.num_quantizers-1, _num_tokens, code_dim)\n self.output_proj_bias = output_bias.expand(opt.num_quantizers-1, _num_tokens)\n\n else:\n if share_weight:\n self.embed_proj_shared_weight = nn.Parameter(torch.normal(mean=0, std=0.02, size=(opt.num_quantizers - 2, _num_tokens, code_dim)))\n self.token_embed_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_weight_ = nn.Parameter(torch.normal(mean=0, std=0.02, size=(1, _num_tokens, code_dim)))\n self.output_proj_bias = None\n self.registered = False\n else:\n output_proj_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n\n self.output_proj_weight = nn.Parameter(output_proj_weight)\n self.output_proj_bias = nn.Parameter(torch.zeros(size=(opt.num_quantizers, _num_tokens)))\n token_embed_weight = torch.normal(mean=0, std=0.02,\n size=(opt.num_quantizers - 1, _num_tokens, code_dim))\n self.token_embed_weight = nn.Parameter(token_embed_weight)\n\n self.apply(self.__init_weights)\n self.shared_codebook = shared_codebook\n self.share_weight = share_weight\n\n if self.cond_mode == 'text':\n print('Loading CLIP...')\n self.clip_version = clip_version\n self.clip_model = self.load_and_freeze_clip(clip_version)\n\n # def\n\n def mask_cond(self, cond, force_mask=False):\n bs, d = cond.shape\n if force_mask:\n return torch.zeros_like(cond)\n elif self.training and self.cond_drop_prob > 
0.:\n mask = torch.bernoulli(torch.ones(bs, device=cond.device) * self.cond_drop_prob).view(bs, 1)\n return cond * (1. - mask)\n else:\n return cond\n\n def __init_weights(self, module):\n if isinstance(module, (nn.Linear, nn.Embedding)):\n module.weight.data.normal_(mean=0.0, std=0.02)\n if isinstance(module, nn.Linear) and module.bias is not None:\n module.bias.data.zero_()\n elif isinstance(module, nn.LayerNorm):\n module.bias.data.zero_()\n module.weight.data.fill_(1.0)\n\n def parameters_wo_clip(self):\n return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n\n def load_and_freeze_clip(self, clip_version):\n clip_model, clip_preprocess = clip.load(clip_version, device='cpu',\n jit=False) # Must set jit=False for training\n # Cannot run on cpu\n clip.model.convert_weights(\n clip_model) # Actually this line is unnecessary since clip by default already on float16\n # Date 0707: It's necessary, only unecessary when load directly to gpu. Disable if need to run on cpu\n\n # Freeze CLIP weights\n clip_model.eval()\n for p in clip_model.parameters():\n p.requires_grad = False\n\n return clip_model\n\n def encode_text(self, raw_text):\n device = next(self.parameters()).device\n text = clip.tokenize(raw_text, truncate=True).to(device)\n feat_clip_text = self.clip_model.encode_text(text).float()\n return feat_clip_text\n\n\n def q_schedule(self, bs, low, high):\n noise = uniform((bs,), device=self.opt.device)\n schedule = 1 - cosine_schedule(noise)\n return torch.round(schedule * (high - low)) + low\n\n def process_embed_proj_weight(self):\n if self.share_weight and (not self.shared_codebook):\n # if not self.registered:\n self.output_proj_weight = torch.cat([self.embed_proj_shared_weight, self.output_proj_weight_], dim=0)\n self.token_embed_weight = torch.cat([self.token_embed_weight_, self.embed_proj_shared_weight], dim=0)\n # self.registered = True\n\n def output_project(self, logits, qids):\n '''\n :logits: (bs, code_dim, seqlen)\n :qids: (bs)\n\n :return:\n -logits (bs, ntoken, seqlen)\n '''\n # (num_qlayers-1, num_token, code_dim) -> (bs, ntoken, code_dim)\n output_proj_weight = self.output_proj_weight[qids]\n # (num_qlayers, ntoken) -> (bs, ntoken)\n output_proj_bias = None if self.output_proj_bias is None else self.output_proj_bias[qids]\n\n output = torch.einsum('bnc, bcs->bns', output_proj_weight, logits)\n if output_proj_bias is not None:\n output += output + output_proj_bias.unsqueeze(-1)\n return output\n\n\n\n def trans_forward(self, motion_codes, qids, cond, padding_mask, force_mask=False):\n '''\n :param motion_codes: (b, seqlen, d)\n :padding_mask: (b, seqlen), all pad positions are TRUE else FALSE\n :param qids: (b), quantizer layer ids\n :param cond: (b, embed_dim) for text, (b, num_actions) for action\n :return:\n -logits: (b, num_token, seqlen)\n '''\n cond = self.mask_cond(cond, force_mask=force_mask)\n\n # (b, seqlen, d) -> (seqlen, b, latent_dim)\n x = self.input_process(motion_codes)\n\n # (b, num_quantizer)\n q_onehot = self.encode_quant(qids).float().to(x.device)\n\n q_emb = self.quant_emb(q_onehot).unsqueeze(0) # (1, b, latent_dim)\n cond = self.cond_emb(cond).unsqueeze(0) # (1, b, latent_dim)\n\n x = self.position_enc(x)\n xseq = torch.cat([cond, q_emb, x], dim=0) # (seqlen+2, b, latent_dim)\n\n padding_mask = torch.cat([torch.zeros_like(padding_mask[:, 0:2]), padding_mask], dim=1) # (b, seqlen+2)\n output = self.seqTransEncoder(xseq, src_key_padding_mask=padding_mask)[2:] # (seqlen, b, e)\n logits = self.output_process(output)\n 
return logits\n\n def forward_with_cond_scale(self,\n motion_codes,\n q_id,\n cond_vector,\n padding_mask,\n cond_scale=3,\n force_mask=False):\n bs = motion_codes.shape[0]\n # if cond_scale == 1:\n qids = torch.full((bs,), q_id, dtype=torch.long, device=motion_codes.device)\n if force_mask:\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n logits = self.output_project(logits, qids-1)\n return logits\n\n logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask)\n logits = self.output_project(logits, qids-1)\n if cond_scale == 1:\n return logits\n\n aux_logits = self.trans_forward(motion_codes, qids, cond_vector, padding_mask, force_mask=True)\n aux_logits = self.output_project(aux_logits, qids-1)\n\n scaled_logits = aux_logits + (logits - aux_logits) * cond_scale\n return scaled_logits\n\n def forward(self, all_indices, y, m_lens):\n '''\n :param all_indices: (b, n, q)\n :param y: raw text for cond_mode=text, (b, ) for cond_mode=action\n :m_lens: (b,)\n :return:\n '''\n\n self.process_embed_proj_weight()\n\n bs, ntokens, num_quant_layers = all_indices.shape\n device = all_indices.device\n\n # Positions that are PADDED are ALL FALSE\n non_pad_mask = lengths_to_mask(m_lens, ntokens) # (b, n)\n\n q_non_pad_mask = repeat(non_pad_mask, 'b n -> b n q', q=num_quant_layers)\n all_indices = torch.where(q_non_pad_mask, all_indices, self.pad_id) #(b, n, q)\n\n # randomly sample quantization layers to work on, [1, num_q)\n active_q_layers = q_schedule(bs, low=1, high=num_quant_layers, device=device)\n\n # print(self.token_embed_weight.shape, all_indices.shape)\n token_embed = repeat(self.token_embed_weight, 'q c d-> b c d q', b=bs)\n gather_indices = repeat(all_indices[..., :-1], 'b n q -> b n d q', d=token_embed.shape[2])\n # print(token_embed.shape, gather_indices.shape)\n all_codes = token_embed.gather(1, gather_indices) # (b, n, d, q-1)\n\n cumsum_codes = torch.cumsum(all_codes, dim=-1) #(b, n, d, q-1)\n\n active_indices = all_indices[torch.arange(bs), :, active_q_layers] # (b, n)\n history_sum = cumsum_codes[torch.arange(bs), :, :, active_q_layers - 1]\n\n force_mask = False\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(y)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(y).to(device).float()\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(bs, self.latent_dim).float().to(device)\n force_mask = True\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n logits = self.trans_forward(history_sum, active_q_layers, cond_vector, ~non_pad_mask, force_mask)\n logits = self.output_project(logits, active_q_layers-1)\n ce_loss, pred_id, acc = cal_performance(logits, active_indices, ignore_index=self.pad_id)\n\n return ce_loss, pred_id, acc\n\n @torch.no_grad()\n @eval_decorator\n def generate(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2,\n num_res_layers=-1, # If it's -1, use all.\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, 
self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n num_quant_layers = self.opt.num_quantizers if num_res_layers==-1 else num_res_layers+1\n\n for i in range(1, num_quant_layers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices\n\n @torch.no_grad()\n @eval_decorator\n def edit(self,\n motion_ids,\n conds,\n m_lens,\n temperature=1,\n topk_filter_thres=0.9,\n cond_scale=2\n ):\n\n # print(self.opt.num_quantizers)\n # assert len(timesteps) >= len(cond_scales) == self.opt.num_quantizers\n self.process_embed_proj_weight()\n\n device = next(self.parameters()).device\n seq_len = motion_ids.shape[1]\n batch_size = len(conds)\n\n if self.cond_mode == 'text':\n with torch.no_grad():\n cond_vector = self.encode_text(conds)\n elif self.cond_mode == 'action':\n cond_vector = self.enc_action(conds).to(device)\n elif self.cond_mode == 'uncond':\n cond_vector = torch.zeros(batch_size, self.latent_dim).float().to(device)\n else:\n raise NotImplementedError(\"Unsupported condition mode!!!\")\n\n # token_embed = repeat(self.token_embed_weight, 'c d -> b c d', b=batch_size)\n # gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n # history_sum = token_embed.gather(1, gathered_ids)\n\n # print(pa, seq_len)\n padding_mask = ~lengths_to_mask(m_lens, seq_len)\n # print(padding_mask.shape, motion_ids.shape)\n motion_ids = torch.where(padding_mask, self.pad_id, motion_ids)\n all_indices = [motion_ids]\n history_sum = 0\n\n for i in range(1, self.opt.num_quantizers):\n # print(f\"--> Working on {i}-th quantizer\")\n # Start from all tokens being 
masked\n # qids = torch.full((batch_size,), i, dtype=torch.long, device=motion_ids.device)\n token_embed = self.token_embed_weight[i-1]\n token_embed = repeat(token_embed, 'c d -> b c d', b=batch_size)\n gathered_ids = repeat(motion_ids, 'b n -> b n d', d=token_embed.shape[-1])\n history_sum += token_embed.gather(1, gathered_ids)\n\n logits = self.forward_with_cond_scale(history_sum, i, cond_vector, padding_mask, cond_scale=cond_scale)\n # logits = self.trans_forward(history_sum, qids, cond_vector, padding_mask)\n\n logits = logits.permute(0, 2, 1) # (b, seqlen, ntoken)\n # clean low prob token\n filtered_logits = top_k(logits, topk_filter_thres, dim=-1)\n\n pred_ids = gumbel_sample(filtered_logits, temperature=temperature, dim=-1) # (b, seqlen)\n\n # probs = F.softmax(filtered_logits, dim=-1) # (b, seqlen, ntoken)\n # # print(temperature, starting_temperature, steps_until_x0, timesteps)\n # # print(probs / temperature)\n # pred_ids = Categorical(probs / temperature).sample() # (b, seqlen)\n\n ids = torch.where(padding_mask, self.pad_id, pred_ids)\n\n motion_ids = ids\n all_indices.append(ids)\n\n all_indices = torch.stack(all_indices, dim=-1)\n # padding_mask = repeat(padding_mask, 'b n -> b n q', q=all_indices.shape[-1])\n # all_indices = torch.where(padding_mask, -1, all_indices)\n all_indices = torch.where(all_indices==self.pad_id, -1, all_indices)\n # all_indices = all_indices.masked_fill()\n return all_indices" }, { "identifier": "RVQVAE", "path": "models/vq/model.py", "snippet": "class RVQVAE(nn.Module):\n def __init__(self,\n args,\n input_width=263,\n nb_code=1024,\n code_dim=512,\n output_emb_width=512,\n down_t=3,\n stride_t=2,\n width=512,\n depth=3,\n dilation_growth_rate=3,\n activation='relu',\n norm=None):\n\n super().__init__()\n assert output_emb_width == code_dim\n self.code_dim = code_dim\n self.num_code = nb_code\n # self.quant = args.quantizer\n self.encoder = Encoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n self.decoder = Decoder(input_width, output_emb_width, down_t, stride_t, width, depth,\n dilation_growth_rate, activation=activation, norm=norm)\n rvqvae_config = {\n 'num_quantizers': args.num_quantizers,\n 'shared_codebook': args.shared_codebook,\n 'quantize_dropout_prob': args.quantize_dropout_prob,\n 'quantize_dropout_cutoff_index': 0,\n 'nb_code': nb_code,\n 'code_dim':code_dim, \n 'args': args,\n }\n self.quantizer = ResidualVQ(**rvqvae_config)\n\n def preprocess(self, x):\n # (bs, T, Jx3) -> (bs, Jx3, T)\n x = x.permute(0, 2, 1).float()\n return x\n\n def postprocess(self, x):\n # (bs, Jx3, T) -> (bs, T, Jx3)\n x = x.permute(0, 2, 1)\n return x\n\n def encode(self, x):\n N, T, _ = x.shape\n x_in = self.preprocess(x)\n x_encoder = self.encoder(x_in)\n # print(x_encoder.shape)\n code_idx, all_codes = self.quantizer.quantize(x_encoder, return_latent=True)\n # print(code_idx.shape)\n # code_idx = code_idx.view(N, -1)\n # (N, T, Q)\n # print()\n return code_idx, all_codes\n\n def forward(self, x):\n x_in = self.preprocess(x)\n # Encode\n x_encoder = self.encoder(x_in)\n\n ## quantization\n # x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5,\n # force_dropout_index=0) #TODO hardcode\n x_quantized, code_idx, commit_loss, perplexity = self.quantizer(x_encoder, sample_codebook_temp=0.5)\n\n # print(code_idx[0, :, 1])\n ## decoder\n x_out = self.decoder(x_quantized)\n # x_out = self.postprocess(x_decoder)\n return x_out, commit_loss, 
perplexity\n\n def forward_decoder(self, x):\n x_d = self.quantizer.get_codes_from_indices(x)\n # x_d = x_d.view(1, -1, self.code_dim).permute(0, 2, 1).contiguous()\n x = x_d.sum(dim=0).permute(0, 2, 1)\n\n # decoder\n x_out = self.decoder(x)\n # x_out = self.postprocess(x_decoder)\n return x_out" }, { "identifier": "EvalT2MOptions", "path": "options/eval_option.py", "snippet": "class EvalT2MOptions(BaseOptions):\n def initialize(self):\n BaseOptions.initialize(self)\n self.parser.add_argument('--which_epoch', type=str, default=\"latest\", help='Checkpoint you want to use, {latest, net_best_fid, etc}')\n self.parser.add_argument('--batch_size', type=int, default=32, help='Batch size')\n\n self.parser.add_argument('--ext', type=str, default='text2motion', help='Extension of the result file or folder')\n self.parser.add_argument(\"--num_batch\", default=2, type=int,\n help=\"Number of batch for generation\")\n self.parser.add_argument(\"--repeat_times\", default=1, type=int,\n help=\"Number of repetitions, per sample text prompt\")\n self.parser.add_argument(\"--cond_scale\", default=4, type=float,\n help=\"For classifier-free sampling - specifies the s parameter, as defined in the paper.\")\n self.parser.add_argument(\"--temperature\", default=1., type=float,\n help=\"Sampling Temperature.\")\n self.parser.add_argument(\"--topkr\", default=0.9, type=float,\n help=\"Filter out percentil low prop entries.\")\n self.parser.add_argument(\"--time_steps\", default=18, type=int,\n help=\"Mask Generate steps.\")\n self.parser.add_argument(\"--seed\", default=10107, type=int)\n\n self.parser.add_argument('--gumbel_sample', action=\"store_true\", help='True: gumbel sampling, False: categorical sampling.')\n self.parser.add_argument('--use_res_model', action=\"store_true\", help='Whether to use residual transformer.')\n # self.parser.add_argument('--est_length', action=\"store_true\", help='Training iterations')\n\n self.parser.add_argument('--res_name', type=str, default='tres_nlayer8_ld384_ff1024_rvq6ns_cdp0.2_sw', help='Model name of residual transformer')\n self.parser.add_argument('--text_path', type=str, default=\"\", help='Text prompt file')\n\n\n self.parser.add_argument('-msec', '--mask_edit_section', nargs='*', type=str, help='Indicate sections for editing, use comma to separate the start and end of a section'\n 'type int will specify the token frame, type float will specify the ratio of seq_len')\n self.parser.add_argument('--text_prompt', default='', type=str, help=\"A text prompt to be generated. If empty, will take text prompts from dataset.\")\n self.parser.add_argument('--source_motion', default='example_data/000612.npy', type=str, help=\"Source motion path for editing. 
(new_joint_vecs format .npy file)\")\n self.parser.add_argument(\"--motion_length\", default=0, type=int,\n help=\"Motion length for generation, only applicable with single text prompt.\")\n self.is_train = False" }, { "identifier": "get_opt", "path": "utils/get_opt.py", "snippet": "def get_opt(opt_path, device, **kwargs):\n opt = Namespace()\n opt_dict = vars(opt)\n\n skip = ('-------------- End ----------------',\n '------------ Options -------------',\n '\\n')\n print('Reading', opt_path)\n with open(opt_path, 'r') as f:\n for line in f:\n if line.strip() not in skip:\n # print(line.strip())\n key, value = line.strip('\\n').split(': ')\n if value in ('True', 'False'):\n opt_dict[key] = (value == 'True')\n # print(key, value)\n elif is_float(value):\n opt_dict[key] = float(value)\n elif is_number(value):\n opt_dict[key] = int(value)\n else:\n opt_dict[key] = str(value)\n\n # print(opt)\n opt_dict['which_epoch'] = 'finest'\n opt.save_root = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)\n opt.model_dir = pjoin(opt.save_root, 'model')\n opt.meta_dir = pjoin(opt.save_root, 'meta')\n\n if opt.dataset_name == 't2m':\n opt.data_root = './dataset/HumanML3D/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 22\n opt.dim_pose = 263\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n elif opt.dataset_name == 'kit':\n opt.data_root = './dataset/KIT-ML/'\n opt.motion_dir = pjoin(opt.data_root, 'new_joint_vecs')\n opt.text_dir = pjoin(opt.data_root, 'texts')\n opt.joints_num = 21\n opt.dim_pose = 251\n opt.max_motion_length = 196\n opt.max_motion_frame = 196\n opt.max_motion_token = 55\n else:\n raise KeyError('Dataset not recognized')\n if not hasattr(opt, 'unit_length'):\n opt.unit_length = 4\n opt.dim_word = 300\n opt.num_classes = 200 // opt.unit_length\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.is_train = False\n opt.is_continue = False\n opt.device = device\n\n opt_dict.update(kwargs) # Overwrite with kwargs params\n\n return opt" }, { "identifier": "get_dataset_motion_loader", "path": "motion_loaders/dataset_motion_loader.py", "snippet": "def get_dataset_motion_loader(opt_path, batch_size, fname, device):\n opt = get_opt(opt_path, device)\n\n # Configurations of T2M dataset and KIT dataset is almost the same\n if opt.dataset_name == 't2m' or opt.dataset_name == 'kit':\n print('Loading dataset %s ...' 
% opt.dataset_name)\n\n mean = np.load(pjoin(opt.meta_dir, 'mean.npy'))\n std = np.load(pjoin(opt.meta_dir, 'std.npy'))\n\n w_vectorizer = WordVectorizer('./glove', 'our_vab')\n split_file = pjoin(opt.data_root, '%s.txt'%fname)\n dataset = Text2MotionDatasetEval(opt, mean, std, split_file, w_vectorizer)\n dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=4, drop_last=True,\n collate_fn=collate_fn, shuffle=True)\n else:\n raise KeyError('Dataset not Recognized !!')\n\n print('Ground Truth Dataset Loading Completed!!!')\n return dataloader, dataset" }, { "identifier": "EvaluatorModelWrapper", "path": "models/t2m_eval_wrapper.py", "snippet": "class EvaluatorModelWrapper(object):\n\n def __init__(self, opt):\n\n if opt.dataset_name == 't2m':\n opt.dim_pose = 263\n elif opt.dataset_name == 'kit':\n opt.dim_pose = 251\n else:\n raise KeyError('Dataset not Recognized!!!')\n\n opt.dim_word = 300\n opt.max_motion_length = 196\n opt.dim_pos_ohot = len(POS_enumerator)\n opt.dim_motion_hidden = 1024\n opt.max_text_len = 20\n opt.dim_text_hidden = 512\n opt.dim_coemb_hidden = 512\n\n # print(opt)\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_models(opt)\n self.opt = opt\n self.device = opt.device\n\n self.text_encoder.to(opt.device)\n self.motion_encoder.to(opt.device)\n self.movement_encoder.to(opt.device)\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not follow the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not follow the order of inputs\n def get_motion_embeddings(self, motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motions[..., :-4]).detach()\n m_lens = m_lens // self.opt.unit_length\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" } ]
import os
import torch
import utils.eval_t2m as eval_t2m
import numpy as np
from os.path import join as pjoin
from models.mask_transformer.transformer import MaskTransformer, ResidualTransformer
from models.vq.model import RVQVAE
from options.eval_option import EvalT2MOptions
from utils.get_opt import get_opt
from motion_loaders.dataset_motion_loader import get_dataset_motion_loader
from models.t2m_eval_wrapper import EvaluatorModelWrapper
from utils.fixseed import fixseed
14,519
def load_vq_model(vq_opt):
    # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt')
    vq_model = RVQVAE(vq_opt,
                      dim_pose,
                      vq_opt.nb_code,
                      vq_opt.code_dim,
                      vq_opt.output_emb_width,
                      vq_opt.down_t,
                      vq_opt.stride_t,
                      vq_opt.width,
                      vq_opt.depth,
                      vq_opt.dilation_growth_rate,
                      vq_opt.vq_act,
                      vq_opt.vq_norm)
    ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'),
                      map_location=opt.device)
    model_key = 'vq_model' if 'vq_model' in ckpt else 'net'
    vq_model.load_state_dict(ckpt[model_key])
    print(f'Loading VQ Model {vq_opt.name} Completed!')
    return vq_model, vq_opt


def load_trans_model(model_opt, which_model):
    t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim,
                                      cond_mode='text',
                                      latent_dim=model_opt.latent_dim,
                                      ff_size=model_opt.ff_size,
                                      num_layers=model_opt.n_layers,
                                      num_heads=model_opt.n_heads,
                                      dropout=model_opt.dropout,
                                      clip_dim=512,
                                      cond_drop_prob=model_opt.cond_drop_prob,
                                      clip_version=clip_version,
                                      opt=model_opt)
    ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model),
                      map_location=opt.device)
    model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans'
    # print(ckpt.keys())
    missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False)
    assert len(unexpected_keys) == 0
    assert all([k.startswith('clip_model.') for k in missing_keys])
    print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!')
    return t2m_transformer


def load_res_model(res_opt):
    res_opt.num_quantizers = vq_opt.num_quantizers
    res_opt.num_tokens = vq_opt.nb_code
    res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim,
                                          cond_mode='text',
                                          latent_dim=res_opt.latent_dim,
                                          ff_size=res_opt.ff_size,
                                          num_layers=res_opt.n_layers,
                                          num_heads=res_opt.n_heads,
                                          dropout=res_opt.dropout,
                                          clip_dim=512,
                                          shared_codebook=vq_opt.shared_codebook,
                                          cond_drop_prob=res_opt.cond_drop_prob,
                                          # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None,
                                          share_weight=res_opt.share_weight,
                                          clip_version=clip_version,
                                          opt=res_opt)
    ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'),
                      map_location=opt.device)
    missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False)
    assert len(unexpected_keys) == 0
    assert all([k.startswith('clip_model.') for k in missing_keys])
    print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!')
    return res_transformer


if __name__ == '__main__':
    parser = EvalT2MOptions()
    opt = parser.parse()
    fixseed(opt.seed)

    opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id))
    torch.autograd.set_detect_anomaly(True)

    dim_pose = 251 if opt.dataset_name == 'kit' else 263

    # out_dir = pjoin(opt.check)
    root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    model_dir = pjoin(root_dir, 'model')
    out_dir = pjoin(root_dir, 'eval')
    os.makedirs(out_dir, exist_ok=True)

    out_path = pjoin(out_dir, "%s.log"%opt.ext)

    f = open(pjoin(out_path), 'w')

    model_opt_path = pjoin(root_dir, 'opt.txt')
def load_vq_model(vq_opt):
    # opt_path = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.vq_name, 'opt.txt')
    vq_model = RVQVAE(vq_opt,
                      dim_pose,
                      vq_opt.nb_code,
                      vq_opt.code_dim,
                      vq_opt.output_emb_width,
                      vq_opt.down_t,
                      vq_opt.stride_t,
                      vq_opt.width,
                      vq_opt.depth,
                      vq_opt.dilation_growth_rate,
                      vq_opt.vq_act,
                      vq_opt.vq_norm)
    ckpt = torch.load(pjoin(vq_opt.checkpoints_dir, vq_opt.dataset_name, vq_opt.name, 'model', 'net_best_fid.tar'),
                      map_location=opt.device)
    model_key = 'vq_model' if 'vq_model' in ckpt else 'net'
    vq_model.load_state_dict(ckpt[model_key])
    print(f'Loading VQ Model {vq_opt.name} Completed!')
    return vq_model, vq_opt


def load_trans_model(model_opt, which_model):
    t2m_transformer = MaskTransformer(code_dim=model_opt.code_dim,
                                      cond_mode='text',
                                      latent_dim=model_opt.latent_dim,
                                      ff_size=model_opt.ff_size,
                                      num_layers=model_opt.n_layers,
                                      num_heads=model_opt.n_heads,
                                      dropout=model_opt.dropout,
                                      clip_dim=512,
                                      cond_drop_prob=model_opt.cond_drop_prob,
                                      clip_version=clip_version,
                                      opt=model_opt)
    ckpt = torch.load(pjoin(model_opt.checkpoints_dir, model_opt.dataset_name, model_opt.name, 'model', which_model),
                      map_location=opt.device)
    model_key = 't2m_transformer' if 't2m_transformer' in ckpt else 'trans'
    # print(ckpt.keys())
    missing_keys, unexpected_keys = t2m_transformer.load_state_dict(ckpt[model_key], strict=False)
    assert len(unexpected_keys) == 0
    assert all([k.startswith('clip_model.') for k in missing_keys])
    print(f'Loading Mask Transformer {opt.name} from epoch {ckpt["ep"]}!')
    return t2m_transformer


def load_res_model(res_opt):
    res_opt.num_quantizers = vq_opt.num_quantizers
    res_opt.num_tokens = vq_opt.nb_code
    res_transformer = ResidualTransformer(code_dim=vq_opt.code_dim,
                                          cond_mode='text',
                                          latent_dim=res_opt.latent_dim,
                                          ff_size=res_opt.ff_size,
                                          num_layers=res_opt.n_layers,
                                          num_heads=res_opt.n_heads,
                                          dropout=res_opt.dropout,
                                          clip_dim=512,
                                          shared_codebook=vq_opt.shared_codebook,
                                          cond_drop_prob=res_opt.cond_drop_prob,
                                          # codebook=vq_model.quantizer.codebooks[0] if opt.fix_token_emb else None,
                                          share_weight=res_opt.share_weight,
                                          clip_version=clip_version,
                                          opt=res_opt)
    ckpt = torch.load(pjoin(res_opt.checkpoints_dir, res_opt.dataset_name, res_opt.name, 'model', 'net_best_fid.tar'),
                      map_location=opt.device)
    missing_keys, unexpected_keys = res_transformer.load_state_dict(ckpt['res_transformer'], strict=False)
    assert len(unexpected_keys) == 0
    assert all([k.startswith('clip_model.') for k in missing_keys])
    print(f'Loading Residual Transformer {res_opt.name} from epoch {ckpt["ep"]}!')
    return res_transformer


if __name__ == '__main__':
    parser = EvalT2MOptions()
    opt = parser.parse()
    fixseed(opt.seed)

    opt.device = torch.device("cpu" if opt.gpu_id == -1 else "cuda:" + str(opt.gpu_id))
    torch.autograd.set_detect_anomaly(True)

    dim_pose = 251 if opt.dataset_name == 'kit' else 263

    # out_dir = pjoin(opt.check)
    root_dir = pjoin(opt.checkpoints_dir, opt.dataset_name, opt.name)
    model_dir = pjoin(root_dir, 'model')
    out_dir = pjoin(root_dir, 'eval')
    os.makedirs(out_dir, exist_ok=True)

    out_path = pjoin(out_dir, "%s.log"%opt.ext)

    f = open(pjoin(out_path), 'w')

    model_opt_path = pjoin(root_dir, 'opt.txt')
model_opt = get_opt(model_opt_path, device=opt.device)
4
2023-11-29 19:21:27+00:00
24k
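The fields above close one record: the cross-file context snippets, the import block and cropped code of the evaluation script, the ground-truth next line, the gold snippet index, a timestamp, and the context-length bucket. For this record the ground-truth line calls get_opt, and the gold snippet index (4) appears to point at the get_opt entry in the context list. Below is a minimal sketch of one way such a record could be consumed for next-line prediction; it assumes the record has been parsed into a Python dict keyed by the dataset's field names, and build_prompt / exact_match / generate_next_line are hypothetical helper names, not part of the dataset.

# Minimal sketch, assuming `record` is a dict with keys such as
# "context", "cropped_code", "next_line", and "gold_snippet_index".
def build_prompt(record: dict) -> str:
    # Prepend the gold cross-file snippet to the in-file prefix.
    gold = record["context"][record["gold_snippet_index"]]["snippet"]
    return gold + "\n\n" + record["cropped_code"]

def exact_match(record: dict, generate_next_line) -> bool:
    # `generate_next_line` stands in for any model call that returns one line.
    prediction = generate_next_line(build_prompt(record))
    return prediction.strip() == record["next_line"].strip()

Other retrieval strategies (for example concatenating several context snippets, or none) are equally possible; the sketch only uses the single snippet singled out by the gold index.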
dvlab-research/LLMGA
llmga/diffusers/src/diffusers/pipelines/ddpm/pipeline_ddpm.py
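The next record targets diffusers' pipeline_ddpm.py, which defines the DDPMPipeline class on top of the DiffusionPipeline base class listed in the context snippets that follow. As a point of reference, here is a minimal, hedged usage sketch of that pipeline; the "google/ddpm-cat-256" checkpoint name is an assumption taken from the public Hub, not from this record.

# Minimal sketch, assuming the `diffusers` package is installed and the
# public "google/ddpm-cat-256" checkpoint is available.
import torch
from diffusers import DDPMPipeline

pipe = DDPMPipeline.from_pretrained("google/ddpm-cat-256")
pipe.to("cuda" if torch.cuda.is_available() else "cpu")

# Unconditional sampling; fewer denoising steps trades quality for speed.
image = pipe(batch_size=1, num_inference_steps=50).images[0]
image.save("ddpm_sample.png")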
[ { "identifier": "randn_tensor", "path": "llmga/diffusers/src/diffusers/utils/torch_utils.py", "snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: Optional[\"torch.dtype\"] = None,\n layout: Optional[\"torch.layout\"] = None,\n):\n \"\"\"A helper function to create random tensors on the desired `device` with the desired `dtype`. When\n passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor\n is always created on the CPU.\n \"\"\"\n # device on which tensor is created defaults to device\n rand_device = device\n batch_size = shape[0]\n\n layout = layout or torch.strided\n device = device or torch.device(\"cpu\")\n\n if generator is not None:\n gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type\n if gen_device_type != device.type and gen_device_type == \"cpu\":\n rand_device = \"cpu\"\n if device != \"mps\":\n logger.info(\n f\"The passed generator was created on 'cpu' even though a tensor on {device} was expected.\"\n f\" Tensors will be created on 'cpu' and then moved to {device}. Note that one can probably\"\n f\" slighly speed up this function by passing a generator that was created on the {device} device.\"\n )\n elif gen_device_type != device.type and gen_device_type == \"cuda\":\n raise ValueError(f\"Cannot generate a {device} tensor from a generator of type {gen_device_type}.\")\n\n # make sure generator list of length 1 is treated like a non-list\n if isinstance(generator, list) and len(generator) == 1:\n generator = generator[0]\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)\n\n return latents" }, { "identifier": "DiffusionPipeline", "path": "llmga/diffusers/src/diffusers/pipelines/pipeline_utils.py", "snippet": "class DiffusionPipeline(ConfigMixin, PushToHubMixin):\n r\"\"\"\n Base class for all pipelines.\n\n [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and\n provides methods for loading, downloading and saving models. 
It also includes methods to:\n\n - move all PyTorch modules to the device of your choice\n - enable/disable the progress bar for the denoising iteration\n\n Class attributes:\n\n - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the\n diffusion pipeline's components.\n - **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the\n pipeline to function (should be overridden by subclasses).\n \"\"\"\n config_name = \"model_index.json\"\n model_cpu_offload_seq = None\n _optional_components = []\n _exclude_from_cpu_offload = []\n _load_connected_pipes = False\n _is_onnx = False\n\n def register_modules(self, **kwargs):\n # import it here to avoid circular import\n from diffusers import pipelines\n\n for name, module in kwargs.items():\n # retrieve library\n if module is None:\n register_dict = {name: (None, None)}\n else:\n # register the config from the original module, not the dynamo compiled one\n if is_compiled_module(module):\n not_compiled_module = module._orig_mod\n else:\n not_compiled_module = module\n\n library = not_compiled_module.__module__.split(\".\")[0]\n\n # check if the module is a pipeline module\n module_path_items = not_compiled_module.__module__.split(\".\")\n pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None\n\n path = not_compiled_module.__module__.split(\".\")\n is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)\n\n # if library is not in LOADABLE_CLASSES, then it is a custom module.\n # Or if it's a pipeline module, then the module is inside the pipeline\n # folder so we set the library to module name.\n if is_pipeline_module:\n library = pipeline_dir\n elif library not in LOADABLE_CLASSES:\n library = not_compiled_module.__module__\n\n # retrieve class_name\n class_name = not_compiled_module.__class__.__name__\n\n register_dict = {name: (library, class_name)}\n\n # save model index config\n self.register_to_config(**register_dict)\n\n # set models\n setattr(self, name, module)\n\n def __setattr__(self, name: str, value: Any):\n if name in self.__dict__ and hasattr(self.config, name):\n # We need to overwrite the config if name exists in config\n if isinstance(getattr(self.config, name), (tuple, list)):\n if value is not None and self.config[name][0] is not None:\n class_library_tuple = (value.__module__.split(\".\")[0], value.__class__.__name__)\n else:\n class_library_tuple = (None, None)\n\n self.register_to_config(**{name: class_library_tuple})\n else:\n self.register_to_config(**{name: value})\n\n super().__setattr__(name, value)\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its\n class implements both a save and loading method. The pipeline is easily reloaded using the\n [`~DiffusionPipeline.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a pipeline to. 
Will be created if it doesn't exist.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n model_index_dict = dict(self.config)\n model_index_dict.pop(\"_class_name\", None)\n model_index_dict.pop(\"_diffusers_version\", None)\n model_index_dict.pop(\"_module\", None)\n model_index_dict.pop(\"_name_or_path\", None)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n expected_modules, optional_kwargs = self._get_signature_keys(self)\n\n def is_saveable_module(name, value):\n if name not in expected_modules:\n return False\n if name in self._optional_components and value[0] is None:\n return False\n return True\n\n model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)}\n for pipeline_component_name in model_index_dict.keys():\n sub_model = getattr(self, pipeline_component_name)\n model_cls = sub_model.__class__\n\n # Dynamo wraps the original model in a private class.\n # I didn't find a public API to get the original class.\n if is_compiled_module(sub_model):\n sub_model = sub_model._orig_mod\n model_cls = sub_model.__class__\n\n save_method_name = None\n # search for the model's base class in LOADABLE_CLASSES\n for library_name, library_classes in LOADABLE_CLASSES.items():\n if library_name in sys.modules:\n library = importlib.import_module(library_name)\n else:\n logger.info(\n f\"{library_name} is not installed. 
Cannot save {pipeline_component_name} as {library_classes} from {library_name}\"\n )\n\n for base_class, save_load_methods in library_classes.items():\n class_candidate = getattr(library, base_class, None)\n if class_candidate is not None and issubclass(model_cls, class_candidate):\n # if we found a suitable base class in LOADABLE_CLASSES then grab its save method\n save_method_name = save_load_methods[0]\n break\n if save_method_name is not None:\n break\n\n if save_method_name is None:\n logger.warn(f\"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.\")\n # make sure that unsaveable components are not tried to be loaded afterward\n self.register_to_config(**{pipeline_component_name: (None, None)})\n continue\n\n save_method = getattr(sub_model, save_method_name)\n\n # Call the save method with the argument safe_serialization only if it's supported\n save_method_signature = inspect.signature(save_method)\n save_method_accept_safe = \"safe_serialization\" in save_method_signature.parameters\n save_method_accept_variant = \"variant\" in save_method_signature.parameters\n\n save_kwargs = {}\n if save_method_accept_safe:\n save_kwargs[\"safe_serialization\"] = safe_serialization\n if save_method_accept_variant:\n save_kwargs[\"variant\"] = variant\n\n save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs)\n\n # finally save the config\n self.save_config(save_directory)\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n def to(self, *args, **kwargs):\n r\"\"\"\n Performs Pipeline dtype and/or device conversion. A torch.dtype and torch.device are inferred from the\n arguments of `self.to(*args, **kwargs).`\n\n <Tip>\n\n If the pipeline already has the correct torch.dtype and torch.device, then it is returned as is. 
Otherwise,\n the returned pipeline is a copy of self with the desired torch.dtype and torch.device.\n\n </Tip>\n\n\n Here are the ways to call `to`:\n\n - `to(dtype, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the specified\n [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype)\n - `to(device, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the specified\n [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device)\n - `to(device=None, dtype=None, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the\n specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) and\n [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype)\n\n Arguments:\n dtype (`torch.dtype`, *optional*):\n Returns a pipeline with the specified\n [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype)\n device (`torch.Device`, *optional*):\n Returns a pipeline with the specified\n [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device)\n silence_dtype_warnings (`str`, *optional*, defaults to `False`):\n Whether to omit warnings if the target `dtype` is not compatible with the target `device`.\n\n Returns:\n [`DiffusionPipeline`]: The pipeline converted to specified `dtype` and/or `dtype`.\n \"\"\"\n\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n if torch_dtype is not None:\n deprecate(\"torch_dtype\", \"0.25.0\", \"\")\n torch_device = kwargs.pop(\"torch_device\", None)\n if torch_device is not None:\n deprecate(\"torch_device\", \"0.25.0\", \"\")\n\n dtype_kwarg = kwargs.pop(\"dtype\", None)\n device_kwarg = kwargs.pop(\"device\", None)\n silence_dtype_warnings = kwargs.pop(\"silence_dtype_warnings\", False)\n\n if torch_dtype is not None and dtype_kwarg is not None:\n raise ValueError(\n \"You have passed both `torch_dtype` and `dtype` as a keyword argument. Please make sure to only pass `dtype`.\"\n )\n\n dtype = torch_dtype or dtype_kwarg\n\n if torch_device is not None and device_kwarg is not None:\n raise ValueError(\n \"You have passed both `torch_device` and `device` as a keyword argument. Please make sure to only pass `device`.\"\n )\n\n device = torch_device or device_kwarg\n\n dtype_arg = None\n device_arg = None\n if len(args) == 1:\n if isinstance(args[0], torch.dtype):\n dtype_arg = args[0]\n else:\n device_arg = torch.device(args[0]) if args[0] is not None else None\n elif len(args) == 2:\n if isinstance(args[0], torch.dtype):\n raise ValueError(\n \"When passing two arguments, make sure the first corresponds to `device` and the second to `dtype`.\"\n )\n device_arg = torch.device(args[0]) if args[0] is not None else None\n dtype_arg = args[1]\n elif len(args) > 2:\n raise ValueError(\"Please make sure to pass at most two arguments (`device` and `dtype`) `.to(...)`\")\n\n if dtype is not None and dtype_arg is not None:\n raise ValueError(\n \"You have passed `dtype` both as an argument and as a keyword argument. Please only pass one of the two.\"\n )\n\n dtype = dtype or dtype_arg\n\n if device is not None and device_arg is not None:\n raise ValueError(\n \"You have passed `device` both as an argument and as a keyword argument. 
Please only pass one of the two.\"\n )\n\n device = device or device_arg\n\n # throw warning if pipeline is in \"offloaded\"-mode but user tries to manually set to GPU.\n def module_is_sequentially_offloaded(module):\n if not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"):\n return False\n\n return hasattr(module, \"_hf_hook\") and not isinstance(\n module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook)\n )\n\n def module_is_offloaded(module):\n if not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0.dev0\"):\n return False\n\n return hasattr(module, \"_hf_hook\") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload)\n\n # .to(\"cuda\") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer\n pipeline_is_sequentially_offloaded = any(\n module_is_sequentially_offloaded(module) for _, module in self.components.items()\n )\n if pipeline_is_sequentially_offloaded and device and torch.device(device).type == \"cuda\":\n raise ValueError(\n \"It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading.\"\n )\n\n # Display a warning in this case (the operation succeeds but the benefits are lost)\n pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items())\n if pipeline_is_offloaded and device and torch.device(device).type == \"cuda\":\n logger.warning(\n f\"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. It is strongly recommended against doing so as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading.\"\n )\n\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded\n for module in modules:\n is_loaded_in_8bit = hasattr(module, \"is_loaded_in_8bit\") and module.is_loaded_in_8bit\n\n if is_loaded_in_8bit and dtype is not None:\n logger.warning(\n f\"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {torch_dtype} is not yet supported. Module is still in 8bit precision.\"\n )\n\n if is_loaded_in_8bit and device is not None:\n logger.warning(\n f\"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {torch_dtype} via `.to()` is not yet supported. Module is still on {module.device}.\"\n )\n else:\n module.to(device, dtype)\n\n if (\n module.dtype == torch.float16\n and str(device) in [\"cpu\"]\n and not silence_dtype_warnings\n and not is_offloaded\n ):\n logger.warning(\n \"Pipelines loaded with `dtype=torch.float16` cannot run with `cpu` device. It\"\n \" is not recommended to move them to `cpu` as running them will fail. 
Please make\"\n \" sure to use an accelerator to run the pipeline in inference, due to the lack of\"\n \" support for`float16` operations on this device in PyTorch. Please, remove the\"\n \" `torch_dtype=torch.float16` argument, or use another device for inference.\"\n )\n return self\n\n @property\n def device(self) -> torch.device:\n r\"\"\"\n Returns:\n `torch.device`: The torch device on which the pipeline is located.\n \"\"\"\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n for module in modules:\n return module.device\n\n return torch.device(\"cpu\")\n\n @property\n def dtype(self) -> torch.dtype:\n r\"\"\"\n Returns:\n `torch.dtype`: The torch dtype on which the pipeline is located.\n \"\"\"\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n for module in modules:\n return module.dtype\n\n return torch.float32\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):\n r\"\"\"\n Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights.\n\n The pipeline is set in evaluation mode (`model.eval()`) by default.\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline\n hosted on the Hub.\n - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights\n saved using\n [`~DiffusionPipeline.save_pretrained`].\n torch_dtype (`str` or `torch.dtype`, *optional*):\n Override the default `torch.dtype` and load the model with another dtype. If \"auto\" is passed, the\n dtype is automatically derived from the model's weights.\n custom_pipeline (`str`, *optional*):\n\n <Tip warning={true}>\n\n 🧪 This is an experimental feature and may change in the future.\n\n </Tip>\n\n Can be either:\n\n - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom\n pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines\n the custom pipeline.\n - A string, the *file name* of a community pipeline hosted on GitHub under\n [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file\n names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`\n instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the\n current main branch of GitHub.\n - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. 
The directory\n must contain a file called `pipeline.py` that defines the custom pipeline.\n\n For more information on how to load and create custom pipelines, please have a look at [Loading and\n Adding Custom\n Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n custom_revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id similar to\n `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a\n custom pipeline from GitHub, otherwise it defaults to `\"main\"` when loading from the Hub.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):\n A map that specifies where each submodule should go. It doesn’t need to be defined for each\n parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the\n same device.\n\n Set `device_map=\"auto\"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For\n more information about each option see [designing a device\n map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).\n max_memory (`Dict`, *optional*):\n A dictionary device identifier for the maximum memory. 
Will default to the maximum memory available for\n each GPU and the available CPU RAM if unset.\n offload_folder (`str` or `os.PathLike`, *optional*):\n The path to offload weights if device_map contains the value `\"disk\"`.\n offload_state_dict (`bool`, *optional*):\n If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if\n the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`\n when there is some disk offload.\n low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):\n Speed up model loading only loading the pretrained weights and not initializing the weights. This also\n tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.\n Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this\n argument to `True` will raise an error.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the safetensors weights are downloaded if they're available **and** if the\n safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors\n weights. If set to `False`, safetensors weights are not loaded.\n use_onnx (`bool`, *optional*, defaults to `None`):\n If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights\n will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is\n `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending\n with `.onnx` and `.pb`.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline\n class). The overwritten components are passed directly to the pipelines `__init__` method. See example\n below for more information.\n variant (`str`, *optional*):\n Load weights from a specified variant filename such as `\"fp16\"` or `\"ema\"`. 
This is ignored when\n loading `from_flax`.\n\n <Tip>\n\n To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with\n `huggingface-cli login`.\n\n </Tip>\n\n Examples:\n\n ```py\n >>> from diffusers import DiffusionPipeline\n\n >>> # Download pipeline from huggingface.co and cache.\n >>> pipeline = DiffusionPipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n\n >>> # Download pipeline that requires an authorization token\n >>> # For more information on access tokens, please refer to this section\n >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)\n >>> pipeline = DiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n\n >>> # Use a different scheduler\n >>> from diffusers import LMSDiscreteScheduler\n\n >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.scheduler = scheduler\n ```\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n resume_download = kwargs.pop(\"resume_download\", False)\n force_download = kwargs.pop(\"force_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n from_flax = kwargs.pop(\"from_flax\", False)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n custom_pipeline = kwargs.pop(\"custom_pipeline\", None)\n custom_revision = kwargs.pop(\"custom_revision\", None)\n provider = kwargs.pop(\"provider\", None)\n sess_options = kwargs.pop(\"sess_options\", None)\n device_map = kwargs.pop(\"device_map\", None)\n max_memory = kwargs.pop(\"max_memory\", None)\n offload_folder = kwargs.pop(\"offload_folder\", None)\n offload_state_dict = kwargs.pop(\"offload_state_dict\", False)\n low_cpu_mem_usage = kwargs.pop(\"low_cpu_mem_usage\", _LOW_CPU_MEM_USAGE_DEFAULT)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n use_onnx = kwargs.pop(\"use_onnx\", None)\n load_connected_pipeline = kwargs.pop(\"load_connected_pipeline\", False)\n\n # 1. Download the checkpoints and configs\n # use snapshot download here to get it working from from_pretrained\n if not os.path.isdir(pretrained_model_name_or_path):\n if pretrained_model_name_or_path.count(\"/\") > 1:\n raise ValueError(\n f'The provided pretrained_model_name_or_path \"{pretrained_model_name_or_path}\"'\n \" is neither a valid local path nor a valid repo id. Please check the parameter.\"\n )\n cached_folder = cls.download(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n resume_download=resume_download,\n force_download=force_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n from_flax=from_flax,\n use_safetensors=use_safetensors,\n use_onnx=use_onnx,\n custom_pipeline=custom_pipeline,\n custom_revision=custom_revision,\n variant=variant,\n load_connected_pipeline=load_connected_pipeline,\n **kwargs,\n )\n else:\n cached_folder = pretrained_model_name_or_path\n\n config_dict = cls.load_config(cached_folder)\n\n # pop out \"_ignore_files\" as it is only needed for download\n config_dict.pop(\"_ignore_files\", None)\n\n # 2. 
Define which model components should load variants\n # We retrieve the information by matching whether variant\n # model checkpoints exist in the subfolders\n model_variants = {}\n if variant is not None:\n for folder in os.listdir(cached_folder):\n folder_path = os.path.join(cached_folder, folder)\n is_folder = os.path.isdir(folder_path) and folder in config_dict\n variant_exists = is_folder and any(\n p.split(\".\")[1].startswith(variant) for p in os.listdir(folder_path)\n )\n if variant_exists:\n model_variants[folder] = variant\n\n # 3. Load the pipeline class, if using custom module then load it from the hub\n # if we load from explicit class, let's use it\n pipeline_class = _get_pipeline_class(\n cls,\n config_dict,\n load_connected_pipeline=load_connected_pipeline,\n custom_pipeline=custom_pipeline,\n cache_dir=cache_dir,\n revision=custom_revision,\n )\n\n # DEPRECATED: To be removed in 1.0.0\n if pipeline_class.__name__ == \"StableDiffusionInpaintPipeline\" and version.parse(\n version.parse(config_dict[\"_diffusers_version\"]).base_version\n ) <= version.parse(\"0.5.1\"):\n from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy\n\n pipeline_class = StableDiffusionInpaintPipelineLegacy\n\n deprecation_message = (\n \"You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the\"\n f\" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For\"\n \" better inpainting results, we strongly suggest using Stable Diffusion's official inpainting\"\n \" checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your\"\n f\" checkpoint {pretrained_model_name_or_path} to the format of\"\n \" https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain\"\n \" the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0.\"\n )\n deprecate(\"StableDiffusionInpaintPipelineLegacy\", \"1.0.0\", deprecation_message, standard_warn=False)\n\n # 4. 
Define expected modules given pipeline signature\n # and define non-None initialized modules (=`init_kwargs`)\n\n # some modules can be passed directly to the init\n # in this case they are already instantiated in `kwargs`\n # extract them here\n expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)\n passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}\n passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}\n\n init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)\n\n # define init kwargs and make sure that optional component modules are filtered out\n init_kwargs = {\n k: init_dict.pop(k)\n for k in optional_kwargs\n if k in init_dict and k not in pipeline_class._optional_components\n }\n init_kwargs = {**init_kwargs, **passed_pipe_kwargs}\n\n # remove `null` components\n def load_module(name, value):\n if value[0] is None:\n return False\n if name in passed_class_obj and passed_class_obj[name] is None:\n return False\n return True\n\n init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}\n\n # Special case: safety_checker must be loaded separately when using `from_flax`\n if from_flax and \"safety_checker\" in init_dict and \"safety_checker\" not in passed_class_obj:\n raise NotImplementedError(\n \"The safety checker cannot be automatically loaded when loading weights `from_flax`.\"\n \" Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker\"\n \" separately if you need it.\"\n )\n\n # 5. Throw nice warnings / errors for fast accelerate loading\n if len(unused_kwargs) > 0:\n logger.warning(\n f\"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored.\"\n )\n\n if low_cpu_mem_usage and not is_accelerate_available():\n low_cpu_mem_usage = False\n logger.warning(\n \"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the\"\n \" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install\"\n \" `accelerate` for faster and less memory-intense model loading. You can do so with: \\n```\\npip\"\n \" install accelerate\\n```\\n.\"\n )\n\n if device_map is not None and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `device_map=None`.\"\n )\n\n if low_cpu_mem_usage is True and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `low_cpu_mem_usage=False`.\"\n )\n\n if low_cpu_mem_usage is False and device_map is not None:\n raise ValueError(\n f\"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and\"\n \" dispatching. Please make sure to set `low_cpu_mem_usage=True`.\"\n )\n\n # import it here to avoid circular import\n from diffusers import pipelines\n\n # 6. 
Load each module in the pipeline\n for name, (library_name, class_name) in logging.tqdm(init_dict.items(), desc=\"Loading pipeline components...\"):\n # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names\n class_name = class_name[4:] if class_name.startswith(\"Flax\") else class_name\n\n # 6.2 Define all importable classes\n is_pipeline_module = hasattr(pipelines, library_name)\n importable_classes = ALL_IMPORTABLE_CLASSES\n loaded_sub_model = None\n\n # 6.3 Use passed sub model or load class_name from library_name\n if name in passed_class_obj:\n # if the model is in a pipeline module, then we load it from the pipeline\n # check that passed_class_obj has correct parent class\n maybe_raise_or_warn(\n library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module\n )\n\n loaded_sub_model = passed_class_obj[name]\n else:\n # load sub model\n loaded_sub_model = load_sub_model(\n library_name=library_name,\n class_name=class_name,\n importable_classes=importable_classes,\n pipelines=pipelines,\n is_pipeline_module=is_pipeline_module,\n pipeline_class=pipeline_class,\n torch_dtype=torch_dtype,\n provider=provider,\n sess_options=sess_options,\n device_map=device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n model_variants=model_variants,\n name=name,\n from_flax=from_flax,\n variant=variant,\n low_cpu_mem_usage=low_cpu_mem_usage,\n cached_folder=cached_folder,\n )\n logger.info(\n f\"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}.\"\n )\n\n init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)\n\n if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, \"README.md\")):\n modelcard = ModelCard.load(os.path.join(cached_folder, \"README.md\"))\n connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}\n load_kwargs = {\n \"cache_dir\": cache_dir,\n \"resume_download\": resume_download,\n \"force_download\": force_download,\n \"proxies\": proxies,\n \"local_files_only\": local_files_only,\n \"use_auth_token\": use_auth_token,\n \"revision\": revision,\n \"torch_dtype\": torch_dtype,\n \"custom_pipeline\": custom_pipeline,\n \"custom_revision\": custom_revision,\n \"provider\": provider,\n \"sess_options\": sess_options,\n \"device_map\": device_map,\n \"max_memory\": max_memory,\n \"offload_folder\": offload_folder,\n \"offload_state_dict\": offload_state_dict,\n \"low_cpu_mem_usage\": low_cpu_mem_usage,\n \"variant\": variant,\n \"use_safetensors\": use_safetensors,\n }\n\n def get_connected_passed_kwargs(prefix):\n connected_passed_class_obj = {\n k.replace(f\"{prefix}_\", \"\"): w for k, w in passed_class_obj.items() if k.split(\"_\")[0] == prefix\n }\n connected_passed_pipe_kwargs = {\n k.replace(f\"{prefix}_\", \"\"): w for k, w in passed_pipe_kwargs.items() if k.split(\"_\")[0] == prefix\n }\n\n connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs}\n return connected_passed_kwargs\n\n connected_pipes = {\n prefix: DiffusionPipeline.from_pretrained(\n repo_id, **load_kwargs.copy(), **get_connected_passed_kwargs(prefix)\n )\n for prefix, repo_id in connected_pipes.items()\n if repo_id is not None\n }\n\n for prefix, connected_pipe in connected_pipes.items():\n # add connected pipes to `init_kwargs` with <prefix>_<component_name>, e.g. 
\"prior_text_encoder\"\n init_kwargs.update(\n {\"_\".join([prefix, name]): component for name, component in connected_pipe.components.items()}\n )\n\n # 7. Potentially add passed objects if expected\n missing_modules = set(expected_modules) - set(init_kwargs.keys())\n passed_modules = list(passed_class_obj.keys())\n optional_modules = pipeline_class._optional_components\n if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules):\n for module in missing_modules:\n init_kwargs[module] = passed_class_obj.get(module, None)\n elif len(missing_modules) > 0:\n passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs\n raise ValueError(\n f\"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed.\"\n )\n\n # 8. Instantiate the pipeline\n model = pipeline_class(**init_kwargs)\n\n # 9. Save where the model was instantiated from\n model.register_to_config(_name_or_path=pretrained_model_name_or_path)\n return model\n\n @property\n def name_or_path(self) -> str:\n return getattr(self.config, \"_name_or_path\", None)\n\n @property\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. After calling\n [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from\n Accelerate's module hooks.\n \"\"\"\n for name, model in self.components.items():\n if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload:\n continue\n\n if not hasattr(model, \"_hf_hook\"):\n return self.device\n for module in model.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = \"cuda\"):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n\n Arguments:\n gpu_id (`int`, *optional*):\n The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.\n device (`torch.Device` or `str`, *optional*, defaults to \"cuda\"):\n The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will\n default to \"cuda\".\n \"\"\"\n if self.model_cpu_offload_seq is None:\n raise ValueError(\n \"Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set.\"\n )\n\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n torch_device = torch.device(device)\n device_index = torch_device.index\n\n if gpu_id is not None and device_index is not None:\n raise ValueError(\n f\"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}\"\n f\"Cannot pass both. 
Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}\"\n )\n\n # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0\n self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, \"_offload_gpu_id\", 0)\n\n device_type = torch_device.type\n device = torch.device(f\"{device_type}:{self._offload_gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n device_mod = getattr(torch, self.device.type, None)\n if hasattr(device_mod, \"empty_cache\") and device_mod.is_available():\n device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)}\n\n self._all_hooks = []\n hook = None\n for model_str in self.model_cpu_offload_seq.split(\"->\"):\n model = all_model_components.pop(model_str, None)\n if not isinstance(model, torch.nn.Module):\n continue\n\n _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook)\n self._all_hooks.append(hook)\n\n # CPU offload models that are not in the seq chain unless they are explicitly excluded\n # these models will stay on CPU until maybe_free_model_hooks is called\n # some models cannot be in the seq chain because they are iteratively called, such as controlnet\n for name, model in all_model_components.items():\n if not isinstance(model, torch.nn.Module):\n continue\n\n if name in self._exclude_from_cpu_offload:\n model.to(device)\n else:\n _, hook = cpu_offload_with_hook(model, device)\n self._all_hooks.append(hook)\n\n def maybe_free_model_hooks(self):\n r\"\"\"\n Function that offloads all components, removes all model hooks that were added when using\n `enable_model_cpu_offload` and then applies them again. In case the model has not been offloaded this function\n is a no-op. Make sure to add this function to the end of the `__call__` function of your pipeline so that it\n functions correctly when applying enable_model_cpu_offload.\n \"\"\"\n if not hasattr(self, \"_all_hooks\") or len(self._all_hooks) == 0:\n # `enable_model_cpu_offload` has not be called, so silently do nothing\n return\n\n for hook in self._all_hooks:\n # offload model and remove hook from model\n hook.offload()\n hook.remove()\n\n # make sure the model is in the same state as before calling it\n self.enable_model_cpu_offload()\n\n def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = \"cuda\"):\n r\"\"\"\n Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state\n dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU\n and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward`\n method called. Offloading happens on a submodule basis. Memory savings are higher than with\n `enable_model_cpu_offload`, but performance is lower.\n\n Arguments:\n gpu_id (`int`, *optional*):\n The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.\n device (`torch.Device` or `str`, *optional*, defaults to \"cuda\"):\n The PyTorch device type of the accelerator that shall be used in inference. 
If not specified, it will\n default to \"cuda\".\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.14.0\"):\n from accelerate import cpu_offload\n else:\n raise ImportError(\"`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher\")\n\n torch_device = torch.device(device)\n device_index = torch_device.index\n\n if gpu_id is not None and device_index is not None:\n raise ValueError(\n f\"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}\"\n f\"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}\"\n )\n\n # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0\n self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, \"_offload_gpu_id\", 0)\n\n device_type = torch_device.type\n device = torch.device(f\"{device_type}:{self._offload_gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n device_mod = getattr(torch, self.device.type, None)\n if hasattr(device_mod, \"empty_cache\") and device_mod.is_available():\n device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n for name, model in self.components.items():\n if not isinstance(model, torch.nn.Module):\n continue\n\n if name in self._exclude_from_cpu_offload:\n model.to(device)\n else:\n # make sure to offload buffers if not all high level weights\n # are of type nn.Module\n offload_buffers = len(model._parameters) > 0\n cpu_offload(model, device, offload_buffers=offload_buffers)\n\n @classmethod\n def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:\n r\"\"\"\n Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights.\n\n Parameters:\n pretrained_model_name (`str` or `os.PathLike`, *optional*):\n A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline\n hosted on the Hub.\n custom_pipeline (`str`, *optional*):\n Can be either:\n\n - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained\n pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines\n the custom pipeline.\n\n - A string, the *file name* of a community pipeline hosted on GitHub under\n [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file\n names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`\n instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the\n current `main` branch of GitHub.\n\n - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. 
The directory\n must contain a file called `pipeline.py` that defines the custom pipeline.\n\n <Tip warning={true}>\n\n 🧪 This is an experimental feature and may change in the future.\n\n </Tip>\n\n For more information on how to load and create custom pipelines, take a look at [How to contribute a\n community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline).\n\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n custom_revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id similar to\n `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a\n custom pipeline from GitHub, otherwise it defaults to `\"main\"` when loading from the Hub.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you're downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n variant (`str`, *optional*):\n Load weights from a specified variant filename such as `\"fp16\"` or `\"ema\"`. This is ignored when\n loading `from_flax`.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the safetensors weights are downloaded if they're available **and** if the\n safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors\n weights. If set to `False`, safetensors weights are not loaded.\n use_onnx (`bool`, *optional*, defaults to `False`):\n If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights\n will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is\n `False` for non-ONNX pipelines and `True` for ONNX pipelines. 
ONNX weights include both files ending\n with `.onnx` and `.pb`.\n\n Returns:\n `os.PathLike`:\n A path to the downloaded pipeline.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`.\n\n </Tip>\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n resume_download = kwargs.pop(\"resume_download\", False)\n force_download = kwargs.pop(\"force_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n from_flax = kwargs.pop(\"from_flax\", False)\n custom_pipeline = kwargs.pop(\"custom_pipeline\", None)\n custom_revision = kwargs.pop(\"custom_revision\", None)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n use_onnx = kwargs.pop(\"use_onnx\", None)\n load_connected_pipeline = kwargs.pop(\"load_connected_pipeline\", False)\n\n allow_pickle = False\n if use_safetensors is None:\n use_safetensors = True\n allow_pickle = True\n\n allow_patterns = None\n ignore_patterns = None\n\n model_info_call_error: Optional[Exception] = None\n if not local_files_only:\n try:\n info = model_info(\n pretrained_model_name,\n use_auth_token=use_auth_token,\n revision=revision,\n )\n except HTTPError as e:\n logger.warn(f\"Couldn't connect to the Hub: {e}.\\nWill try to load from local cache.\")\n local_files_only = True\n model_info_call_error = e # save error to reraise it if model is not cached locally\n\n if not local_files_only:\n config_file = hf_hub_download(\n pretrained_model_name,\n cls.config_name,\n cache_dir=cache_dir,\n revision=revision,\n proxies=proxies,\n force_download=force_download,\n resume_download=resume_download,\n use_auth_token=use_auth_token,\n )\n\n config_dict = cls._dict_from_json_file(config_file)\n\n ignore_filenames = config_dict.pop(\"_ignore_files\", [])\n\n # retrieve all folder_names that contain relevant files\n folder_names = [k for k, v in config_dict.items() if isinstance(v, list)]\n\n filenames = {sibling.rfilename for sibling in info.siblings}\n model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)\n\n if len(variant_filenames) == 0 and variant is not None:\n deprecation_message = (\n f\"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available.\"\n f\"The default model files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`\"\n \"if such variant modeling files are not available. 
Doing so will lead to an error in v0.24.0 as defaulting to non-variant\"\n \"modeling files is deprecated.\"\n )\n deprecate(\"no variant default\", \"0.24.0\", deprecation_message, standard_warn=False)\n\n # remove ignored filenames\n model_filenames = set(model_filenames) - set(ignore_filenames)\n variant_filenames = set(variant_filenames) - set(ignore_filenames)\n\n # if the whole pipeline is cached we don't have to ping the Hub\n if revision in DEPRECATED_REVISION_ARGS and version.parse(\n version.parse(__version__).base_version\n ) >= version.parse(\"0.22.0\"):\n warn_deprecated_model_variant(\n pretrained_model_name, use_auth_token, variant, revision, model_filenames\n )\n\n model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names}\n\n # all filenames compatible with variant will be added\n allow_patterns = list(model_filenames)\n\n # allow all patterns from non-model folders\n # this enables downloading schedulers, tokenizers, ...\n allow_patterns += [f\"{k}/*\" for k in folder_names if k not in model_folder_names]\n # also allow downloading config.json files with the model\n allow_patterns += [os.path.join(k, \"config.json\") for k in model_folder_names]\n\n allow_patterns += [\n SCHEDULER_CONFIG_NAME,\n CONFIG_NAME,\n cls.config_name,\n CUSTOM_PIPELINE_FILE_NAME,\n ]\n\n # retrieve passed components that should not be downloaded\n pipeline_class = _get_pipeline_class(\n cls,\n config_dict,\n load_connected_pipeline=load_connected_pipeline,\n custom_pipeline=custom_pipeline,\n cache_dir=cache_dir,\n revision=custom_revision,\n )\n expected_components, _ = cls._get_signature_keys(pipeline_class)\n passed_components = [k for k in expected_components if k in kwargs]\n\n if (\n use_safetensors\n and not allow_pickle\n and not is_safetensors_compatible(\n model_filenames, variant=variant, passed_components=passed_components\n )\n ):\n raise EnvironmentError(\n f\"Could not found the necessary `safetensors` weights in {model_filenames} (variant={variant})\"\n )\n if from_flax:\n ignore_patterns = [\"*.bin\", \"*.safetensors\", \"*.onnx\", \"*.pb\"]\n elif use_safetensors and is_safetensors_compatible(\n model_filenames, variant=variant, passed_components=passed_components\n ):\n ignore_patterns = [\"*.bin\", \"*.msgpack\"]\n\n use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx\n if not use_onnx:\n ignore_patterns += [\"*.onnx\", \"*.pb\"]\n\n safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(\".safetensors\")}\n safetensors_model_filenames = {f for f in model_filenames if f.endswith(\".safetensors\")}\n if (\n len(safetensors_variant_filenames) > 0\n and safetensors_model_filenames != safetensors_variant_filenames\n ):\n logger.warn(\n f\"\\nA mixture of {variant} and non-{variant} filenames will be loaded.\\nLoaded {variant} filenames:\\n[{', '.join(safetensors_variant_filenames)}]\\nLoaded non-{variant} filenames:\\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\\nIf this behavior is not expected, please check your folder structure.\"\n )\n else:\n ignore_patterns = [\"*.safetensors\", \"*.msgpack\"]\n\n use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx\n if not use_onnx:\n ignore_patterns += [\"*.onnx\", \"*.pb\"]\n\n bin_variant_filenames = {f for f in variant_filenames if f.endswith(\".bin\")}\n bin_model_filenames = {f for f in model_filenames if f.endswith(\".bin\")}\n if len(bin_variant_filenames) > 0 and bin_model_filenames != 
bin_variant_filenames:\n logger.warn(\n f\"\\nA mixture of {variant} and non-{variant} filenames will be loaded.\\nLoaded {variant} filenames:\\n[{', '.join(bin_variant_filenames)}]\\nLoaded non-{variant} filenames:\\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\\nIf this behavior is not expected, please check your folder structure.\"\n )\n\n # Don't download any objects that are passed\n allow_patterns = [\n p for p in allow_patterns if not (len(p.split(\"/\")) == 2 and p.split(\"/\")[0] in passed_components)\n ]\n\n if pipeline_class._load_connected_pipes:\n allow_patterns.append(\"README.md\")\n\n # Don't download index files of forbidden patterns either\n ignore_patterns = ignore_patterns + [f\"{i}.index.*json\" for i in ignore_patterns]\n\n re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns]\n re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns]\n\n expected_files = [f for f in filenames if not any(p.match(f) for p in re_ignore_pattern)]\n expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)]\n\n snapshot_folder = Path(config_file).parent\n pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files)\n\n if pipeline_is_cached and not force_download:\n # if the pipeline is cached, we can directly return it\n # else call snapshot_download\n return snapshot_folder\n\n user_agent = {\"pipeline_class\": cls.__name__}\n if custom_pipeline is not None and not custom_pipeline.endswith(\".py\"):\n user_agent[\"custom_pipeline\"] = custom_pipeline\n\n # download all allow_patterns - ignore_patterns\n try:\n cached_folder = snapshot_download(\n pretrained_model_name,\n cache_dir=cache_dir,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n allow_patterns=allow_patterns,\n ignore_patterns=ignore_patterns,\n user_agent=user_agent,\n )\n\n # retrieve pipeline class from local file\n cls_name = cls.load_config(os.path.join(cached_folder, \"model_index.json\")).get(\"_class_name\", None)\n cls_name = cls_name[4:] if cls_name.startswith(\"Flax\") else cls_name\n\n pipeline_class = getattr(diffusers, cls_name, None)\n\n if pipeline_class is not None and pipeline_class._load_connected_pipes:\n modelcard = ModelCard.load(os.path.join(cached_folder, \"README.md\"))\n connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], [])\n for connected_pipe_repo_id in connected_pipes:\n download_kwargs = {\n \"cache_dir\": cache_dir,\n \"resume_download\": resume_download,\n \"force_download\": force_download,\n \"proxies\": proxies,\n \"local_files_only\": local_files_only,\n \"use_auth_token\": use_auth_token,\n \"variant\": variant,\n \"use_safetensors\": use_safetensors,\n }\n DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs)\n\n return cached_folder\n\n except FileNotFoundError:\n # Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache.\n # This can happen in two cases:\n # 1. If the user passed `local_files_only=True` => we raise the error directly\n # 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error\n if model_info_call_error is None:\n # 1. user passed `local_files_only=True`\n raise\n else:\n # 2. 
we forced `local_files_only=True` when `model_info` failed\n raise EnvironmentError(\n f\"Cannot load model {pretrained_model_name}: model is not cached locally and an error occured\"\n \" while trying to fetch metadata from the Hub. Please check out the root cause in the stacktrace\"\n \" above.\"\n ) from model_info_call_error\n\n @staticmethod\n def _get_signature_keys(obj):\n parameters = inspect.signature(obj.__init__).parameters\n required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}\n optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})\n expected_modules = set(required_parameters.keys()) - {\"self\"}\n return expected_modules, optional_parameters\n\n @property\n def components(self) -> Dict[str, Any]:\n r\"\"\"\n The `self.components` property can be useful to run different pipelines with the same weights and\n configurations without reallocating additional memory.\n\n Returns (`dict`):\n A dictionary containing all the modules needed to initialize the pipeline.\n\n Examples:\n\n ```py\n >>> from diffusers import (\n ... StableDiffusionPipeline,\n ... StableDiffusionImg2ImgPipeline,\n ... StableDiffusionInpaintPipeline,\n ... )\n\n >>> text2img = StableDiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)\n >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)\n ```\n \"\"\"\n expected_modules, optional_parameters = self._get_signature_keys(self)\n components = {\n k: getattr(self, k) for k in self.config.keys() if not k.startswith(\"_\") and k not in optional_parameters\n }\n\n if set(components.keys()) != expected_modules:\n raise ValueError(\n f\"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected\"\n f\" {expected_modules} to be defined, but {components.keys()} are defined.\"\n )\n\n return components\n\n @staticmethod\n def numpy_to_pil(images):\n \"\"\"\n Convert a NumPy image or a batch of images to a PIL image.\n \"\"\"\n return numpy_to_pil(images)\n\n def progress_bar(self, iterable=None, total=None):\n if not hasattr(self, \"_progress_bar_config\"):\n self._progress_bar_config = {}\n elif not isinstance(self._progress_bar_config, dict):\n raise ValueError(\n f\"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.\"\n )\n\n if iterable is not None:\n return tqdm(iterable, **self._progress_bar_config)\n elif total is not None:\n return tqdm(total=total, **self._progress_bar_config)\n else:\n raise ValueError(\"Either `total` or `iterable` has to be defined.\")\n\n def set_progress_bar_config(self, **kwargs):\n self._progress_bar_config = kwargs\n\n def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):\n r\"\"\"\n Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this\n option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. 
Speed\n up during training is not guaranteed.\n\n <Tip warning={true}>\n\n ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes\n precedent.\n\n </Tip>\n\n Parameters:\n attention_op (`Callable`, *optional*):\n Override the default `None` operator for use as `op` argument to the\n [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)\n function of xFormers.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n\n >>> pipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-2-1\", torch_dtype=torch.float16)\n >>> pipe = pipe.to(\"cuda\")\n >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n >>> # Workaround for not accepting attention shape using VAE for Flash Attention\n >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)\n ```\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(True, attention_op)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(False)\n\n def set_use_memory_efficient_attention_xformers(\n self, valid: bool, attention_op: Optional[Callable] = None\n ) -> None:\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid, attention_op)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n for module in modules:\n fn_recursive_set_mem_eff(module)\n\n def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = \"auto\"):\n r\"\"\"\n Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor\n in slices to compute attention in several steps. For more than one attention head, the computation is performed\n sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.\n\n <Tip warning={true}>\n\n ⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch\n 2.0 or xFormers. These attention computations are already very memory efficient so you won't need to enable\n this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs!\n\n </Tip>\n\n Args:\n slice_size (`str` or `int`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPipeline\n\n >>> pipe = StableDiffusionPipeline.from_pretrained(\n ... \"runwayml/stable-diffusion-v1-5\",\n ... torch_dtype=torch.float16,\n ... use_safetensors=True,\n ... )\n\n >>> prompt = \"a photo of an astronaut riding a horse on mars\"\n >>> pipe.enable_attention_slicing()\n >>> image = pipe(prompt).images[0]\n ```\n \"\"\"\n self.set_attention_slice(slice_size)\n\n def disable_attention_slicing(self):\n r\"\"\"\n Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is\n computed in one step.\n \"\"\"\n # set slice_size = `None` to disable `attention slicing`\n self.enable_attention_slicing(None)\n\n def set_attention_slice(self, slice_size: Optional[int]):\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, \"set_attention_slice\")]\n\n for module in modules:\n module.set_attention_slice(slice_size)" }, { "identifier": "ImagePipelineOutput", "path": "llmga/diffusers/src/diffusers/pipelines/pipeline_utils.py", "snippet": "class ImagePipelineOutput(BaseOutput):\n \"\"\"\n Output class for image pipelines.\n\n Args:\n images (`List[PIL.Image.Image]` or `np.ndarray`)\n List of denoised PIL images of length `batch_size` or NumPy array of shape `(batch_size, height, width,\n num_channels)`.\n \"\"\"\n\n images: Union[List[PIL.Image.Image], np.ndarray]" } ]
from typing import List, Optional, Tuple, Union from ...utils.torch_utils import randn_tensor from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput import torch
18,721
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DDPMPipeline(DiffusionPipeline): r""" Pipeline for image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 1000, output_type: Optional[str] = "pil", return_dict: bool = True,
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. class DDPMPipeline(DiffusionPipeline): r""" Pipeline for image generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet2DModel`]): A `UNet2DModel` to denoise the encoded image latents. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded image. Can be one of [`DDPMScheduler`], or [`DDIMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, num_inference_steps: int = 1000, output_type: Optional[str] = "pil", return_dict: bool = True,
) -> Union[ImagePipelineOutput, Tuple]:
2
2023-11-27 18:46:55+00:00
24k
JiahuiLei/GART
solver.py
[ { "identifier": "prepare_real_seq", "path": "lib_data/get_data.py", "snippet": "def prepare_real_seq(\n seq_name,\n dataset_mode,\n split=\"train\",\n image_zoom_ratio=0.5,\n balance=False,\n ins_avt_wild_start_end_skip=None,\n):\n logging.info(\"Prepare real seq: {}\".format(seq_name))\n # * Get dataset\n if dataset_mode == \"ubcfashion\":\n dataset = UBCFasionDataset(\n data_root=\"./data/ubcfashion/\",\n video_list=[seq_name],\n image_zoom_ratio=image_zoom_ratio,\n start_end_skip=ins_avt_wild_start_end_skip,\n )\n elif dataset_mode == \"people_snapshot\":\n dataset = InstantAvatarDataset(\n noisy_flag=False,\n data_root=\"./data/people_snapshot/\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n )\n print(\"Load Instant Avatar processed PeopleSnapshot\")\n elif dataset_mode == \"zju\":\n dataset = ZJUDataset(\n data_root=\"./data/zju_mocap\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n )\n elif dataset_mode == \"instant_avatar_wild\":\n # assert image_zoom_ratio == 1.0, \"Check! in the wild data should use 1.0\"\n if image_zoom_ratio != 1.0:\n logging.warning(\n f\"Check! in the wild data should use 1.0, but got {image_zoom_ratio}\"\n )\n dataset = InstantAvatarWildDataset(\n data_root=\"./data/insav_wild\",\n video_name=seq_name,\n split=split,\n image_zoom_ratio=image_zoom_ratio,\n start_end_skip=ins_avt_wild_start_end_skip,\n )\n elif dataset_mode == \"dog_demo\":\n dataset = DogDemoDataset(data_root=\"./data/dog_data_official/\", video_name=seq_name)\n else:\n raise NotImplementedError(\"Unknown mode: {}\".format(dataset_mode))\n\n # prepare an optimizable data provider\n optimizable_data_provider = RealDataOptimizablePoseProviderPose(\n dataset,\n balance=balance,\n )\n return optimizable_data_provider, dataset" }, { "identifier": "DatabasePoseProvider", "path": "lib_data/data_provider.py", "snippet": "class DatabasePoseProvider(nn.Module):\n def __init__(\n self,\n pose_dirs: list,\n da_pose_prob=0.1,\n da_range=[0.0, np.pi / 4],\n device=torch.device(\"cuda\"),\n ) -> None:\n super().__init__()\n self.device = device\n self.base_R = matrix_to_axis_angle(\n torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, \"sxyz\"))[None]\n )[0]\n self.base_R = self.base_R.float().to(self.device)\n\n self.da_pose_prob = da_pose_prob\n self.da_range = da_range\n\n self.data = []\n\n # cache the poses\n for d in pose_dirs:\n print(f\"Caching {d} ...\")\n for subject in tqdm(os.listdir(d)):\n sub_dir = os.path.join(d, subject)\n if not os.path.isdir(sub_dir):\n continue\n npz_files = [f for f in os.listdir(sub_dir) if f.endswith(\".npz\")]\n npz_files.sort()\n for fn in npz_files:\n try:\n npz_fn = os.path.join(sub_dir, fn)\n pose_data = np.load(npz_fn)\n amass_len = pose_data[\"poses\"].shape[0]\n smplx_to_smpl = list(range(66)) + [72, 73, 74, 117, 118, 119]\n poses = pose_data[\"poses\"][:, smplx_to_smpl].reshape(\n amass_len, 24, 3\n )\n self.data.append(poses.astype(np.float16))\n except:\n # print(f\"Error in {npz_fn}, skip!\")\n pass\n self.data = np.concatenate(self.data, axis=0)\n print(\n f\"Database has poses {len(self.data)} with DA-pose prob {self.da_pose_prob} and range {self.da_range}\"\n )\n return\n\n def forward(self, N: int):\n pose, trans = self.sample_pose(N)\n return pose, trans\n\n def sample_pose(self, N: int):\n # da pose\n pose_list = []\n for i in range(N):\n seed = np.random.rand()\n if seed > self.da_pose_prob:\n # from database\n idx = np.random.randint(len(self.data))\n pose = 
torch.from_numpy(self.data[idx]).float().to(self.device)\n else:\n # da pose\n pose = torch.zeros(24, 3).to(self.device)\n da_theta = float(np.random.uniform(*self.da_range))\n pose[1, -1] = da_theta\n pose[2, -1] = -da_theta\n pose[0] = self.base_R\n pose_list.append(pose)\n pose = torch.stack(pose_list, dim=0)\n trans = torch.zeros(N, 3).to(self.device)\n return pose, trans" }, { "identifier": "get_template", "path": "lib_gart/templates.py", "snippet": "def get_template(\n mode, init_beta, cano_pose_type, voxel_deformer_res, template_model_path=None\n):\n if mode == \"human\":\n template = SMPLTemplate(\n smpl_model_path=template_model_path,\n init_beta=init_beta,\n cano_pose_type=cano_pose_type,\n voxel_deformer_res=voxel_deformer_res,\n )\n elif mode == \"dog\":\n template = SMALTemplate(\n init_beta=init_beta,\n cano_pose_type=cano_pose_type,\n voxel_deformer_res=voxel_deformer_res,\n )\n else:\n raise ValueError(f\"Unknown mode {mode}\")\n return template" }, { "identifier": "GaussianTemplateModel", "path": "lib_gart/model.py", "snippet": "class GaussianTemplateModel(nn.Module):\n def __init__(\n self,\n template,\n add_bones: AdditionalBones,\n ##################################\n # attr config\n w_correction_flag=True,\n # w_rest_dim=0, # additional skinnign weight\n f_localcode_dim=0,\n max_sph_order=0,\n w_memory_type=\"point\",\n ##################################\n max_scale=0.1, # use sigmoid activation, can't be too large\n min_scale=0.0,\n # geo init\n init_mode=\"on_mesh\",\n opacity_init_value=0.9, # the init value of opacity\n # on mesh init params\n onmesh_init_subdivide_num=0,\n onmesh_init_scale_factor=1.0,\n onmesh_init_thickness_factor=0.5,\n # near mesh init params\n scale_init_value=0.01, # the init value of scale\n nearmesh_init_num=10000,\n nearmesh_init_std=0.1,\n ##################################\n ) -> None:\n super().__init__()\n\n self.template = template\n self.num_bones = template.voxel_deformer.num_bones\n self.add_bones = add_bones\n self.num_add_bones = add_bones.num_bones\n\n self.max_scale = max_scale\n self.min_scale = min_scale\n self._init_act(self.max_scale, self.min_scale)\n self.opacity_init_logit = self.o_inv_act(opacity_init_value)\n\n # * init geometry\n if init_mode == \"on_mesh\":\n x, q, s, o = get_on_mesh_init_geo_values(\n template,\n on_mesh_subdivide=onmesh_init_subdivide_num,\n scale_init_factor=onmesh_init_scale_factor,\n thickness_init_factor=onmesh_init_thickness_factor,\n max_scale=max_scale,\n min_scale=min_scale,\n s_inv_act=self.s_inv_act,\n opacity_init_logit=self.opacity_init_logit,\n )\n elif init_mode == \"near_mesh\":\n self.scale_init_logit = self.s_inv_act(scale_init_value)\n x, q, s, o = get_near_mesh_init_geo_values(\n template,\n scale_base_logit=self.scale_init_logit,\n opacity_base_logit=self.opacity_init_logit,\n random_init_num=nearmesh_init_num,\n random_init_std=nearmesh_init_std,\n )\n elif init_mode == \"in_mesh\":\n self.scale_init_logit = self.s_inv_act(scale_init_value)\n x, q, s, o = get_inside_mesh_init_geo_values(\n template,\n scale_base_logit=self.scale_init_logit,\n opacity_base_logit=self.opacity_init_logit,\n random_init_num=nearmesh_init_num,\n )\n else:\n raise NotImplementedError(f\"Unknown init_mode {init_mode}\")\n self._xyz = nn.Parameter(x)\n self._rotation = nn.Parameter(q)\n self._scaling = nn.Parameter(s)\n self._opacity = nn.Parameter(o)\n\n # * init attributes\n self.w_memory_type = w_memory_type\n assert self.w_memory_type in [\"point\", \"voxel\"], f\"Unknown {w_memory_type}\"\n\n 
self.max_sph_order = max_sph_order\n self.w_dc_dim = self.template.dim if w_correction_flag else 0\n self.w_rest_dim = self.add_bones.num_bones\n self.f_localcode_dim = f_localcode_dim\n\n sph_rest_dim = 3 * (sph_order2nfeat(self.max_sph_order) - 1)\n self._features_dc = nn.Parameter(torch.zeros_like(self._xyz))\n self._features_rest = nn.Parameter(torch.zeros(self.N, sph_rest_dim))\n\n # * Different implementation of smoothness\n if self.w_memory_type == \"point\":\n self._w_correction_dc = nn.Parameter(torch.zeros(self.N, self.w_dc_dim))\n self._w_correction_rest = nn.Parameter(\n torch.ones(self.N, self.w_rest_dim) * 1e-4\n )\n elif self.w_memory_type == \"voxel\":\n self._w_correction_dc = nn.Parameter(torch.zeros(self.N, 0))\n self._w_correction_rest = nn.Parameter(torch.zeros(self.N, 0))\n if self.w_dc_dim > 0:\n self.template.voxel_deformer.enable_voxel_correction()\n if self.w_rest_dim > 0:\n self.template.voxel_deformer.enable_additional_correction(\n self.w_rest_dim\n )\n elif self.w_memory_type == \"hash\":\n raise NotImplementedError(\"TODO\")\n else:\n raise NotImplementedError(f\"Unknown {w_memory_type}\")\n\n self._features_localcode = nn.Parameter(\n torch.zeros(self.N, self.f_localcode_dim)\n )\n\n assert self.f_localcode_dim == 0, \"TODO, add local mlp ablation\"\n\n # * States\n # warning, our code use N, instead of (N,1) as in GS code\n self.register_buffer(\"xyz_gradient_accum\", torch.zeros(self.N).float())\n self.register_buffer(\"xyz_gradient_denom\", torch.zeros(self.N).long())\n self.register_buffer(\"max_radii2D\", torch.zeros(self.N).float())\n\n self.op_update_exclude = [\"add_bones\"]\n if self.w_memory_type != \"point\":\n self.op_update_exclude.extend([\"w_dc_vox\", \"w_rest_vox\"])\n # self.summary()\n return\n\n def summary(self):\n # logging.info number of parameters per pytorch sub module\n msg = \"\"\n for name, param in self.named_parameters():\n if name.startswith(\"add_bones\"):\n continue # compact print\n msg = msg + f\"[{name}:{param.numel()/1e3:.1f}K] \" \n # logging.info(f\"{name}, {param.numel()/1e6:.3f}M\")\n logging.info(msg)\n return\n\n def _init_act(self, max_s_value, min_s_value):\n def s_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return min_s_value + torch.sigmoid(x) * (max_s_value - min_s_value)\n\n def s_inv_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n y = (x - min_s_value) / (max_s_value - min_s_value) + 1e-5\n y = torch.logit(y)\n assert not torch.isnan(\n y\n ).any(), f\"{x.min()}, {x.max()}, {y.min()}, {y.max()}\"\n return y\n\n def o_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return torch.sigmoid(x)\n\n def o_inv_act(x):\n if isinstance(x, float):\n x = torch.tensor(x).squeeze()\n return torch.logit(x)\n\n self.s_act = s_act\n self.s_inv_act = s_inv_act\n self.o_act = o_act\n self.o_inv_act = o_inv_act\n\n return\n\n @property\n def N(self):\n return len(self._xyz)\n\n @property\n def get_x(self):\n return self._xyz\n\n @property\n def get_R(self):\n return quaternion_to_matrix(self._rotation)\n\n @property\n def get_o(self):\n return self.o_act(self._opacity)\n\n @property\n def get_s(self):\n return self.s_act(self._scaling)\n\n @property\n def get_c(self):\n return torch.cat([self._features_dc, self._features_rest], dim=-1)\n\n def cache_for_fast(self):\n _cached_W, _ = self.template.forward(None, self._xyz)\n self._cached_W = _cached_W.detach().clone()\n return\n\n def forward(\n self, theta, trans, additional_dict={}, active_sph_order=None, 
fast=False\n ):\n # * fast will use the cached per point attr, no query anymore\n # TODO: the additional dict contain info to do flexible skinning: it can contain the As directly for optimization, or it can contain t index to query some buffers to provide As, or it can contain t along with the input theta to query some MLP;\n\n # TODO: if use vol memory, every forward update self.xxx, and remove them from parameters, pretend that the attributes are per point, but actually they are queried every forward\n\n # theta: B,24,3; trans: B,3\n B = len(theta)\n if active_sph_order is None:\n active_sph_order = self.max_sph_order\n else:\n assert (\n active_sph_order <= self.max_sph_order\n ), \"active_sph_order should be smaller\"\n sph_dim = 3 * sph_order2nfeat(active_sph_order)\n\n xyz = self.get_x\n mu_can = xyz\n frame_can = self.get_R\n s = self.get_s\n o = self.get_o\n sph = self.get_c[:, :sph_dim]\n\n mu_can = mu_can[None].expand(B, -1, -1)\n frame_can = frame_can[None].expand(B, -1, -1, -1)\n\n if fast:\n # only forward skeleton, no query voxel\n _, A = self.template.forward(theta, None)\n W = self._cached_W[None].expand(B, -1, -1)\n else:\n W, A = self.template.forward(theta, mu_can)\n if self._w_correction_dc.shape[-1] > 0:\n W = W + self._w_correction_dc[None]\n T = torch.einsum(\"bnj, bjrc -> bnrc\", W[..., : self.num_bones], A)\n\n # * additional correction here\n if \"pose\" not in additional_dict.keys():\n # maybe later we want to viz the different pose effect in cano\n additional_dict[\"pose\"] = theta.reshape(B, -1)[:, 3:]\n add_A = self.add_bones(**additional_dict)\n if add_A is not None:\n if theta.ndim == 2:\n global_axis_angle = theta[:, :3]\n else:\n global_axis_angle = theta[:, 0]\n global_orient_action = self.template.get_rot_action(global_axis_angle) # B,4,4\n add_A = torch.einsum(\"bij, bnjk -> bnik\", global_orient_action, add_A)\n\n if self.w_memory_type == \"point\":\n assert self._w_correction_rest.shape[-1] > 0\n add_W = self._w_correction_rest[None].expand(B, -1, -1)\n elif self.w_memory_type == \"voxel\":\n add_W = W[..., self.num_bones :]\n\n add_T = torch.einsum(\"bnj, bjrc -> bnrc\", add_W, add_A)\n T = T + add_T # Linear\n additional_dict[\"As\"] = add_A\n\n R, t = T[:, :, :3, :3], T[:, :, :3, 3] # B,N,3,3; B,N,3\n\n mu = torch.einsum(\"bnij,bnj->bni\", R, mu_can) + t # B,N,3\n frame = torch.einsum(\"bnij,bnjk->bnik\", R, frame_can) # B,N,3,3\n\n s = s[None].expand(B, -1, -1) # B,N,1\n o = o[None].expand(B, -1, -1) # B,N,1\n sph = sph[:, :sph_dim][None].expand(B, -1, -1) # B,N,C\n\n mu = mu + trans[:, None, :]\n\n return mu, frame, s, o, sph, additional_dict\n\n def compute_reg(self, K):\n # !can cancel the knn, but the w reg is critical\n if K > 0:\n xyz = self._xyz\n # todo: this can be cached and updated every several steps!!\n dist_sq, nn_ind, _ = knn_points(xyz[None], xyz[None], K=K, return_nn=False)\n nn_ind = nn_ind.squeeze(0)\n # reg the std inside knn\n q = self._rotation[nn_ind, :] # N,K,4\n s = self.get_s[nn_ind, :] # N,K,3\n o = self.get_o[nn_ind, :] # N,K,1\n q_std = q.std(dim=1).mean()\n s_std = s.std(dim=1).mean()\n o_std = o.std(dim=1).mean()\n\n cd = self._features_dc[nn_ind, :] # N,K,3\n ch = self._features_rest[nn_ind, :] # N,K,C\n cd_std = cd.std(dim=1).mean()\n ch_std = ch.std(dim=1).mean()\n if ch.shape[-1] == 0:\n ch_std = torch.zeros_like(ch_std)\n\n w = self._w_correction_dc[nn_ind, :] # N,K,3\n w_rest = self._w_correction_rest[nn_ind, :] # N,K,C\n f = self._features_localcode[nn_ind, :] # N,K,C\n w_std = w.std(dim=1).mean()\n w_rest_std 
= w_rest.std(dim=1).mean()\n f_std = f.std(dim=1).mean()\n if w.shape[-1] == 0:\n w_std = torch.zeros_like(cd_std)\n if w_rest.shape[-1] == 0:\n w_rest_std = torch.zeros_like(cd_std)\n if f.shape[-1] == 0:\n f_std = torch.zeros_like(cd_std)\n else:\n dummy = torch.zeros(1).to(self._xyz).squeeze()\n q_std, s_std, o_std = dummy, dummy, dummy\n cd_std, ch_std = dummy, dummy\n w_std, w_rest_std, f_std = dummy, dummy, dummy\n dist_sq = dummy\n\n w_norm = self._w_correction_dc.norm(dim=-1).mean() # N\n w_rest_norm = self._w_correction_rest.norm(dim=-1).mean() # N\n\n if self.w_memory_type == \"voxel\":\n # update the w related std and norm\n w_std = self.template.voxel_deformer.get_tv(\"dc\")\n w_rest_std = self.template.voxel_deformer.get_tv(\"rest\")\n w_norm = self.template.voxel_deformer.get_mag(\"dc\")\n w_rest_norm = self.template.voxel_deformer.get_mag(\"rest\")\n\n max_s_square = torch.mean((self.get_s.max(dim=1).values) ** 2)\n\n return (\n q_std,\n s_std,\n o_std,\n cd_std,\n ch_std,\n w_std,\n w_rest_std,\n f_std,\n w_norm,\n w_rest_norm,\n dist_sq.mean(),\n max_s_square,\n )\n\n def get_optimizable_list(\n self,\n lr_p=0.00016,\n lr_q=0.001,\n lr_s=0.005,\n lr_o=0.05,\n lr_sph=0.0025,\n lr_sph_rest=None,\n lr_w=0.001,\n lr_w_rest=0.001,\n lr_f=0.0001,\n ):\n lr_sph_rest = lr_sph / 20 if lr_sph_rest is None else lr_sph_rest\n l = [\n {\"params\": [self._xyz], \"lr\": lr_p, \"name\": \"xyz\"},\n {\"params\": [self._opacity], \"lr\": lr_o, \"name\": \"opacity\"},\n {\"params\": [self._scaling], \"lr\": lr_s, \"name\": \"scaling\"},\n {\"params\": [self._rotation], \"lr\": lr_q, \"name\": \"rotation\"},\n {\"params\": [self._features_dc], \"lr\": lr_sph, \"name\": \"f_dc\"},\n {\"params\": [self._features_rest], \"lr\": lr_sph_rest, \"name\": \"f_rest\"},\n {\"params\": [self._w_correction_dc], \"lr\": lr_w, \"name\": \"w_dc\"},\n {\"params\": [self._w_correction_rest], \"lr\": lr_w_rest, \"name\": \"w_rest\"},\n {\"params\": [self._features_localcode], \"lr\": lr_f, \"name\": \"f_localcode\"},\n ]\n if self.w_memory_type == \"voxel\":\n if self.w_dc_dim > 0:\n l.append(\n {\n \"params\": [self.template.voxel_deformer.voxel_w_correction],\n \"lr\": lr_w,\n \"name\": \"w_dc_vox\",\n }\n )\n if self.w_rest_dim > 0:\n l.append(\n {\n \"params\": [self.template.voxel_deformer.additional_correction],\n \"lr\": lr_w_rest,\n \"name\": \"w_rest_vox\",\n }\n )\n return l\n\n # * Gaussian Control\n def record_xyz_grad_radii(self, viewspace_point_tensor, radii, update_filter):\n # Record the gradient norm, invariant across different poses\n assert len(viewspace_point_tensor) == self.N\n self.xyz_gradient_accum[update_filter] += torch.norm(\n viewspace_point_tensor.grad[update_filter, :2], dim=-1, keepdim=False\n )\n self.xyz_gradient_denom[update_filter] += 1\n self.max_radii2D[update_filter] = torch.max(\n self.max_radii2D[update_filter], radii[update_filter]\n )\n return\n\n def _densification_postprocess(\n self,\n optimizer,\n new_xyz,\n new_r,\n new_s,\n new_o,\n new_sph_dc,\n new_sph_rest,\n new_w_dc,\n new_w_rest,\n new_localcode,\n ):\n d = {\n \"xyz\": new_xyz,\n \"f_dc\": new_sph_dc,\n \"f_rest\": new_sph_rest,\n \"opacity\": new_o,\n \"scaling\": new_s,\n \"rotation\": new_r,\n \"w_dc\": new_w_dc,\n \"w_rest\": new_w_rest,\n \"f_localcode\": new_localcode,\n }\n d = {k: v for k, v in d.items() if v is not None}\n\n # First cat to optimizer and then return to self\n optimizable_tensors = cat_tensors_to_optimizer(optimizer, d)\n\n self._xyz = optimizable_tensors[\"xyz\"]\n self._opacity = 
optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n self._rotation = optimizable_tensors[\"rotation\"]\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._w_correction_dc = optimizable_tensors[\"w_dc\"]\n self._w_correction_rest = optimizable_tensors[\"w_rest\"]\n self._features_localcode = optimizable_tensors[\"f_localcode\"]\n\n self.xyz_gradient_accum = torch.zeros(self._xyz.shape[0], device=\"cuda\")\n self.xyz_gradient_denom = torch.zeros(self._xyz.shape[0], device=\"cuda\")\n self.max_radii2D = torch.cat(\n [self.max_radii2D, torch.zeros_like(new_xyz[:, 0])], dim=0\n )\n return\n\n def _densify_and_clone(self, optimizer, grad_norm, grad_threshold, scale_th):\n # Extract points that satisfy the gradient condition\n # padding for enabling both call of clone and split\n padded_grad = torch.zeros((self.N), device=\"cuda\")\n padded_grad[: grad_norm.shape[0]] = grad_norm.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(self.get_s, dim=1).values <= scale_th,\n )\n if selected_pts_mask.sum() == 0:\n return 0\n\n new_xyz = self._xyz[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n new_scaling = self._scaling[selected_pts_mask]\n new_opacities = self._opacity[selected_pts_mask]\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n new_w_dc = self._w_correction_dc[selected_pts_mask]\n new_w_rest = self._w_correction_rest[selected_pts_mask]\n new_localcode = self._features_localcode[selected_pts_mask]\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n\n return len(new_xyz)\n\n def _densify_and_split(\n self,\n optimizer,\n grad_norm,\n grad_threshold,\n scale_th,\n N=2,\n ):\n # Extract points that satisfy the gradient condition\n _scaling = self.get_s\n # padding for enabling both call of clone and split\n padded_grad = torch.zeros((self.N), device=\"cuda\")\n padded_grad[: grad_norm.shape[0]] = grad_norm.squeeze()\n selected_pts_mask = torch.where(padded_grad >= grad_threshold, True, False)\n selected_pts_mask = torch.logical_and(\n selected_pts_mask,\n torch.max(_scaling, dim=1).values > scale_th,\n )\n if selected_pts_mask.sum() == 0:\n return 0\n\n stds = _scaling[selected_pts_mask].repeat(N, 1)\n means = torch.zeros((stds.size(0), 3), device=\"cuda\")\n samples = torch.normal(mean=means, std=stds)\n rots = quaternion_to_matrix(self._rotation[selected_pts_mask]).repeat(N, 1, 1)\n new_xyz = torch.bmm(rots, samples.unsqueeze(-1)).squeeze(-1) + self._xyz[\n selected_pts_mask\n ].repeat(N, 1)\n new_scaling = _scaling[selected_pts_mask].repeat(N, 1) / (0.8 * N)\n new_scaling = torch.clamp(new_scaling, max=self.max_scale, min=self.min_scale)\n new_scaling = self.s_inv_act(new_scaling)\n new_rotation = self._rotation[selected_pts_mask].repeat(N, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(N, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(N, 1)\n new_opacities = self._opacity[selected_pts_mask].repeat(N, 1)\n new_w_dc = self._w_correction_dc[selected_pts_mask].repeat(N, 1)\n new_w_rest = 
self._w_correction_rest[selected_pts_mask].repeat(N, 1)\n new_localcode = self._features_localcode[selected_pts_mask].repeat(N, 1)\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(N * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self._prune_points(optimizer, prune_filter)\n return len(new_xyz)\n\n def densify(self, optimizer, max_grad, percent_dense, extent, verbose=True):\n grads = self.xyz_gradient_accum / self.xyz_gradient_denom\n grads[grads.isnan()] = 0.0\n\n # n_clone = self._densify_and_clone(optimizer, grads, max_grad)\n n_clone = self._densify_and_clone(\n optimizer, grads, max_grad, percent_dense * extent\n )\n n_split = self._densify_and_split(\n optimizer, grads, max_grad, percent_dense * extent, N=2\n )\n\n if verbose:\n logging.info(f\"Densify: Clone[+] {n_clone}, Split[+] {n_split}\")\n # logging.info(f\"Densify: Clone[+] {n_clone}\")\n # torch.cuda.empty_cache()\n return\n\n def random_grow(self, optimizer, num_factor=0.05, std=0.1, init_opa_value=0.1):\n # * New operation, randomly add largely disturbed points to the geometry\n ind = torch.randperm(self.N)[: int(self.N * num_factor)]\n selected_pts_mask = torch.zeros(self.N, dtype=bool, device=\"cuda\")\n selected_pts_mask[ind] = True\n\n new_xyz = self._xyz[selected_pts_mask]\n noise = torch.randn_like(new_xyz) * std\n new_xyz = new_xyz + noise\n new_features_dc = self._features_dc[selected_pts_mask]\n new_features_rest = self._features_rest[selected_pts_mask]\n\n new_opacities = torch.ones_like(self._opacity[selected_pts_mask])\n new_opacities = new_opacities * self.o_inv_act(init_opa_value)\n\n new_scaling = self._scaling[selected_pts_mask]\n new_rotation = self._rotation[selected_pts_mask]\n\n new_w_dc = self._w_correction_dc[selected_pts_mask]\n new_w_rest = self._w_correction_rest[selected_pts_mask]\n new_localcode = self._features_localcode[selected_pts_mask]\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz,\n new_r=new_rotation,\n new_s=new_scaling,\n new_o=new_opacities,\n new_sph_dc=new_features_dc,\n new_sph_rest=new_features_rest,\n new_w_dc=new_w_dc,\n new_w_rest=new_w_rest,\n new_localcode=new_localcode,\n )\n logging.info(f\"Random grow: {len(new_xyz)}\")\n return len(new_xyz)\n\n def prune_points(self, optimizer, min_opacity, max_screen_size, verbose=True):\n opacity = self.o_act(self._opacity)\n prune_mask = (opacity < min_opacity).squeeze()\n if max_screen_size: # if a point is too large\n big_points_vs = self.max_radii2D > max_screen_size\n prune_mask = torch.logical_or(prune_mask, big_points_vs)\n # * reset the maxRadii\n self.max_radii2D = torch.zeros_like(self.max_radii2D)\n self._prune_points(optimizer, prune_mask)\n if verbose:\n logging.info(f\"Prune: {prune_mask.sum()}\")\n\n def _prune_points(self, optimizer, mask):\n valid_points_mask = ~mask\n optimizable_tensors = prune_optimizer(\n optimizer,\n valid_points_mask,\n exclude_names=self.op_update_exclude,\n )\n\n self._xyz = optimizable_tensors[\"xyz\"]\n if getattr(self, \"color_memory\", None) is None:\n self._features_dc = optimizable_tensors[\"f_dc\"]\n self._features_rest = optimizable_tensors[\"f_rest\"]\n self._opacity = optimizable_tensors[\"opacity\"]\n self._scaling = optimizable_tensors[\"scaling\"]\n 
self._rotation = optimizable_tensors[\"rotation\"]\n self._w_correction_dc = optimizable_tensors[\"w_dc\"]\n self._w_correction_rest = optimizable_tensors[\"w_rest\"]\n self._features_localcode = optimizable_tensors[\"f_localcode\"]\n\n self.xyz_gradient_accum = self.xyz_gradient_accum[valid_points_mask]\n self.xyz_gradient_denom = self.xyz_gradient_denom[valid_points_mask]\n self.max_radii2D = self.max_radii2D[valid_points_mask]\n # torch.cuda.empty_cache()\n return\n\n @torch.no_grad()\n def regaussian(self, optimizer, max_scale=0.03):\n # raise NotImplementedError(\"TODO, like split\")\n # * New operation, manually split the large gaussians with smaller ones to approximate\n # * Now, try bi-split\n\n # Extract points that satisfy the gradient condition\n _scaling = self.get_s\n selected_pts_mask = torch.max(_scaling, dim=1).values > max_scale\n\n step = 0\n before_num = self.N\n while selected_pts_mask.any():\n # This can be done more than 3 times, becuase there may be huge gaussians, which should be devided several times\n fg_xyz = self._xyz[selected_pts_mask]\n fg_scale = _scaling[selected_pts_mask]\n fg_frame = quaternion_to_matrix(self._rotation[selected_pts_mask])\n # each column is the direction of axis in global frame\n axis_ind = torch.argmax(fg_scale, dim=1)\n axis_scale = fg_scale.max(dim=1).values\n # select column\n axis_dir = torch.gather(\n fg_frame, dim=2, index=axis_ind[:, None, None].expand(-1, 3, -1)\n ).squeeze(\n -1\n ) # N,3\n new_x1 = fg_xyz + axis_dir.squeeze() * axis_scale[:, None] / 2.0\n new_x2 = fg_xyz - axis_dir.squeeze() * axis_scale[:, None] / 2.0\n # Repeat will change [1,2,3...] to [1,2,3..., 1,2,3...]\n new_xyz = torch.cat([new_x1, new_x2], dim=0).reshape(-1, 3)\n new_scaling = _scaling[selected_pts_mask]\n new_scaling = torch.scatter(\n new_scaling,\n dim=1,\n index=axis_ind[:, None],\n src=axis_scale[:, None] / 2.0,\n ).repeat(2, 1)\n new_scaling = torch.clamp(\n new_scaling, max=self.max_scale, min=self.min_scale\n )\n new_scaling = self.s_inv_act(new_scaling)\n new_rotation = self._rotation[selected_pts_mask].repeat(2, 1)\n new_features_dc = self._features_dc[selected_pts_mask].repeat(2, 1)\n new_features_rest = self._features_rest[selected_pts_mask].repeat(2, 1)\n new_opacities = self._opacity[selected_pts_mask].repeat(2, 1)\n new_w_dc = self._w_correction_dc[selected_pts_mask].repeat(2, 1)\n new_w_rest = self._w_correction_rest[selected_pts_mask].repeat(2, 1)\n new_localcode = self._features_localcode[selected_pts_mask].repeat(2, 1)\n\n self._densification_postprocess(\n optimizer,\n new_xyz=new_xyz.float(),\n new_r=new_rotation.float(),\n new_s=new_scaling.float(),\n new_o=new_opacities.float(),\n new_sph_dc=new_features_dc.float(),\n new_sph_rest=new_features_rest.float(),\n new_w_dc=new_w_dc.float(),\n new_w_rest=new_w_rest.float(),\n new_localcode=new_localcode.float(),\n )\n\n prune_filter = torch.cat(\n (\n selected_pts_mask,\n torch.zeros(2 * selected_pts_mask.sum(), device=\"cuda\", dtype=bool),\n )\n )\n self._prune_points(optimizer, prune_filter)\n\n step += 1\n logging.info(\n f\"Regaussian-[{step}], {selected_pts_mask.sum()} ({selected_pts_mask.float().mean()*100}% pts-scale>{max_scale})\"\n )\n\n _scaling = self.get_s\n selected_pts_mask = torch.max(_scaling, dim=1).values > max_scale\n logging.info(f\"Re-gaussian: {before_num} -> {self.N}\")\n return\n\n def reset_opacity(self, optimizer, value=0.01, verbose=True):\n opacities_new = self.o_inv_act(\n torch.min(self.o_act(self._opacity), torch.ones_like(self._opacity) * value)\n )\n 
optimizable_tensors = replace_tensor_to_optimizer(\n optimizer, opacities_new, \"opacity\"\n )\n if verbose:\n logging.info(f\"Reset opacity to {value}\")\n self._opacity = optimizable_tensors[\"opacity\"]\n\n def load(self, ckpt):\n # because N changed, have to re-init the buffers\n self._xyz = nn.Parameter(torch.as_tensor(ckpt[\"_xyz\"], dtype=torch.float32))\n\n self._features_dc = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_dc\"], dtype=torch.float32)\n )\n self._features_rest = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_rest\"], dtype=torch.float32)\n )\n self._opacity = nn.Parameter(\n torch.as_tensor(ckpt[\"_opacity\"], dtype=torch.float32)\n )\n self._scaling = nn.Parameter(\n torch.as_tensor(ckpt[\"_scaling\"], dtype=torch.float32)\n )\n self._rotation = nn.Parameter(\n torch.as_tensor(ckpt[\"_rotation\"], dtype=torch.float32)\n )\n self._w_correction_dc = nn.Parameter(\n torch.as_tensor(ckpt[\"_w_correction_dc\"], dtype=torch.float32)\n )\n self._w_correction_rest = nn.Parameter(\n torch.as_tensor(ckpt[\"_w_correction_rest\"], dtype=torch.float32)\n )\n self._features_localcode = nn.Parameter(\n torch.as_tensor(ckpt[\"_features_localcode\"], dtype=torch.float32)\n )\n self.xyz_gradient_accum = torch.as_tensor(\n ckpt[\"xyz_gradient_accum\"], dtype=torch.float32\n )\n self.xyz_gradient_denom = torch.as_tensor(\n ckpt[\"xyz_gradient_denom\"], dtype=torch.int64\n )\n self.max_radii2D = torch.as_tensor(ckpt[\"max_radii2D\"], dtype=torch.float32)\n\n # * add bones may have different total_t\n if \"add_bones.dt_list\" in ckpt.keys():\n self.add_bones.total_t = ckpt[\"add_bones.dt_list\"].shape[0]\n self.add_bones.dt_list = nn.Parameter(\n torch.as_tensor(ckpt[\"add_bones.dt_list\"], dtype=torch.float32)\n )\n self.add_bones.dr_list = nn.Parameter(\n torch.as_tensor(ckpt[\"add_bones.dr_list\"], dtype=torch.float32)\n )\n # load others\n self.load_state_dict(ckpt, strict=True)\n # this is critical, reinit the funcs\n self._init_act(self.max_scale, self.min_scale)\n return" }, { "identifier": "AdditionalBones", "path": "lib_gart/model.py", "snippet": "class AdditionalBones(nn.Module):\n def __init__(\n self, # additional bones\n num_bones: int = 0,\n total_t: int = 0, # any usage of time should use this!\n mode=\"pose-mlp\",\n # pose-mlp\n pose_dim=23 * 3,\n mlp_hidden_dims=[256, 256, 256, 256],\n mlp_act=nn.LeakyReLU,\n # pose+t-mlp\n ):\n super().__init__()\n self.num_bones = num_bones\n if self.num_bones == 0:\n return\n self.mode = mode\n assert self.mode in [\"pose-mlp\", \"pose+t-mlp\", \"delta-list\", \"list\"]\n self.total_t = total_t\n\n if self.mode == \"pose-mlp\":\n self.pose_dim = pose_dim\n self.mlp_layers = nn.ModuleList()\n c_in = self.pose_dim\n for c_out in mlp_hidden_dims:\n self.mlp_layers.append(nn.Sequential(nn.Linear(c_in, c_out), mlp_act()))\n c_in = c_out\n self.mlp_output_head = nn.Linear(c_in, 7 * self.num_bones, bias=False)\n with torch.no_grad():\n self.mlp_output_head.weight.data = (\n torch.randn_like(self.mlp_output_head.weight.data) * 1e-3\n )\n elif self.mode == \"delta-list\":\n self.dr_list = nn.Parameter(torch.zeros(self.total_t, num_bones, 3))\n self.dt_list = nn.Parameter(torch.zeros(self.total_t, num_bones, 3))\n else:\n raise NotImplementedError()\n\n return\n\n def forward(self, pose=None, t=None, As=None):\n if self.num_bones == 0:\n # * No additional bones\n return None\n if As is not None:\n # * Directly return if As already provided\n return As\n if self.mode == \"pose-mlp\":\n assert pose is not None\n assert pose.ndim == 2 and 
pose.shape[1] == self.pose_dim\n B = len(pose)\n x = pose\n for layer in self.mlp_layers:\n x = layer(x)\n x = self.mlp_output_head(x).reshape(B, -1, 7)\n q, t = x[:, :, :4], x[:, :, 4:]\n q[..., 0] = q[..., 0] + 1.0\n q = F.normalize(q, dim=-1)\n R = quaternion_to_matrix(q)\n Rt = torch.cat([R, t[:, :, :, None]], dim=-1)\n bottom = torch.zeros_like(Rt[:, :, 0:1])\n bottom[:, :, :, -1] = 1.0\n As = torch.cat([Rt, bottom], dim=2)\n return As\n elif self.mode == \"delta-list\":\n As = self._roll_out_continuous_T()\n if t is None:\n B = len(pose)\n # # ! If no time is set, now return eye(4)\n # ret = (\n # torch.eye(4)\n # .to(As.device)[None, None]\n # .repeat(B, self.num_bones, 1, 1)\n # )\n # ! If no time is set, now return first frame\n ret = As[0][None].repeat(B, 1, 1, 1)\n else:\n if isinstance(t, int):\n t = torch.tensor([t]).to(As.device)\n ret = As[t]\n return ret\n else:\n raise NotImplementedError()\n\n return # As in canonical frame\n\n def _roll_out_continuous_T(self):\n # ! this assumes continuous frames, single frame!\n R = axis_angle_to_matrix(self.dr_list)\n dT = (\n torch.eye(4).to(R.device)[None, None].repeat(self.total_t, R.shape[1], 1, 1)\n )\n dT[:, :, :3, :3] = dT[:, :, :3, :3] * 0 + R\n dT[:, :, :3, 3] = dT[:, :, :3, 3] * 0 + self.dt_list\n T = [dT[0]]\n for i in range(1, self.total_t):\n T.append(torch.einsum(\"nij, njk->nik\", T[-1], dT[i]))\n T = torch.stack(T, dim=0)\n return T" }, { "identifier": "render_cam_pcl", "path": "lib_render/gauspl_renderer.py", "snippet": "def render_cam_pcl(\n xyz,\n frame,\n scale,\n opacity,\n color_feat,\n H,\n W,\n CAM_K,\n verbose=False,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n # ! Camera is at origin, every input is in camera coordinate space\n\n S = torch.zeros_like(frame)\n S[:, 0, 0] = scale[:, 0]\n S[:, 1, 1] = scale[:, 1]\n S[:, 2, 2] = scale[:, 2]\n actual_covariance = frame @ (S**2) @ frame.permute(0, 2, 1)\n\n # Create zero tensor. 
We will use it to make pytorch return gradients of the 2D (screen-space) means\n device = xyz.device\n screenspace_points = (\n torch.zeros_like(xyz, dtype=xyz.dtype, requires_grad=True, device=xyz.device) + 0\n )\n # screenspace_points.retain_grad()\n try:\n screenspace_points.retain_grad()\n except:\n pass\n\n # * Specially handle the non-centered camera, using first padding and finally crop\n if abs(H // 2 - CAM_K[1, 2]) > 1.0 or abs(W // 2 - CAM_K[0, 2]) > 1.0:\n center_handling_flag = True\n left_w, right_w = CAM_K[0, 2], W - CAM_K[0, 2]\n top_h, bottom_h = CAM_K[1, 2], H - CAM_K[1, 2]\n new_W = int(2 * max(left_w, right_w))\n new_H = int(2 * max(top_h, bottom_h))\n else:\n center_handling_flag = False\n new_W, new_H = W, H\n\n # Set up rasterization configuration\n FoVx = focal2fov(CAM_K[0, 0], new_W)\n FoVy = focal2fov(CAM_K[1, 1], new_H)\n tanfovx = math.tan(FoVx * 0.5)\n tanfovy = math.tan(FoVy * 0.5)\n\n # TODO: Check dynamic gaussian repos and original gaussian repo, they use projection matrix to handle non-centered K, not using this stupid padding like me\n viewmatrix = torch.from_numpy(getWorld2View2(np.eye(3), np.zeros(3)).transpose(0, 1)).to(device)\n projection_matrix = (\n getProjectionMatrix(znear=0.01, zfar=1.0, fovX=FoVx, fovY=FoVy).transpose(0, 1).to(device)\n )\n full_proj_transform = (viewmatrix.unsqueeze(0).bmm(projection_matrix.unsqueeze(0))).squeeze(0)\n camera_center = viewmatrix.inverse()[3, :3]\n\n raster_settings = GaussianRasterizationSettings(\n image_height=new_H,\n image_width=new_W,\n tanfovx=tanfovx,\n tanfovy=tanfovy,\n bg=torch.tensor(bg_color, dtype=torch.float32, device=device),\n scale_modifier=1.0,\n viewmatrix=viewmatrix,\n projmatrix=full_proj_transform,\n sh_degree=0, # ! use pre-compute color!\n campos=camera_center,\n prefiltered=False,\n debug=False,\n )\n rasterizer = GaussianRasterizer(raster_settings=raster_settings)\n\n means3D = xyz\n means2D = screenspace_points\n # opacity = torch.ones_like(means3D[:, 0]) * sigma\n\n # If precomputed 3d covariance is provided, use it. If not, then it will be computed from\n # scaling / rotation by the rasterizer.\n scales = None\n rotations = None\n # JH\n cov3D_precomp = strip_lowerdiag(actual_covariance)\n\n # If precomputed colors are provided, use them. Otherwise, if it is desired to precompute colors\n # from SHs in Python, do it. 
If not, then SH -> RGB conversion will be done by rasterizer.\n # xyz are in camera frame, so the dir in camera frame is just their normalized direction\n dir_cam = F.normalize(xyz, dim=-1)\n # P_w = Frame @ P_local\n dir_local = torch.einsum(\"nji,nj->ni\", frame, dir_cam) # note the transpose\n dir_local = F.normalize(\n dir_local, dim=-1\n ) # If frame is not SO(3) but Affinity, have to normalize\n N = len(color_feat)\n shs_view = color_feat.reshape(N, -1, 3) # N, Deg, Channels\n sh2rgb = eval_sh(active_sph_order, shs_view.permute(0, 2, 1), dir_local)\n colors_precomp = torch.clamp_min(sh2rgb + 0.5, 0.0)\n # colors_precomp = color_feat\n\n # Rasterize visible Gaussians to image, obtain their radii (on screen).\n\n start_time = time.time()\n ret = rasterizer(\n means3D=means3D.float(),\n means2D=means2D.float(),\n shs=None,\n colors_precomp=colors_precomp.float(),\n opacities=opacity.float(),\n scales=scales,\n rotations=rotations,\n cov3D_precomp=cov3D_precomp.float(),\n )\n if len(ret) == 2:\n rendered_image, radii = ret\n depth, alpha = None, None\n elif len(ret) == 4:\n rendered_image, radii, depth, alpha = ret\n else:\n raise ValueError(f\"Unexpected return value from rasterizer with len={len(ret)}\")\n if verbose:\n print(\n f\"render time: {(time.time() - start_time)*1000:.3f}ms\",\n )\n ret = {\n \"rgb\": rendered_image,\n \"dep\": depth,\n \"alpha\": alpha,\n \"viewspace_points\": screenspace_points,\n \"visibility_filter\": radii > 0,\n \"radii\": radii,\n }\n if center_handling_flag:\n for k in [\"rgb\", \"dep\", \"alpha\"]:\n if ret[k] is None:\n continue\n if left_w > right_w:\n ret[k] = ret[k][:, :, :W]\n else:\n ret[k] = ret[k][:, :, -W:]\n if top_h > bottom_h:\n ret[k] = ret[k][:, :H, :]\n else:\n ret[k] = ret[k][:, -H:, :]\n return ret" }, { "identifier": "transform_mu_frame", "path": "lib_gart/model_utils.py", "snippet": "def transform_mu_frame(mu, frame, T):\n if len(mu) != len(T):\n assert len(mu) == 1 and len(frame) == 1\n mu = mu.expand(len(T), -1, -1)\n frame = frame.expand(len(T), -1, -1, -1)\n R, t = T[:, :3, :3], T[:, :3, 3]\n new_frame = torch.einsum(\"bij, bnjk->bnik\", R, frame)\n new_mu = torch.einsum(\"bij, bnj->bni\", R, mu) + t[:, None]\n return new_mu, new_frame" }, { "identifier": "viz_render", "path": "utils/viz.py", "snippet": "def viz_render(gt_rgb, gt_mask, pred_pkg, save_path=None):\n pred_rgb = pred_pkg[\"rgb\"].permute(1, 2, 0)\n pred_mask = pred_pkg[\"alpha\"].squeeze(0)\n pred_depth = pred_pkg[\"dep\"].squeeze(0)\n fig = plt.figure(figsize=(20, 5))\n plt.subplot(1, 5, 1)\n plt.imshow(torch.clamp(gt_rgb, 0.0, 1.0).detach().cpu().numpy())\n plt.title(\"GT\"), plt.axis(\"off\")\n plt.subplot(1, 5, 2)\n plt.imshow(torch.clamp(pred_rgb, 0.0, 1.0).detach().cpu().numpy())\n plt.title(\"Pred view\"), plt.axis(\"off\")\n plt.subplot(1, 5, 3)\n error = torch.clamp(abs(pred_rgb - gt_rgb), 0.0, 1.0).detach().cpu().numpy().max(axis=-1)\n cmap = plt.imshow(error)\n plt.title(\"Render Error (max in rgb)\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n\n plt.subplot(1, 5, 4)\n error = torch.clamp(pred_mask - gt_mask, -1.0, 1.0).detach().cpu().numpy()\n cmap = plt.imshow(error)\n plt.title(\"(Pr - GT) Mask Error\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n \n plt.subplot(1, 5, 5)\n depth = pred_depth.detach().cpu().numpy()\n cmap = plt.imshow(depth)\n plt.title(\"Pred Depth\"), plt.axis(\"off\")\n plt.colorbar(cmap, shrink=0.8)\n\n plt.tight_layout()\n fig.canvas.draw()\n fig_np = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)\n 
fig_np = fig_np.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n if save_path is not None:\n plt.savefig(save_path)\n plt.close(fig)\n return fig_np" }, { "identifier": "sample_camera", "path": "lib_guidance/camera_sampling.py", "snippet": "def sample_camera(\n global_step=1,\n n_view=4,\n real_batch_size=1,\n random_azimuth_range=[-180.0, 180.0],\n random_elevation_range=[0.0, 30.0],\n eval_elevation_deg=15,\n camera_distance_range=[0.8, 1.0], # relative\n fovy_range=[15, 60],\n zoom_range=[1.0, 1.0],\n progressive_until=0,\n relative_radius=True,\n):\n # camera_perturb = 0.0\n # center_perturb = 0.0\n # up_perturb: 0.0\n\n # ! from uncond.py\n # ThreeStudio has progressive increase of camera poses, from eval to random\n r = min(1.0, global_step / (progressive_until + 1))\n elevation_range = [\n (1 - r) * eval_elevation_deg + r * random_elevation_range[0],\n (1 - r) * eval_elevation_deg + r * random_elevation_range[1],\n ]\n azimuth_range = [\n (1 - r) * 0.0 + r * random_azimuth_range[0],\n (1 - r) * 0.0 + r * random_azimuth_range[1],\n ]\n\n # sample elevation angles\n if random.random() < 0.5:\n # sample elevation angles uniformly with a probability 0.5 (biased towards poles)\n elevation_deg = (\n torch.rand(real_batch_size) * (elevation_range[1] - elevation_range[0])\n + elevation_range[0]\n ).repeat_interleave(n_view, dim=0)\n elevation = elevation_deg * math.pi / 180\n else:\n # otherwise sample uniformly on sphere\n elevation_range_percent = [\n (elevation_range[0] + 90.0) / 180.0,\n (elevation_range[1] + 90.0) / 180.0,\n ]\n # inverse transform sampling\n elevation = torch.asin(\n 2\n * (\n torch.rand(real_batch_size)\n * (elevation_range_percent[1] - elevation_range_percent[0])\n + elevation_range_percent[0]\n )\n - 1.0\n ).repeat_interleave(n_view, dim=0)\n elevation_deg = elevation / math.pi * 180.0\n\n # sample azimuth angles from a uniform distribution bounded by azimuth_range\n # ensures sampled azimuth angles in a batch cover the whole range\n azimuth_deg = (\n torch.rand(real_batch_size).reshape(-1, 1) + torch.arange(n_view).reshape(1, -1)\n ).reshape(-1) / n_view * (azimuth_range[1] - azimuth_range[0]) + azimuth_range[0]\n azimuth = azimuth_deg * math.pi / 180\n\n ######## Different from original ########\n # sample fovs from a uniform distribution bounded by fov_range\n fovy_deg = (\n torch.rand(real_batch_size) * (fovy_range[1] - fovy_range[0]) + fovy_range[0]\n ).repeat_interleave(n_view, dim=0)\n fovy = fovy_deg * math.pi / 180\n\n # sample distances from a uniform distribution bounded by distance_range\n camera_distances = (\n torch.rand(real_batch_size) * (camera_distance_range[1] - camera_distance_range[0])\n + camera_distance_range[0]\n ).repeat_interleave(n_view, dim=0)\n if relative_radius:\n scale = 1 / torch.tan(0.5 * fovy)\n camera_distances = scale * camera_distances\n\n # zoom in by decreasing fov after camera distance is fixed\n zoom = (\n torch.rand(real_batch_size) * (zoom_range[1] - zoom_range[0]) + zoom_range[0]\n ).repeat_interleave(n_view, dim=0)\n fovy = fovy * zoom\n fovy_deg = fovy_deg * zoom\n ###########################################\n\n # convert spherical coordinates to cartesian coordinates\n # right hand coordinate system, x back, y right, z up\n # elevation in (-90, 90), azimuth from +x to +y in (-180, 180)\n camera_positions = torch.stack(\n [\n camera_distances * torch.cos(elevation) * torch.cos(azimuth),\n camera_distances * torch.cos(elevation) * torch.sin(azimuth),\n camera_distances * torch.sin(elevation),\n ],\n dim=-1,\n )\n\n 
azimuth, elevation\n # build opencv camera\n z = -torch.stack(\n [\n torch.cos(elevation) * torch.cos(azimuth),\n torch.cos(elevation) * torch.sin(azimuth),\n torch.sin(elevation),\n ],\n -1,\n ) # nview, 3\n # up is 0,0,1\n x = torch.cross(z, torch.tensor([0.0, 0.0, 1.0], device=z.device).repeat(n_view, 1), -1)\n y = torch.cross(z, x, -1)\n\n R_wc = torch.stack([x, y, z], dim=2) # nview, 3, 3, col is basis\n t_wc = camera_positions\n\n T_wc = torch.eye(4, device=R_wc.device).repeat(n_view, 1, 1)\n T_wc[:, :3, :3] = R_wc\n T_wc[:, :3, 3] = t_wc\n\n return T_wc, fovy_deg # B,4,4, B" }, { "identifier": "fov2K", "path": "lib_guidance/camera_sampling.py", "snippet": "def fov2K(fov=90, H=256, W=256):\n if isinstance(fov, torch.Tensor):\n f = H / (2 * torch.tan(fov / 2 * np.pi / 180))\n K = torch.eye(3).repeat(fov.shape[0], 1, 1).to(fov)\n K[:, 0, 0], K[:, 0, 2] = f, W / 2.0\n K[:, 1, 1], K[:, 1, 2] = f, H / 2.0\n return K.clone()\n else:\n f = H / (2 * np.tan(fov / 2 * np.pi / 180))\n K = np.eye(3)\n K[0, 0], K[0, 2] = f, W / 2.0\n K[1, 1], K[1, 2] = f, H / 2.0\n return K.copy()" }, { "identifier": "opencv2blender", "path": "lib_guidance/camera_sampling.py", "snippet": "def opencv2blender(T):\n ret = T.clone()\n # y,z are negative\n ret[:, :, 1] *= -1\n ret[:, :, 2] *= -1\n return ret" }, { "identifier": "viz_spinning", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_spinning(\n model,\n pose,\n trans,\n H,\n W,\n K,\n save_path,\n time_index=None,\n n_spinning=10,\n model_mask=None,\n active_sph_order=0,\n bg_color=[1.0, 1.0, 1.0],\n):\n device = pose.device\n mu, fr, s, o, sph, additional_ret = model(\n pose, trans, {\"t\": time_index}, active_sph_order=active_sph_order\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n\n viz_frames = []\n for vid in range(n_spinning):\n spin_R = (\n torch.from_numpy(euler2mat(0, 2 * np.pi * vid / n_spinning, 0, \"sxyz\"))\n .to(device)\n .float()\n )\n spin_t = mu.mean(1)[0]\n spin_t = (torch.eye(3).to(device) - spin_R) @ spin_t[:, None]\n spin_T = torch.eye(4).to(device)\n spin_T[:3, :3] = spin_R\n spin_T[:3, 3] = spin_t.squeeze(-1)\n viz_mu, viz_fr = transform_mu_frame(mu, fr, spin_T[None])\n\n render_pkg = render_cam_pcl(\n viz_mu[0],\n viz_fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=bg_color,\n )\n viz_frame = (\n torch.clamp(render_pkg[\"rgb\"], 0.0, 1.0)\n .permute(1, 2, 0)\n .detach()\n .cpu()\n .numpy()\n )\n viz_frame = (viz_frame * 255).astype(np.uint8)\n viz_frames.append(viz_frame)\n imageio.mimsave(save_path, viz_frames)\n return" }, { "identifier": "viz_human_all", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_human_all(\n solver,\n data_provider: RealDataOptimizablePoseProviderPose = None,\n ckpt_dir=None,\n training_skip=1,\n n_spinning=40,\n novel_pose_dir=\"novel_poses\",\n novel_skip=2,\n model=None,\n model_mask=None,\n viz_name=\"\",\n export_mesh_flag=False, # remove this from release version\n):\n if model is None:\n model = solver.load_saved_model(ckpt_dir)\n model.eval()\n\n viz_dir = osp.join(solver.log_dir, f\"{viz_name}_human_viz\")\n os.makedirs(viz_dir, exist_ok=True)\n\n active_sph_order = int(model.max_sph_order)\n\n if data_provider is not None:\n # if ckpt_dir is None:\n # ckpt_dir = solver.log_dir\n # pose_path = osp.join(ckpt_dir, \"pose.pth\")\n pose_base_list = 
data_provider.pose_base_list\n pose_rest_list = data_provider.pose_rest_list\n global_trans_list = data_provider.global_trans_list\n pose_list = torch.cat([pose_base_list, pose_rest_list], 1)\n pose_list, global_trans_list = pose_list.to(\n solver.device\n ), global_trans_list.to(solver.device)\n rgb_list = data_provider.rgb_list\n mask_list = data_provider.mask_list\n K_list = data_provider.K_list\n H, W = rgb_list.shape[1:3]\n else:\n H, W = 512, 512\n K_list = [torch.from_numpy(fov2K(45, H, W)).float().to(solver.device)]\n global_trans_list = torch.zeros(1, 3).to(solver.device)\n global_trans_list[0, -1] = 3.0\n\n # viz training\n if data_provider is not None:\n print(\"Viz training...\")\n viz_frames = []\n for t in range(len(pose_list)):\n if t % training_skip != 0:\n continue\n pose = pose_list[t][None]\n K = K_list[t]\n trans = global_trans_list[t][None]\n time_index = torch.Tensor([t]).long().to(solver.device)\n mu, fr, s, o, sph, _ = model(\n pose,\n trans,\n {\"t\": time_index}, # use time_index from training set\n active_sph_order=active_sph_order,\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n render_pkg = render_cam_pcl(\n mu[0],\n fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n )\n viz_frame = viz_render(rgb_list[t], mask_list[t], render_pkg)\n viz_frames.append(viz_frame)\n imageio.mimsave(f\"{viz_dir}/training.gif\", viz_frames)\n\n # viz static spinning\n print(\"Viz spinning...\")\n can_pose = model.template.canonical_pose.detach()\n viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, \"sxyz\"))\n viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float()\n can_pose[0] = viz_base_R_opencv.to(can_pose.device)\n can_pose = matrix_to_axis_angle(can_pose)[None]\n dapose = torch.from_numpy(np.zeros((1, 24, 3))).float().to(solver.device)\n dapose[:, 1, -1] = np.pi / 4\n dapose[:, 2, -1] = -np.pi / 4\n dapose[:, 0] = matrix_to_axis_angle(solver.viz_base_R[None])[0]\n tpose = torch.from_numpy(np.zeros((1, 24, 3))).float().to(solver.device)\n tpose[:, 0] = matrix_to_axis_angle(solver.viz_base_R[None])[0]\n to_viz = {\"cano-pose\": can_pose, \"t-pose\": tpose, \"da-pose\": dapose}\n if data_provider is not None:\n to_viz[\"first-frame\"] = pose_list[0][None]\n\n for name, pose in to_viz.items():\n print(f\"Viz novel {name}...\")\n # if export_mesh_flag:\n # from lib_marchingcubes.gaumesh_utils import MeshExtractor\n # # also extract a mesh\n # mesh = solver.extract_mesh(model, pose)\n # mesh.export(f\"{viz_dir}/mc_{name}.obj\", \"obj\")\n\n # # for making figures, the rotation is in another way\n # viz_spinning_self_rotate(\n # model,\n # solver.viz_base_R.detach(),\n # pose,\n # global_trans_list[0][None],\n # H,\n # W,\n # K_list[0],\n # f\"{viz_dir}/{name}_selfrotate.gif\",\n # time_index=None, # if set to None and use t, the add_bone will hand this\n # n_spinning=n_spinning,\n # active_sph_order=model.max_sph_order,\n # )\n viz_spinning(\n model,\n pose,\n global_trans_list[0][None],\n H,\n W,\n K_list[0],\n f\"{viz_dir}/{name}.gif\",\n time_index=None, # if set to None and use t, the add_bone will hand this\n n_spinning=n_spinning,\n active_sph_order=model.max_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n )\n\n # viz novel pose dynamic spinning\n print(\"Viz novel 
seq...\")\n novel_pose_names = [\n f[:-4] for f in os.listdir(novel_pose_dir) if f.endswith(\".npy\")\n ]\n seq_viz_todo = {}\n for name in novel_pose_names:\n novel_pose_fn = osp.join(novel_pose_dir, f\"{name}.npy\")\n novel_poses = np.load(novel_pose_fn, allow_pickle=True)\n novel_poses = novel_poses[::novel_skip]\n N_frames = len(novel_poses)\n novel_poses = torch.from_numpy(novel_poses).float().to(solver.device)\n novel_poses = novel_poses.reshape(N_frames, 24, 3)\n\n seq_viz_todo[name] = (novel_poses, N_frames)\n if data_provider is not None:\n seq_viz_todo[\"training\"] = [pose_list, len(pose_list)]\n\n for name, (novel_poses, N_frames) in seq_viz_todo.items():\n base_R = solver.viz_base_R.detach().cpu().numpy()\n viz_frames = []\n K = K_list[0]\n for vid in range(N_frames):\n pose = novel_poses[vid][None]\n # pose = novel_poses[0][None] # debug\n rotation = euler2mat(2 * np.pi * vid / N_frames, 0.0, 0.0, \"syxz\")\n rotation = torch.from_numpy(rotation @ base_R).float().to(solver.device)\n pose[:, 0] = matrix_to_axis_angle(rotation[None])[0]\n trans = global_trans_list[0][None]\n mu, fr, s, o, sph, _ = model(\n pose,\n trans,\n # not pass in {}, so t is auto none\n additional_dict={},\n active_sph_order=active_sph_order,\n )\n if model_mask is not None:\n assert len(model_mask) == mu.shape[1]\n mu = mu[:, model_mask.bool()]\n fr = fr[:, model_mask.bool()]\n s = s[:, model_mask.bool()]\n o = o[:, model_mask.bool()]\n sph = sph[:, model_mask.bool()]\n render_pkg = render_cam_pcl(\n mu[0],\n fr[0],\n s[0],\n o[0],\n sph[0],\n H,\n W,\n K,\n False,\n active_sph_order,\n bg_color=getattr(solver, \"DEFAULT_BG\", [1.0, 1.0, 1.0]),\n # bg_color=[1.0, 1.0, 1.0], # ! use white bg for viz\n )\n viz_frame = (\n torch.clamp(render_pkg[\"rgb\"], 0.0, 1.0)\n .permute(1, 2, 0)\n .detach()\n .cpu()\n .numpy()\n )\n viz_frame = (viz_frame * 255).astype(np.uint8)\n viz_frames.append(viz_frame)\n imageio.mimsave(f\"{viz_dir}/novel_pose_{name}.gif\", viz_frames)\n return" }, { "identifier": "viz_dog_all", "path": "viz_utils.py", "snippet": "@torch.no_grad()\ndef viz_dog_all(solver, data_provider, model=None, ckpt_dir=None, viz_name=\"\"):\n if model is None:\n model = solver.load_saved_model(ckpt_dir)\n model.eval()\n viz_dir = osp.join(solver.log_dir, f\"{viz_name}_dog_viz\")\n os.makedirs(viz_dir, exist_ok=True)\n\n viz_pose = (\n torch.cat([data_provider.pose_base_list, data_provider.pose_rest_list], 1)\n .detach()\n .clone()\n )\n viz_pose = torch.mean(viz_pose, dim=0, keepdim=True) # use mean pose for viz \n limb = viz_pose[:, -7:] \n pose = viz_pose[:, :-7].reshape(-1, 35, 3)\n pose[:, :-3] = 0 # exclude ears and mouth poses\n\n viz_pose = torch.concat([pose.reshape(1, -1), limb], dim=1)\n viz_trans = torch.tensor([[0.0, -0.3, 25.0]], device=\"cuda:0\")\n\n viz_dog_spin(\n model.to(\"cuda\"),\n viz_pose,\n viz_trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"spin.gif\"),\n n_spinning=42,\n )\n\n viz_dog_spin2(\n model.to(\"cuda\"),\n viz_pose,\n viz_trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"spin2.gif\"),\n n_spinning=20,\n )\n\n ######################################################################\n # Dataset pose seq\n viz_pose = (\n torch.cat([data_provider.pose_base_list, data_provider.pose_rest_list], 1)\n .detach()\n .clone()\n )\n viz_pose = torch.mean(viz_pose, dim=0, keepdim=True)\n pose = viz_pose[:, :-7].reshape(-1, 35, 3)\n limb = viz_pose[:, -7:]\n\n # Animation\n aroot 
= osp.join(osp.dirname(__file__), \"novel_poses/husky\")\n window = list(range(350, 440)) # Run\n trans = torch.tensor([[0.3, -0.3, 25.0]], device=\"cuda:0\")\n files = [f\"{aroot}/{i:04d}.npz\" for i in window]\n pose_list = [dict(np.load(file))[\"pred_pose\"] for file in files]\n pose_list = np.concatenate(pose_list)\n animation = matrix_to_axis_angle(torch.from_numpy(pose_list)).to(solver.device)\n animation[:, [32, 33, 34]] = pose[:, [32, 33, 34]] \n\n viz_dog_animation(\n model.to(\"cuda\"),\n animation,\n limb,\n trans,\n data_provider.H,\n data_provider.W,\n data_provider.K_list[0],\n save_path=osp.join(viz_dir, \"animation.gif\"),\n fps=12,\n )\n return" }, { "identifier": "ssim", "path": "utils/ssim.py", "snippet": "def ssim(img1, img2, window_size=11, size_average=True):\n channel = img1.size(-3)\n window = create_window(window_size, channel)\n\n if img1.is_cuda:\n window = window.cuda(img1.get_device())\n window = window.type_as(img1)\n\n return _ssim(img1, img2, window, window_size, channel, size_average)" }, { "identifier": "test", "path": "test_utils/test_func.py", "snippet": "def test(\n solver,\n seq_name: str,\n tto_flag=True,\n tto_step=300,\n tto_decay=60,\n tto_decay_factor=0.5,\n pose_base_lr=3e-3,\n pose_rest_lr=3e-3,\n trans_lr=3e-3,\n dataset_mode=\"people_snapshot\",\n training_optimized_seq=None,\n):\n device = solver.device\n model = solver.load_saved_model()\n\n assert dataset_mode in [\n \"people_snapshot\",\n \"zju\",\n \"instant_avatar_wild\",\n \"dog_demo\",\n ], f\"Unknown dataset mode {dataset_mode}\"\n\n if dataset_mode == \"people_snapshot\":\n eval_mode = \"avatar\"\n bg = [1.0, 1.0, 1.0]\n test_dataset = InstantAvatarDataset(\n noisy_flag=False,\n data_root=\"./data/people_snapshot/\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=0.5,\n )\n elif dataset_mode == \"zju\":\n eval_mode = \"nvr\"\n test_dataset = ZJUDataset(\n data_root=\"./data/zju_mocap\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=0.5,\n )\n bg = [0.0, 0.0, 0.0] # zju use black background\n elif dataset_mode == \"instant_avatar_wild\":\n eval_mode = \"avatar\"\n test_dataset = InstantAvatarWildDataset(\n data_root=\"./data/insav_wild\",\n video_name=seq_name,\n split=\"test\",\n image_zoom_ratio=1.0,\n # ! 
warning, here follow the `ubc_hard.yaml` in InstAVT setting, use slicing\n start_end_skip=[2, 1000000000, 4],\n )\n bg = [1.0, 1.0, 1.0]\n\n test_len = len(test_dataset)\n assert (training_optimized_seq.total_t == test_len) or (\n training_optimized_seq.total_t == 1 + test_len\n ), \"Now UBC can only support the same length of training and testing or + 1\"\n test_dataset.smpl_params[\"body_pose\"] = (\n training_optimized_seq.pose_rest_list.reshape(-1, 69)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n test_dataset.smpl_params[\"global_orient\"] = (\n training_optimized_seq.pose_base_list.reshape(-1, 3)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n test_dataset.smpl_params[\"transl\"] = (\n training_optimized_seq.global_trans_list.reshape(-1, 3)[:test_len]\n .detach()\n .cpu()\n .numpy()\n )\n elif dataset_mode == \"dog_demo\":\n eval_mode = \"avatar_brightness\"\n bg = [1.0, 1.0, 1.0]\n test_dataset = DogDemoDataset(\n data_root=\"./data/dog_data_official/\", video_name=seq_name, test=True\n )\n else:\n raise NotImplementedError()\n\n evaluator = get_evaluator(eval_mode, device)\n\n _save_eval_maps(\n solver.log_dir,\n \"test\",\n model,\n solver,\n test_dataset,\n dataset_mode=dataset_mode,\n device=device,\n bg=bg,\n tto_flag=tto_flag,\n tto_step=tto_step,\n tto_decay=tto_decay,\n tto_decay_factor=tto_decay_factor,\n tto_evaluator=evaluator,\n pose_base_lr=pose_base_lr,\n pose_rest_lr=pose_rest_lr,\n trans_lr=trans_lr,\n )\n\n if tto_flag:\n _evaluate_dir(evaluator, solver.log_dir, \"test_tto\")\n else:\n _evaluate_dir(evaluator, solver.log_dir, \"test\")\n\n return" } ]
from matplotlib import pyplot as plt from pytorch3d.transforms import matrix_to_axis_angle from tqdm import tqdm from transforms3d.euler import euler2mat from omegaconf import OmegaConf from lib_data.get_data import prepare_real_seq from lib_data.data_provider import DatabasePoseProvider from lib_gart.templates import get_template from lib_gart.model import GaussianTemplateModel, AdditionalBones from lib_gart.optim_utils import * from lib_render.gauspl_renderer import render_cam_pcl from lib_gart.model_utils import transform_mu_frame from utils.misc import * from utils.viz import viz_render from pytorch3d.transforms import axis_angle_to_matrix, matrix_to_axis_angle from pytorch3d.ops import knn_points from lib_guidance.camera_sampling import sample_camera, fov2K, opencv2blender from viz_utils import viz_spinning, viz_human_all, viz_dog_all from utils.ssim import ssim from datetime import datetime from test_utils import test from lib_guidance.mvdream.mvdream_guidance import MVDream from utils.lpips import LPIPS import imageio import torch import numpy as np import os, os.path as osp, shutil, sys import time import logging import argparse
20,497
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass
# from lib_marchingcubes.gaumesh_utils import MeshExtractor try: # from lib_guidance.sd_utils import StableDiffusion except: logging.warning("No guidance module") class TGFitter: def __init__( self, log_dir, profile_fn, mode, template_model_path="data/smpl_model/SMPL_NEUTRAL.pkl", device=torch.device("cuda:0"), **kwargs, ) -> None: self.log_dir = log_dir os.makedirs(self.log_dir, exist_ok=True) self.profile_fn = profile_fn try: shutil.copy(profile_fn, osp.join(self.log_dir, osp.basename(profile_fn))) except: pass self.mode = mode assert self.mode in ["human", "dog"], "Only support human and dog for now" self.template_model_path = template_model_path self.device = device # * auto set attr cfg = OmegaConf.load(profile_fn) # assign the cfg to self attribute for k, v in cfg.items(): setattr(self, k, v) for k, v in kwargs.items(): setattr(self, k, v) # * explicitly set flags self.FAST_TRAINING = getattr(self, "FAST_TRAINING", False) self.LAMBDA_SSIM = getattr(self, "LAMBDA_SSIM", 0.0) self.LAMBDA_LPIPS = getattr(self, "LAMBDA_LPIPS", 0.0) if self.LAMBDA_LPIPS > 0: self.lpips = LPIPS(net="vgg").to(self.device) for param in self.lpips.parameters(): param.requires_grad = False if isinstance(self.RESET_OPACITY_STEPS, int): self.RESET_OPACITY_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.RESET_OPACITY_STEPS == 0 ] if isinstance(self.REGAUSSIAN_STEPS, int): self.REGAUSSIAN_STEPS = [ i for i in range(1, self.TOTAL_steps) if i % self.REGAUSSIAN_STEPS == 0 ] # prepare base R if self.mode == "human": viz_base_R_opencv = np.asarray(euler2mat(np.pi, 0, 0, "sxyz")) else: viz_base_R_opencv = np.asarray(euler2mat(np.pi / 2.0, 0, np.pi, "rxyz")) viz_base_R_opencv = torch.from_numpy(viz_base_R_opencv).float() self.viz_base_R = viz_base_R_opencv.to(self.device) if self.mode == "human": self.reg_base_R_global = ( matrix_to_axis_angle( torch.as_tensor(euler2mat(np.pi / 2.0, 0, np.pi / 2.0, "sxyz"))[ None ] )[0] .float() .to(self.device) ) else: # TODO, for generation of dog pass self.writer = create_log( self.log_dir, name=osp.basename(self.profile_fn).split(".")[0], debug=False ) return def prepare_fake_data(self, mode, *args, **kwargs): if mode == "amass": # todo: change to amass
provider = DatabasePoseProvider(*args, **kwargs, device=torch.device("cpu"))
1
2023-11-27 17:30:04+00:00
24k
skhu101/GauHuman
scene/dataset_readers.py
[ { "identifier": "read_extrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n images = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n image_id = int(elems[0])\n qvec = np.array(tuple(map(float, elems[1:5])))\n tvec = np.array(tuple(map(float, elems[5:8])))\n camera_id = int(elems[8])\n image_name = elems[9]\n elems = fid.readline().split()\n xys = np.column_stack([tuple(map(float, elems[0::3])),\n tuple(map(float, elems[1::3]))])\n point3D_ids = np.array(tuple(map(int, elems[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_text", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_text(path):\n \"\"\"\n Taken from https://github.com/colmap/colmap/blob/dev/scripts/python/read_write_model.py\n \"\"\"\n cameras = {}\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n camera_id = int(elems[0])\n model = elems[1]\n assert model == \"PINHOLE\", \"While the loader support other types, the rest of the code assumes PINHOLE\"\n width = int(elems[2])\n height = int(elems[3])\n params = np.array(tuple(map(float, elems[4:])))\n cameras[camera_id] = Camera(id=camera_id, model=model,\n width=width, height=height,\n params=params)\n return cameras" }, { "identifier": "qvec2rotmat", "path": "scene/colmap_loader.py", "snippet": "def qvec2rotmat(qvec):\n return np.array([\n [1 - 2 * qvec[2]**2 - 2 * qvec[3]**2,\n 2 * qvec[1] * qvec[2] - 2 * qvec[0] * qvec[3],\n 2 * qvec[3] * qvec[1] + 2 * qvec[0] * qvec[2]],\n [2 * qvec[1] * qvec[2] + 2 * qvec[0] * qvec[3],\n 1 - 2 * qvec[1]**2 - 2 * qvec[3]**2,\n 2 * qvec[2] * qvec[3] - 2 * qvec[0] * qvec[1]],\n [2 * qvec[3] * qvec[1] - 2 * qvec[0] * qvec[2],\n 2 * qvec[2] * qvec[3] + 2 * qvec[0] * qvec[1],\n 1 - 2 * qvec[1]**2 - 2 * qvec[2]**2]])" }, { "identifier": "read_extrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_extrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadImagesBinary(const std::string& path)\n void Reconstruction::WriteImagesBinary(const std::string& path)\n \"\"\"\n images = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_reg_images = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_reg_images):\n binary_image_properties = read_next_bytes(\n fid, num_bytes=64, format_char_sequence=\"idddddddi\")\n image_id = binary_image_properties[0]\n qvec = np.array(binary_image_properties[1:5])\n tvec = np.array(binary_image_properties[5:8])\n camera_id = binary_image_properties[8]\n image_name = \"\"\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n while current_char != b\"\\x00\": # look for the ASCII 0 entry\n image_name += current_char.decode(\"utf-8\")\n current_char = read_next_bytes(fid, 1, \"c\")[0]\n num_points2D = read_next_bytes(fid, num_bytes=8,\n format_char_sequence=\"Q\")[0]\n x_y_id_s = read_next_bytes(fid, num_bytes=24*num_points2D,\n format_char_sequence=\"ddq\"*num_points2D)\n xys = np.column_stack([tuple(map(float, x_y_id_s[0::3])),\n tuple(map(float, x_y_id_s[1::3]))])\n 
point3D_ids = np.array(tuple(map(int, x_y_id_s[2::3])))\n images[image_id] = Image(\n id=image_id, qvec=qvec, tvec=tvec,\n camera_id=camera_id, name=image_name,\n xys=xys, point3D_ids=point3D_ids)\n return images" }, { "identifier": "read_intrinsics_binary", "path": "scene/colmap_loader.py", "snippet": "def read_intrinsics_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::WriteCamerasBinary(const std::string& path)\n void Reconstruction::ReadCamerasBinary(const std::string& path)\n \"\"\"\n cameras = {}\n with open(path_to_model_file, \"rb\") as fid:\n num_cameras = read_next_bytes(fid, 8, \"Q\")[0]\n for _ in range(num_cameras):\n camera_properties = read_next_bytes(\n fid, num_bytes=24, format_char_sequence=\"iiQQ\")\n camera_id = camera_properties[0]\n model_id = camera_properties[1]\n model_name = CAMERA_MODEL_IDS[camera_properties[1]].model_name\n width = camera_properties[2]\n height = camera_properties[3]\n num_params = CAMERA_MODEL_IDS[model_id].num_params\n params = read_next_bytes(fid, num_bytes=8*num_params,\n format_char_sequence=\"d\"*num_params)\n cameras[camera_id] = Camera(id=camera_id,\n model=model_name,\n width=width,\n height=height,\n params=np.array(params))\n assert len(cameras) == num_cameras\n return cameras" }, { "identifier": "read_points3D_binary", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_binary(path_to_model_file):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DBinary(const std::string& path)\n void Reconstruction::WritePoints3DBinary(const std::string& path)\n \"\"\"\n\n\n with open(path_to_model_file, \"rb\") as fid:\n num_points = read_next_bytes(fid, 8, \"Q\")[0]\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n\n for p_id in range(num_points):\n binary_point_line_properties = read_next_bytes(\n fid, num_bytes=43, format_char_sequence=\"QdddBBBd\")\n xyz = np.array(binary_point_line_properties[1:4])\n rgb = np.array(binary_point_line_properties[4:7])\n error = np.array(binary_point_line_properties[7])\n track_length = read_next_bytes(\n fid, num_bytes=8, format_char_sequence=\"Q\")[0]\n track_elems = read_next_bytes(\n fid, num_bytes=8*track_length,\n format_char_sequence=\"ii\"*track_length)\n xyzs[p_id] = xyz\n rgbs[p_id] = rgb\n errors[p_id] = error\n return xyzs, rgbs, errors" }, { "identifier": "read_points3D_text", "path": "scene/colmap_loader.py", "snippet": "def read_points3D_text(path):\n \"\"\"\n see: src/base/reconstruction.cc\n void Reconstruction::ReadPoints3DText(const std::string& path)\n void Reconstruction::WritePoints3DText(const std::string& path)\n \"\"\"\n xyzs = None\n rgbs = None\n errors = None\n num_points = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n num_points += 1\n\n\n xyzs = np.empty((num_points, 3))\n rgbs = np.empty((num_points, 3))\n errors = np.empty((num_points, 1))\n count = 0\n with open(path, \"r\") as fid:\n while True:\n line = fid.readline()\n if not line:\n break\n line = line.strip()\n if len(line) > 0 and line[0] != \"#\":\n elems = line.split()\n xyz = np.array(tuple(map(float, elems[1:4])))\n rgb = np.array(tuple(map(int, elems[4:7])))\n error = np.array(float(elems[7]))\n xyzs[count] = xyz\n rgbs[count] = rgb\n errors[count] = error\n count += 1\n\n return xyzs, rgbs, errors" }, { "identifier": "getWorld2View2", "path": 
"utils/graphics_utils.py", "snippet": "def getWorld2View2(R, t, translate=np.array([.0, .0, .0]), scale=1.0):\n Rt = np.zeros((4, 4))\n Rt[:3, :3] = R.transpose()\n Rt[:3, 3] = t\n Rt[3, 3] = 1.0\n\n C2W = np.linalg.inv(Rt)\n cam_center = C2W[:3, 3]\n cam_center = (cam_center + translate) * scale\n C2W[:3, 3] = cam_center\n Rt = np.linalg.inv(C2W)\n return np.float32(Rt)" }, { "identifier": "focal2fov", "path": "utils/graphics_utils.py", "snippet": "def focal2fov(focal, pixels):\n return 2*math.atan(pixels/(2*focal))" }, { "identifier": "fov2focal", "path": "utils/graphics_utils.py", "snippet": "def fov2focal(fov, pixels):\n return pixels / (2 * math.tan(fov / 2))" }, { "identifier": "SH2RGB", "path": "utils/sh_utils.py", "snippet": "def SH2RGB(sh):\n return sh * C0 + 0.5" }, { "identifier": "BasicPointCloud", "path": "scene/gaussian_model.py", "snippet": "class GaussianModel:\n def setup_functions(self):\n def build_covariance_from_scaling_rotation(scaling, scaling_modifier, rotation, transform):\n def __init__(self, sh_degree : int, smpl_type : str, motion_offset_flag : bool, actor_gender: str):\n def capture(self):\n def restore(self, model_args, training_args):\n def get_scaling(self):\n def get_rotation(self):\n def get_xyz(self):\n def get_features(self):\n def get_opacity(self):\n def get_covariance(self, scaling_modifier = 1, transform=None):\n def oneupSHdegree(self):\n def create_from_pcd(self, pcd : BasicPointCloud, spatial_lr_scale : float):\n def training_setup(self, training_args):\n def update_learning_rate(self, iteration):\n def construct_list_of_attributes(self):\n def save_ply(self, path):\n def reset_opacity(self):\n def load_ply(self, path):\n def replace_tensor_to_optimizer(self, tensor, name):\n def _prune_optimizer(self, mask):\n def prune_points(self, mask):\n def cat_tensors_to_optimizer(self, tensors_dict):\n def densification_postfix(self, new_xyz, new_features_dc, new_features_rest, new_opacities, new_scaling, new_rotation):\n def densify_and_split(self, grads, grad_threshold, scene_extent, N=2):\n def densify_and_clone(self, grads, grad_threshold, scene_extent):\n def kl_densify_and_clone(self, grads, grad_threshold, scene_extent, kl_threshold=0.4):\n def kl_densify_and_split(self, grads, grad_threshold, scene_extent, kl_threshold=0.4, N=2):\n def kl_merge(self, grads, grad_threshold, scene_extent, kl_threshold=0.1):\n def densify_and_prune(self, max_grad, min_opacity, extent, max_screen_size, kl_threshold=0.4, t_vertices=None, iter=None):\n def kl_div(self, mu_0, rotation_0_q, scaling_0_diag, mu_1, rotation_1_q, scaling_1_diag):\n def add_densification_stats(self, viewspace_point_tensor, update_filter):\n def coarse_deform_c2source(self, query_pts, params, t_params, t_vertices, lbs_weights=None, correct_Rs=None, return_transl=False):\ndef read_pickle(pkl_path):\ndef SMPL_to_tensor(params, device):\ndef batch_rodrigues_torch(poses):\ndef get_rigid_transformation_torch(rot_mats, joints, parents):\ndef get_transform_params_torch(smpl, params, rot_mats=None, correct_Rs=None):\ndef batch_rodrigues(rot_vecs, epsilon=1e-8, dtype=torch.float32):\n L = build_scaling_rotation(scaling_modifier * scaling, rotation)\n L_0 = rotation_0 @ scaling_0\n A = torch.matmul(bweights, A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n A = torch.matmul(bweights, self.s_A.reshape(bs, joints_num, -1))\n A = torch.reshape(A, (bs, -1, 4, 4))\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1)\n K = K.reshape([batch_size, 3, 3])\n A = 
get_rigid_transformation_torch(rot_mats, joints, parents)\n R = params['R'] \n K = torch.zeros((batch_size, 3, 3), dtype=dtype, device=device)\n K = torch.cat([zeros, -rz, ry, rz, zeros, -rx, -ry, rx, zeros], dim=1) \\\n .view((batch_size, 3, 3))" }, { "identifier": "SMPL", "path": "smpl/smpl_numpy.py", "snippet": "class SMPL():\n def __init__(self, sex, model_dir):\n super(SMPL, self).__init__()\n\n model_paths = {\n 'male': os.path.join(model_dir, MALE_PATH),\n 'female': os.path.join(model_dir, FEMALE_PATH),\n # 'neutral': os.path.join(model_dir, NEUTRAL_PATH)\n 'neutral': os.path.join('assets/SMPL_NEUTRAL.pkl')\n }\n\n with open(model_paths[sex], 'rb') as f:\n smpl_model = pickle.load(f, encoding='latin1')\n self.J_regressor = np.array(smpl_model['J_regressor'].todense()) # (24, 6890)\n self.weights = smpl_model['weights'] # (6890, 24)\n self.posedirs = smpl_model['posedirs'] # (6890, 3, 207)\n self.v_template = smpl_model['v_template'] # (6890, 3)\n self.shapedirs = np.array(smpl_model['shapedirs']) # (6890, 3, 10)\n self.faces = smpl_model['f'].astype('int32') # (13776, 3)\n self.kintree_table = smpl_model['kintree_table'].astype('int64') # (2, 24)\n\n id_to_col = {self.kintree_table[1, i].item(): i for i in range(self.kintree_table.shape[1])}\n self.parent = np.array([id_to_col[self.kintree_table[0, it]] for it in range(1, self.kintree_table.shape[1])])\n\n self.pose_shape = [24, 3]\n self.beta_shape = [10]\n self.pose = np.zeros(self.pose_shape)\n self.beta = np.zeros(self.beta_shape)\n\n self.verts = None\n self.J = None\n self.R = None\n\n def __call__(self, pose, beta):\n\n v_template = self.v_template # (6890, 3)\n shapedirs = self.shapedirs.reshape(-1,10) # (6890*3, 10)\n beta = beta[:, None] # (10, 1)\n\n v_shaped = shapedirs.dot(beta).reshape(6890, 3) + v_template # (6890, 3)\n J = self.J_regressor.dot(v_shaped) # (24, 3)\n\n # input is a rotation matrix: (24,3,3)\n if pose.shape == (24, 3, 3):\n R = pose\n # input is a rotation axis-angle vector: (1, 72), (72, 1) or (72, )\n elif pose.shape == (1, 72) or pose.shape == (72, 1) or pose.shape == (72,):\n pose_vectors = pose.reshape(-1, 3) # (24, 3)\n R = np.array([rodrigues(pose_vectors[p_idx])[0] \n for p_idx in range(pose_vectors.shape[0])\n ], \n dtype='float32') # (24, 3, 3)\n else:\n raise ValueError(\"Unsupported Pose Inputs - the Pose Shape is {}\".format(pose.shape))\n\n Is = np.eye(3, dtype='float32')[None, :] # (1, 3, 3)\n lrotmin = (R[1:,:] - Is).reshape(-1, 1) # (23x3x3, 1)\n posedirs = self.posedirs.reshape(-1,207) # (6890x3, 207)\n v_posed = v_shaped + posedirs.dot(lrotmin).reshape(6890, 3) # (6890, 3)\n\n J_ = J.copy()\n J_[1:, :] = J[1:, :] - J[self.parent, :] # (24, 3)\n G_ = np.concatenate([R, J_[:, :, None]], axis=-1) # (24, 3, 4)\n pad_rows = np.array([[0, 0, 0, 1]], dtype='float32')\n pad_rows = np.repeat(pad_rows, 24, axis=0).reshape(-1, 1, 4)\n G_ = np.concatenate([G_, pad_rows], axis=1) # (24, 4, 4)\n\n G = [G_[0].copy()]\n for i in range(1, 24):\n G.append(G[self.parent[i-1]].dot(G_[i, :, :]))\n G = np.stack(G, axis=0) # (24, 4, 4)\n\n joints = G[:, :3, 3]\n rest_joints = np.concatenate([J, np.zeros((24, 1))], axis=-1)[:, :, None] # (24, 4, 1)\n zeros = np.zeros((24, 4, 3), dtype='float32') # (24, 4, 3)\n rest_joints_mtx = np.concatenate([zeros, rest_joints], axis=-1) # (24, 4, 4) \n # print(\"G1: \", G[0], \"rest_joints_mtx1: \", rest_joints_mtx[0])\n posed_joints_mtx = np.matmul(G, rest_joints_mtx)\n # print(\"rest_joints_mtx2: \", posed_joints_mtx[0])\n G = G - posed_joints_mtx\n # print(G[0]) \n 
rest_shape_h = np.concatenate([v_posed, np.ones(v_posed.shape[0])[:, None]], axis=-1) #(6890, 4)\n T = self.weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)\n v = np.matmul(T, rest_shape_h[:, :, None])[:, :3, 0]\n \n return v, joints" }, { "identifier": "SMPLX", "path": "smplx/body_models.py", "snippet": "class SMPLX(SMPLH):\n '''\n SMPL-X (SMPL eXpressive) is a unified body model, with shape parameters\n trained jointly for the face, hands and body.\n SMPL-X uses standard vertex based linear blend skinning with learned\n corrective blend shapes, has N=10475 vertices and K=54 joints,\n which includes joints for the neck, jaw, eyeballs and fingers.\n '''\n\n NUM_BODY_JOINTS = SMPLH.NUM_BODY_JOINTS\n NUM_HAND_JOINTS = 15\n NUM_FACE_JOINTS = 3\n NUM_JOINTS = NUM_BODY_JOINTS + 2 * NUM_HAND_JOINTS + NUM_FACE_JOINTS\n EXPRESSION_SPACE_DIM = 100\n NECK_IDX = 12\n\n def __init__(\n self, model_path: str,\n kid_template_path: str = '',\n num_expression_coeffs: int = 10,\n create_expression: bool = True,\n expression: Optional[Tensor] = None,\n create_jaw_pose: bool = True,\n jaw_pose: Optional[Tensor] = None,\n create_leye_pose: bool = True,\n leye_pose: Optional[Tensor] = None,\n create_reye_pose=True,\n reye_pose: Optional[Tensor] = None,\n use_face_contour: bool = False,\n batch_size: int = 1,\n gender: str = 'neutral',\n age: str = 'adult',\n dtype=torch.float32,\n ext: str = 'npz',\n **kwargs\n ) -> None:\n ''' SMPLX model constructor\n\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n num_expression_coeffs: int, optional\n Number of expression components to use\n (default = 10).\n create_expression: bool, optional\n Flag for creating a member variable for the expression space\n (default = True).\n expression: torch.tensor, optional, Bx10\n The default value for the expression member variable.\n (default = None)\n create_jaw_pose: bool, optional\n Flag for creating a member variable for the jaw pose.\n (default = False)\n jaw_pose: torch.tensor, optional, Bx3\n The default value for the jaw pose variable.\n (default = None)\n create_leye_pose: bool, optional\n Flag for creating a member variable for the left eye pose.\n (default = False)\n leye_pose: torch.tensor, optional, Bx10\n The default value for the left eye pose variable.\n (default = None)\n create_reye_pose: bool, optional\n Flag for creating a member variable for the right eye pose.\n (default = False)\n reye_pose: torch.tensor, optional, Bx10\n The default value for the right eye pose variable.\n (default = None)\n use_face_contour: bool, optional\n Whether to compute the keypoints that form the facial contour\n batch_size: int, optional\n The batch size used for creating the member variables\n gender: str, optional\n Which gender to load\n dtype: torch.dtype\n The data type for the created variables\n '''\n\n # Load the model\n if osp.isdir(model_path):\n model_fn = 'SMPLX_{}.{ext}'.format(gender.upper(), ext=ext)\n smplx_path = os.path.join(model_path, model_fn)\n else:\n smplx_path = model_path\n assert osp.exists(smplx_path), 'Path {} does not exist!'.format(\n smplx_path)\n\n if ext == 'pkl':\n with open(smplx_path, 'rb') as smplx_file:\n model_data = pickle.load(smplx_file, encoding='latin1')\n elif ext == 'npz':\n model_data = np.load(smplx_path, allow_pickle=True)\n else:\n raise ValueError('Unknown extension: {}'.format(ext))\n\n data_struct = Struct(**model_data)\n\n super(SMPLX, self).__init__(\n model_path=model_path,\n 
kid_template_path=kid_template_path,\n data_struct=data_struct,\n dtype=dtype,\n batch_size=batch_size,\n vertex_ids=VERTEX_IDS['smplx'],\n gender=gender, age=age, ext=ext,\n **kwargs)\n\n lmk_faces_idx = data_struct.lmk_faces_idx\n self.register_buffer('lmk_faces_idx',\n torch.tensor(lmk_faces_idx, dtype=torch.long))\n lmk_bary_coords = data_struct.lmk_bary_coords\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_bary_coords, dtype=dtype))\n\n self.use_face_contour = use_face_contour\n if self.use_face_contour:\n dynamic_lmk_faces_idx = data_struct.dynamic_lmk_faces_idx\n dynamic_lmk_faces_idx = torch.tensor(\n dynamic_lmk_faces_idx,\n dtype=torch.long)\n self.register_buffer('dynamic_lmk_faces_idx',\n dynamic_lmk_faces_idx)\n\n dynamic_lmk_bary_coords = data_struct.dynamic_lmk_bary_coords\n dynamic_lmk_bary_coords = torch.tensor(\n dynamic_lmk_bary_coords, dtype=dtype)\n self.register_buffer('dynamic_lmk_bary_coords',\n dynamic_lmk_bary_coords)\n\n neck_kin_chain = find_joint_kin_chain(self.NECK_IDX, self.parents)\n self.register_buffer(\n 'neck_kin_chain',\n torch.tensor(neck_kin_chain, dtype=torch.long))\n\n if create_jaw_pose:\n if jaw_pose is None:\n default_jaw_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_jaw_pose = torch.tensor(jaw_pose, dtype=dtype)\n jaw_pose_param = nn.Parameter(default_jaw_pose,\n requires_grad=True)\n self.register_parameter('jaw_pose', jaw_pose_param)\n\n if create_leye_pose:\n if leye_pose is None:\n default_leye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_leye_pose = torch.tensor(leye_pose, dtype=dtype)\n leye_pose_param = nn.Parameter(default_leye_pose,\n requires_grad=True)\n self.register_parameter('leye_pose', leye_pose_param)\n\n if create_reye_pose:\n if reye_pose is None:\n default_reye_pose = torch.zeros([batch_size, 3], dtype=dtype)\n else:\n default_reye_pose = torch.tensor(reye_pose, dtype=dtype)\n reye_pose_param = nn.Parameter(default_reye_pose,\n requires_grad=True)\n self.register_parameter('reye_pose', reye_pose_param)\n\n shapedirs = data_struct.shapedirs\n if len(shapedirs.shape) < 3:\n shapedirs = shapedirs[:, :, None]\n if (shapedirs.shape[-1] < self.SHAPE_SPACE_DIM +\n self.EXPRESSION_SPACE_DIM):\n print(f'WARNING: You are using a {self.name()} model, with only'\n ' 10 shape and 10 expression coefficients.')\n expr_start_idx = 10\n expr_end_idx = 20\n num_expression_coeffs = min(num_expression_coeffs, 10)\n else:\n expr_start_idx = self.SHAPE_SPACE_DIM\n expr_end_idx = self.SHAPE_SPACE_DIM + num_expression_coeffs\n num_expression_coeffs = min(\n num_expression_coeffs, self.EXPRESSION_SPACE_DIM)\n\n self._num_expression_coeffs = num_expression_coeffs\n\n expr_dirs = shapedirs[:, :, expr_start_idx:expr_end_idx]\n self.register_buffer(\n 'expr_dirs', to_tensor(to_np(expr_dirs), dtype=dtype))\n\n if create_expression:\n if expression is None:\n default_expression = torch.zeros(\n [batch_size, self.num_expression_coeffs], dtype=dtype)\n else:\n default_expression = torch.tensor(expression, dtype=dtype)\n expression_param = nn.Parameter(default_expression,\n requires_grad=True)\n self.register_parameter('expression', expression_param)\n\n def name(self) -> str:\n return 'SMPL-X'\n\n @property\n def num_expression_coeffs(self):\n return self._num_expression_coeffs\n\n def create_mean_pose(self, data_struct, flat_hand_mean=False):\n # Create the array for the mean pose. 
If flat_hand is false, then use\n # the mean that is given by the data, rather than the flat open hand\n global_orient_mean = torch.zeros([3], dtype=self.dtype)\n body_pose_mean = torch.zeros([self.NUM_BODY_JOINTS * 3],\n dtype=self.dtype)\n jaw_pose_mean = torch.zeros([3], dtype=self.dtype)\n leye_pose_mean = torch.zeros([3], dtype=self.dtype)\n reye_pose_mean = torch.zeros([3], dtype=self.dtype)\n # pose_mean = np.concatenate([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], axis=0)\n pose_mean = torch.cat([global_orient_mean, body_pose_mean, jaw_pose_mean, leye_pose_mean, reye_pose_mean, self.left_hand_mean, self.right_hand_mean], 0)\n\n return pose_mean\n\n def extra_repr(self):\n msg = super(SMPLX, self).extra_repr()\n msg = [\n msg,\n f'Number of Expression Coefficients: {self.num_expression_coeffs}'\n ]\n return '\\n'.join(msg)\n\n def forward(\n self,\n betas: Optional[Tensor] = None,\n global_orient: Optional[Tensor] = None,\n body_pose: Optional[Tensor] = None,\n left_hand_pose: Optional[Tensor] = None,\n right_hand_pose: Optional[Tensor] = None,\n transl: Optional[Tensor] = None,\n expression: Optional[Tensor] = None,\n jaw_pose: Optional[Tensor] = None,\n leye_pose: Optional[Tensor] = None,\n reye_pose: Optional[Tensor] = None,\n return_verts: bool = True,\n return_full_pose: bool = False,\n pose2rot: bool = True,\n return_shaped: bool = True,\n **kwargs\n ) -> TensorOutput:\n '''\n Forward pass for the SMPLX model\n\n Parameters\n ----------\n global_orient: torch.tensor, optional, shape Bx3\n If given, ignore the member variable and use it as the global\n rotation of the body. Useful if someone wishes to predicts this\n with an external model. (default=None)\n betas: torch.tensor, optional, shape BxN_b\n If given, ignore the member variable `betas` and use it\n instead. For example, it can used if shape parameters\n `betas` are predicted from some external model.\n (default=None)\n expression: torch.tensor, optional, shape BxN_e\n If given, ignore the member variable `expression` and use it\n instead. For example, it can used if expression parameters\n `expression` are predicted from some external model.\n body_pose: torch.tensor, optional, shape Bx(J*3)\n If given, ignore the member variable `body_pose` and use it\n instead. For example, it can used if someone predicts the\n pose of the body joints are predicted from some external model.\n It should be a tensor that contains joint rotations in\n axis-angle format. (default=None)\n left_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `left_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n right_hand_pose: torch.tensor, optional, shape BxP\n If given, ignore the member variable `right_hand_pose` and\n use this instead. It should either contain PCA coefficients or\n joint rotations in axis-angle format.\n jaw_pose: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `jaw_pose` and\n use this instead. It should either joint rotations in\n axis-angle format.\n transl: torch.tensor, optional, shape Bx3\n If given, ignore the member variable `transl` and use it\n instead. For example, it can used if the translation\n `transl` is predicted from some external model.\n (default=None)\n return_verts: bool, optional\n Return the vertices. 
(default=True)\n return_full_pose: bool, optional\n Returns the full axis-angle pose vector (default=False)\n\n Returns\n -------\n output: ModelOutput\n A named tuple of type `ModelOutput`\n '''\n\n # If no shape and pose parameters are passed along, then use the\n # ones from the module\n global_orient = (global_orient if global_orient is not None else\n self.global_orient)\n body_pose = body_pose if body_pose is not None else self.body_pose\n betas = betas if betas is not None else self.betas\n\n left_hand_pose = (left_hand_pose if left_hand_pose is not None else\n self.left_hand_pose)\n right_hand_pose = (right_hand_pose if right_hand_pose is not None else\n self.right_hand_pose)\n jaw_pose = jaw_pose if jaw_pose is not None else self.jaw_pose\n leye_pose = leye_pose if leye_pose is not None else self.leye_pose\n reye_pose = reye_pose if reye_pose is not None else self.reye_pose\n expression = expression if expression is not None else self.expression\n\n apply_trans = transl is not None or hasattr(self, 'transl')\n if transl is None:\n if hasattr(self, 'transl'):\n transl = self.transl\n\n if self.use_pca:\n left_hand_pose = torch.einsum(\n 'bi,ij->bj', [left_hand_pose, self.left_hand_components])\n right_hand_pose = torch.einsum(\n 'bi,ij->bj', [right_hand_pose, self.right_hand_components])\n\n full_pose = torch.cat([global_orient.reshape(-1, 1, 3),\n body_pose.reshape(-1, self.NUM_BODY_JOINTS, 3),\n jaw_pose.reshape(-1, 1, 3),\n leye_pose.reshape(-1, 1, 3),\n reye_pose.reshape(-1, 1, 3),\n left_hand_pose.reshape(-1, 15, 3),\n right_hand_pose.reshape(-1, 15, 3)],\n dim=1).reshape(-1, 165).to(self.pose_mean.device)\n\n # Add the mean pose of the model. Does not affect the body, only the\n # hands when flat_hand_mean == False\n full_pose += self.pose_mean\n\n batch_size = max(betas.shape[0], global_orient.shape[0],\n body_pose.shape[0])\n # Concatenate the shape and expression coefficients\n scale = int(batch_size / betas.shape[0])\n if scale > 1:\n betas = betas.expand(scale, -1)\n shape_components = torch.cat([betas, expression], dim=-1).to(self.pose_mean.device)\n\n shapedirs = torch.cat([self.shapedirs, self.expr_dirs], dim=-1)\n\n vertices, joints, A, T = lbs(shape_components, full_pose, self.v_template,\n shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, pose2rot=pose2rot,\n )\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(\n dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).repeat(\n self.batch_size, 1, 1)\n if self.use_face_contour:\n lmk_idx_and_bcoords = find_dynamic_lmk_idx_and_bcoords(\n vertices, full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain,\n pose2rot=True,\n )\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = lmk_idx_and_bcoords\n\n lmk_faces_idx = torch.cat([lmk_faces_idx,\n dyn_lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat(\n [lmk_bary_coords.expand(batch_size, -1, -1),\n dyn_lmk_bary_coords], 1)\n\n landmarks = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n\n # import matplotlib.pyplot as plt\n # import numpy as np\n # xs = joints[0,:,0]\n # ys = joints[0,:,1]\n # plt.scatter(xs, ys)\n\n # # zip joins x and y coordinates in pairs\n # count = 0\n # for x,y in zip(xs, ys):\n\n # label = \"{:.2f}\".format(count)\n\n # plt.annotate(label, # this is the text\n # (x,y), # these are the coordinates to position the label\n # textcoords=\"offset points\", # how to position the text\n # xytext=(0,10), # distance 
from text to points (x,y)\n # ha='center') # horizontal alignment can be left, right or center\n # count += 1\n # plt.savefig(\"joints.png\")\n # import pdb; pdb.set_trace()\n\n # Add any extra joints that might be needed\n joints = self.vertex_joint_selector(vertices, joints)\n # Add the landmarks to the joints\n joints = torch.cat([joints, landmarks], dim=1)\n # Map the joints to the current dataset\n\n if self.joint_mapper is not None:\n joints = self.joint_mapper(joints=joints, vertices=vertices)\n\n if apply_trans:\n joints += transl.unsqueeze(dim=1)\n vertices += transl.unsqueeze(dim=1)\n # clone because we are modifying them in-place\n A = A.clone()\n A[..., :3, 3] += transl.unsqueeze(dim=1)\n T = T.clone()\n T[..., :3, 3] += transl.unsqueeze(dim=1)\n\n v_shaped = None\n if return_shaped:\n v_shaped = self.v_template + blend_shapes(betas, self.shapedirs)\n else:\n v_shaped = Tensor(0)\n\n output = TensorOutput(vertices=vertices if return_verts else None,\n joints=joints,\n betas=betas,\n expression=expression,\n global_orient=global_orient,\n body_pose=body_pose,\n left_hand_pose=left_hand_pose,\n right_hand_pose=right_hand_pose,\n jaw_pose=jaw_pose,\n v_shaped=v_shaped,\n full_pose=full_pose if return_full_pose else None,\n A=A,\n T=T,\n f=self.faces)\n return output" }, { "identifier": "SMCReader", "path": "data/dna_rendering/dna_rendering_sample_code/SMCReader.py", "snippet": "class SMCReader:\n\n def __init__(self, file_path):\n \"\"\"Read SenseMocapFile endswith \".smc\".\n\n Args:\n file_path (str):\n Path to an SMC file.\n body_model (nn.Module or dict):\n Only needed for SMPL transformation to device frame\n if nn.Module: a body_model instance\n if dict: a body_model config\n \"\"\"\n self.smc = h5py.File(file_path, 'r')\n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None \n self.__available_keys__ = list(self.smc.keys())\n \n self.actor_info = None \n if hasattr(self.smc, 'attrs') and len(self.smc.attrs.keys()) > 0:\n self.actor_info = dict(\n id=self.smc.attrs['actor_id'],\n perf_id=self.smc.attrs['performance_id'],\n age=self.smc.attrs['age'],\n gender=self.smc.attrs['gender'],\n height=self.smc.attrs['height'],\n weight=self.smc.attrs['weight'],\n ethnicity=self.smc.attrs['ethnicity'],\n )\n\n self.Camera_5mp_info = None \n if 'Camera_5mp' in self.smc:\n self.Camera_5mp_info = dict(\n num_device=self.smc['Camera_5mp'].attrs['num_device'],\n num_frame=self.smc['Camera_5mp'].attrs['num_frame'],\n resolution=self.smc['Camera_5mp'].attrs['resolution'],\n )\n self.Camera_12mp_info = None \n if 'Camera_12mp' in self.smc:\n self.Camera_12mp_info = dict(\n num_device=self.smc['Camera_12mp'].attrs['num_device'],\n num_frame=self.smc['Camera_12mp'].attrs['num_frame'],\n resolution=self.smc['Camera_12mp'].attrs['resolution'],\n )\n self.Kinect_info = None\n if 'Kinect' in self.smc:\n self.Kinect_info=dict(\n num_device=self.smc['Kinect'].attrs['num_device'],\n num_frame=self.smc['Kinect'].attrs['num_frame'],\n resolution=self.smc['Kinect'].attrs['resolution'],\n )\n\n def get_available_keys(self):\n return self.__available_keys__ \n\n def get_actor_info(self):\n return self.actor_info\n \n def get_Camera_12mp_info(self):\n return self.Camera_12mp_info\n\n def get_Camera_5mp_info(self):\n return self.Camera_5mp_info\n \n def get_Kinect_info(self):\n return self.Kinect_info\n \n ### RGB Camera Calibration\n def get_Calibration_all(self):\n \"\"\"Get calibration matrix of all cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration 
matrixs of all matrixs.\n dict( \n Camera_Parameter: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_id(str) in {'Camera_5mp': '0'~'47', 'Camera_12mp':'48'~'60'}\n Matrix_type in ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\" \n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n if self.__calibration_dict__ is not None:\n return self.__calibration_dict__\n\n self.__calibration_dict__ = dict()\n for ci in self.smc['Camera_Parameter'].keys():\n self.__calibration_dict__.setdefault(ci,dict())\n for mt in ['D', 'K', 'RT', 'Color_Calibration'] :\n self.__calibration_dict__[ci][mt] = \\\n self.smc['Camera_Parameter'][ci][mt][()]\n return self.__calibration_dict__\n\n def get_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain camera by its type and id \n\n Args:\n Camera_id (int/str of a number):\n Camera_id(str) in {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT', 'Color_Calibration'] \n \"\"\"\n if not 'Camera_Parameter' in self.smc:\n print(\"=== no key: Camera_Parameter.\\nplease check available keys!\")\n return None \n\n rs = dict()\n for k in ['D', 'K', 'RT', 'Color_Calibration'] :\n rs[k] = self.smc['Camera_Parameter'][f'{int(Camera_id):02d}'][k][()]\n return rs\n\n ### Kinect Camera Calibration\n def get_Kinect_Calibration_all(self):\n \"\"\"Get calibration matrix of all kinect cameras and save it in self\n \n Args:\n None\n\n Returns:\n Dictionary of calibration matrixs of all matrixs.\n dict( \n Camera_group: Camera_id : Matrix_type : value\n )\n Notice:\n Camera_group(str) in ['Kinect']\n Camera_id(str) in {'Kinect': '0'~'7'}\n Matrix_type in ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n if self.__kinect_calib_dict__ is not None:\n return self.__kinect_calib_dict__\n\n self.__kinect_calib_dict__ = dict()\n for cg in ['Kinect']:\n self.__kinect_calib_dict__.setdefault(cg,dict())\n for ci in self.smc['Calibration'][cg].keys():\n self.__kinect_calib_dict__[cg].setdefault(ci,dict())\n for mt in ['D', 'K', 'RT'] :\n self.__kinect_calib_dict__[cg][ci][mt] = \\\n self.smc['Calibration'][cg][ci][mt][()]\n return self.__kinect_calib_dict__\n\n def get_kinect_Calibration(self, Camera_id):\n \"\"\"Get calibration matrixs of a certain kinect camera by its type and id \n\n Args:\n Camera_group (str):\n Camera_group in ['Kinect'].\n Camera_id (int/str of a number):\n CameraID(str) in {'Kinect': '0'~'7'}\n Returns:\n Dictionary of calibration matrixs.\n ['D', 'K', 'RT'] \n \"\"\" \n if not 'Calibration' in self.smc:\n print(\"=== no key: Calibration.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(Camera_id in self.smc['Calibration'][\"Kinect\"].keys())\n rs = dict()\n for k in ['D', 'K', 'RT']:\n rs[k] = self.smc['Calibration'][\"Kinect\"][Camera_id][k][()]\n return rs\n\n ### RGB image\n def __read_color_from_bytes__(self, color_array):\n \"\"\"Decode an RGB image from an encoded byte array.\"\"\"\n return cv2.imdecode(color_array, cv2.IMREAD_COLOR)\n\n def get_mask(self, Camera_id, Frame_id=None, disable_tqdm=True):\n \"\"\"Get mask from Camera_id, Frame_id\n\n Args:\n Camera_id (int/str of a number):\n Camera_id (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'\n b.list of 
numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Mask' in self.smc:\n print(\"=== no key: Mask.\\nplease check available keys!\")\n return None \n\n Camera_id = str(Camera_id)\n\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc['Mask'][Camera_id]['mask'].keys())\n img_byte = self.smc['Mask'][Camera_id]['mask'][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc['Mask'][Camera_id]['mask'].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_mask(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n def get_img(self, Camera_group, Camera_id, Image_type, Frame_id=None, disable_tqdm=True):\n \"\"\"Get image its Camera_group, Camera_id, Image_type and Frame_id\n\n Args:\n Camera_group (str):\n Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'].\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',\n 'Kinect': '0'~'7'}\n Image_type(str) in \n {'Camera_5mp': ['color'], \n 'Camera_12mp': ['color'],\n 'Kinect': ['depth', 'mask']}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not Camera_group in self.smc:\n print(\"=== no key: %s.\\nplease check available keys!\" % Camera_group)\n return None\n\n assert(Camera_group in ['Camera_12mp', 'Camera_5mp','Kinect'])\n Camera_id = str(Camera_id)\n assert(Camera_id in self.smc[Camera_group].keys())\n assert(Image_type in self.smc[Camera_group][Camera_id].keys())\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = str(Frame_id)\n assert(Frame_id in self.smc[Camera_group][Camera_id][Image_type].keys())\n if Image_type in ['color']:\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n if Image_type == 'mask':\n img_byte = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n img_color = self.__read_color_from_bytes__(img_byte)\n img_color = np.max(img_color,2)\n if Image_type == 'depth':\n img_color = self.smc[Camera_group][Camera_id][Image_type][Frame_id][()]\n return img_color \n else:\n if Frame_id is None:\n Frame_id_list =sorted([int(l) for l in self.smc[Camera_group][Camera_id][Image_type].keys()])\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm(Frame_id_list, disable=disable_tqdm):\n rs.append(self.get_img(Camera_group, Camera_id, Image_type,fi))\n return np.stack(rs,axis=0)\n \n ###Keypoints2d\n def get_Keypoints2d(self, Camera_id, Frame_id=None):\n \"\"\"Get keypoint2D by its Camera_group, Camera_id and Frame_id\n\n Args:\n Camera_id (int/str of a number):\n CameraID (str) in \n {'Camera_5mp': '0'~'47', \n 'Camera_12mp':'48'~'60',}\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n 
a single img :\n 'color': HWC in bgr (uint8)\n 'mask' : HW (uint8)\n 'depth': HW (uint16)\n \"\"\" \n if not 'Keypoints_2D' in self.smc:\n print(\"=== no key: Keypoints_2D.\\nplease check available keys!\")\n return None \n\n Camera_id = f'{int(Camera_id):02d}'\n assert(isinstance(Frame_id,(list,int, str, type(None))))\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_2D'][Camera_id][()][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_2D'][Camera_id][()]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints2d(Camera_id,fi))\n return np.stack(rs,axis=0)\n\n ###Keypoints3d\n def get_Keypoints3d(self, Frame_id=None):\n \"\"\"Get keypoint3D Frame_id, TODO coordinate\n\n Args:\n Frame_id a.(int/str of a number): '0' ~ 'num_frame-1'('149') \n b.list of numbers (int/str)\n c.None: get batch of all imgs in order of time sequence \n Returns:\n Keypoints3d tensor: np.ndarray of shape ([N], ,3)\n \"\"\" \n if not 'Keypoints_3D' in self.smc:\n print(\"=== no key: Keypoints_3D.\\nplease check available keys!\")\n return None \n\n if isinstance(Frame_id, (str,int)):\n Frame_id = int(Frame_id)\n return self.smc['Keypoints_3D'][\"keypoints3d\"][Frame_id,:]\n else:\n if Frame_id is None:\n return self.smc['Keypoints_3D'][\"keypoints3d\"]\n elif isinstance(Frame_id, list):\n Frame_id_list = Frame_id\n rs = []\n for fi in tqdm.tqdm(Frame_id_list):\n rs.append(self.get_Keypoints3d(fi))\n return np.stack(rs,axis=0)\n\n ###SMPLx\n def get_SMPLx(self, Frame_id=None):\n \"\"\"Get SMPL (world coordinate) computed by mocap processing pipeline.\n\n Args:\n Frame_id (int, list or None, optional):\n int: frame id of one selected frame\n list: a list of frame id\n None: all frames will be returned\n Defaults to None.\n\n Returns:\n dict:\n 'global_orient': np.ndarray of shape (N, 3)\n 'body_pose': np.ndarray of shape (N, 21, 3)\n 'transl': np.ndarray of shape (N, 3)\n 'betas': np.ndarray of shape (1, 10)\n \"\"\"\n if not 'SMPLx' in self.smc:\n print(\"=== no key: SMPLx.\\nplease check available keys!\")\n return None \n\n t_frame = self.smc['SMPLx']['betas'][()].shape[0]\n if Frame_id is None:\n frame_list = range(t_frame)\n elif isinstance(Frame_id, list):\n frame_list = [int(fi) for fi in Frame_id]\n elif isinstance(Frame_id, (int,str)):\n Frame_id = int(Frame_id)\n assert Frame_id < t_frame,\\\n f'Invalid frame_index {Frame_id}'\n frame_list = Frame_id\n else:\n raise TypeError('frame_id should be int, list or None.')\n\n smpl_dict = {}\n for key in ['betas', 'expression', 'fullpose', 'transl']:\n smpl_dict[key] = self.smc['SMPLx'][key][()][frame_list, ...]\n smpl_dict['scale'] = self.smc['SMPLx']['scale'][()]\n\n return smpl_dict\n\n def release(self):\n self.smc = None \n self.__calibration_dict__ = None\n self.__kinect_calib_dict__ = None\n self.__available_keys__ = None\n self.actor_info = None \n self.Camera_5mp_info = None\n self.Camera_12mp_info = None \n self.Kinect_info = None" } ]
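The numpy SMPL snippet at the top of this record's context ends with the linear blend skinning step: homogeneous rest-pose vertices are multiplied by per-vertex 4x4 transforms obtained as `weights.dot(G.reshape(24, -1)).reshape(6890, 4, 4)`. The reshape trick is easy to misread, so here is a small self-contained sketch (toy sizes, random data) confirming that it is just a weighted sum of the joint transforms applied to each vertex.

```python
# Toy-sized check of the blend-skinning step from the SMPL snippet above:
# per-vertex transform T_v = sum_j weights[v, j] * G[j], then v' = (T_v @ [x, 1])[:3].
import numpy as np

V, J = 5, 3                                    # 6890 vertices / 24 joints in the real model
rng = np.random.default_rng(0)
weights = rng.random((V, J))
weights /= weights.sum(axis=1, keepdims=True)  # skinning weights sum to 1 per vertex
G = rng.normal(size=(J, 4, 4))                 # per-joint global transforms
v_posed = rng.normal(size=(V, 3))

# The matrix-product-plus-reshape form used in the snippet ...
T_fast = weights.dot(G.reshape(J, -1)).reshape(V, 4, 4)
# ... equals the explicit weighted sum of joint transforms.
T_explicit = np.einsum('vj,jab->vab', weights, G)
assert np.allclose(T_fast, T_explicit)

rest_shape_h = np.concatenate([v_posed, np.ones((V, 1))], axis=-1)   # homogeneous coordinates
v_out = np.matmul(T_fast, rest_shape_h[:, :, None])[:, :3, 0]        # skinned vertices, shape (V, 3)
print(v_out.shape)
```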
import os import sys import numpy as np import torch import json import imageio import cv2 import random from PIL import Image from typing import NamedTuple from scene.colmap_loader import read_extrinsics_text, read_intrinsics_text, qvec2rotmat, \ read_extrinsics_binary, read_intrinsics_binary, read_points3D_binary, read_points3D_text from utils.graphics_utils import getWorld2View2, focal2fov, fov2focal from pathlib import Path from plyfile import PlyData, PlyElement from utils.sh_utils import SH2RGB from scene.gaussian_model import BasicPointCloud from smpl.smpl_numpy import SMPL from smplx.body_models import SMPLX from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader
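The import block above pulls `focal2fov` and `fov2focal` from `utils.graphics_utils`; every reader below uses them to turn the intrinsics `K` into the `FovX`/`FovY` fields of `CameraInfo`. That module is not part of this record, so the sketch below spells out the standard pinhole relation these helpers are assumed to implement, and why rescaling `K[:2]` together with the image (as the readers do) leaves the field of view unchanged.

```python
# Assumed pinhole focal-length <-> field-of-view relation behind focal2fov /
# fov2focal (utils/graphics_utils.py is not included in this record, so treat
# these bodies as the conventional definitions rather than the exact source).
import math

def focal2fov(focal: float, pixels: float) -> float:
    # Full opening angle (radians) covered by `pixels` at focal length `focal`, both in px.
    return 2.0 * math.atan(pixels / (2.0 * focal))

def fov2focal(fov: float, pixels: float) -> float:
    return pixels / (2.0 * math.tan(fov / 2.0))

# Hypothetical numbers: focal K[0, 0] = 1100 px on a 1024 px wide image.
fx, W, ratio = 1100.0, 1024, 0.5
# Scaling K[:2] by `ratio` while resizing the image by the same ratio keeps FovX fixed,
# which is what the readers rely on after `K[:2] = K[:2] * ratio`.
assert abs(focal2fov(fx, W) - focal2fov(fx * ratio, W * ratio)) < 1e-9
```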
17,242
# get the world-to-camera transform and set R, T R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] # Reduce the image resolution by ratio, then remove the back ground ratio = image_scaling if ratio != 1.: H, W = int(image.shape[0] * ratio), int(image.shape[1] * ratio) image = cv2.resize(image, (W, H), interpolation=cv2.INTER_AREA) msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) K[:2] = K[:2] * ratio image = Image.fromarray(np.array(image*255.0, dtype=np.byte), "RGB") focalX = K[0,0] focalY = K[1,1] FovX = focal2fov(focalX, image.size[0]) FovY = focal2fov(focalY, image.size[1]) # load smpl data i = int(os.path.basename(image_path)[:-4]) vertices_path = os.path.join(path, 'smpl_vertices', '{}.npy'.format(i)) xyz = np.load(vertices_path).astype(np.float32) smpl_param_path = os.path.join(path, "smpl_params", '{}.npy'.format(i)) smpl_param = np.load(smpl_param_path, allow_pickle=True).item() Rh = smpl_param['Rh'] smpl_param['R'] = cv2.Rodrigues(Rh)[0].astype(np.float32) smpl_param['Th'] = smpl_param['Th'].astype(np.float32) smpl_param['shapes'] = smpl_param['shapes'].astype(np.float32) smpl_param['poses'] = smpl_param['poses'].astype(np.float32) # obtain the original bounds for point sampling min_xyz = np.min(xyz, axis=0) max_xyz = np.max(xyz, axis=0) min_xyz -= 0.05 max_xyz += 0.05 world_bound = np.stack([min_xyz, max_xyz], axis=0) # get bounding mask and bcakground mask bound_mask = get_bound_2d_mask(world_bound, K, w2c[:3], image.size[1], image.size[0]) bound_mask = Image.fromarray(np.array(bound_mask*255.0, dtype=np.byte)) bkgd_mask = Image.fromarray(np.array(msk*255.0, dtype=np.byte)) cam_infos.append(CameraInfo(uid=idx, pose_id=pose_index, R=R, T=T, K=K, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=bkgd_mask, bound_mask=bound_mask, width=image.size[0], height=image.size[1], smpl_param=smpl_param, world_vertex=xyz, world_bound=world_bound, big_pose_smpl_param=big_pose_smpl_param, big_pose_world_vertex=big_pose_xyz, big_pose_world_bound=big_pose_world_bound)) idx += 1 return cam_infos def readZJUMoCapRefineInfo(path, white_background, output_path, eval): train_view = [4] test_view = [i for i in range(0, 23)] test_view.remove(train_view[0]) print("Reading Training Transforms") train_cam_infos = readCamerasZJUMoCapRefine(path, train_view, white_background, split='train') print("Reading Test Transforms") test_cam_infos = readCamerasZJUMoCapRefine(path, test_view, white_background, split='test', novel_view_vis=False) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) if len(train_view) == 1: nerf_normalization['radius'] = 1 ply_path = os.path.join('output', output_path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 6890 #100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = train_cam_infos[0].big_pose_world_vertex shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## 
DNARendering ################################## def readCamerasDNARendering(path, output_view, white_background, image_scaling=0.5, split='train'): cam_infos = [] if split == 'train': pose_start = 0 pose_interval = 1 pose_num = 100 else: pose_start = 0 pose_interval = 5 pose_num = 20 smc_reader = SMCReader(path) annots_file_path = path.replace('main', 'annotations').split('.')[0] + '_annots.smc' smc_annots_reader = SMCReader(annots_file_path) gender = smc_reader.actor_info['gender'] smpl_model = {}
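The cropped code ends by opening the DNA-Rendering capture twice, once for the main `.smc` file and once for its `_annots.smc` companion, and reading the actor's gender. As rough orientation for how those readers are then queried, here is a minimal usage sketch based only on the SMCReader methods documented in this record's context; the file paths, camera id and frame id are placeholders, and which of the two files carries `Camera_Parameter`/`Mask` is an assumption.

```python
# Minimal SMCReader usage sketch (paths, camera id and frame id are placeholders).
from data.dna_rendering.dna_rendering_sample_code.SMCReader import SMCReader

smc_reader = SMCReader('data/dna_rendering/main/0000_00.smc')
smc_annots_reader = SMCReader('data/dna_rendering/annotations/0000_00_annots.smc')

gender = smc_reader.actor_info['gender']

# Per-camera calibration: distortion D, intrinsics K, extrinsics RT, colour correction.
# (Assumed to live in the annotation file; adjust if the capture stores it elsewhere.)
calib = smc_annots_reader.get_Calibration(Camera_id=0)
K, D, RT = calib['K'], calib['D'], calib['RT']

# One colour frame from the 5MP rig and a foreground mask for the same frame.
img = smc_reader.get_img('Camera_5mp', 0, Image_type='color', Frame_id=0)   # HWC, BGR uint8
msk = smc_annots_reader.get_mask(0, Frame_id=0)                              # HW uint8

# SMPL-X fit for that frame (world coordinates): betas, expression, fullpose, transl, scale.
smplx_dict = smc_annots_reader.get_SMPLx(Frame_id=0)
print(gender, img.shape, msk.shape, smplx_dict['fullpose'].shape)
```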
# # Copyright (C) 2023, Inria # GRAPHDECO research group, https://team.inria.fr/graphdeco # All rights reserved. # # This software is free for non-commercial, research and evaluation use # under the terms of the LICENSE.md file. # # For inquiries contact [email protected] # class CameraInfo(NamedTuple): uid: int pose_id: int R: np.array T: np.array K: np.array FovY: np.array FovX: np.array image: np.array image_path: str image_name: str bkgd_mask: np.array bound_mask: np.array width: int height: int smpl_param: dict world_vertex: np.array world_bound: np.array big_pose_smpl_param: dict big_pose_world_vertex: np.array big_pose_world_bound: np.array class SceneInfo(NamedTuple): point_cloud: BasicPointCloud train_cameras: list test_cameras: list nerf_normalization: dict ply_path: str def getNerfppNorm(cam_info): def get_center_and_diag(cam_centers): cam_centers = np.hstack(cam_centers) avg_cam_center = np.mean(cam_centers, axis=1, keepdims=True) center = avg_cam_center dist = np.linalg.norm(cam_centers - center, axis=0, keepdims=True) diagonal = np.max(dist) return center.flatten(), diagonal cam_centers = [] for cam in cam_info: W2C = getWorld2View2(cam.R, cam.T) C2W = np.linalg.inv(W2C) cam_centers.append(C2W[:3, 3:4]) center, diagonal = get_center_and_diag(cam_centers) radius = diagonal * 1.1 translate = -center return {"translate": translate, "radius": radius} def readColmapCameras(cam_extrinsics, cam_intrinsics, images_folder): cam_infos = [] for idx, key in enumerate(cam_extrinsics): sys.stdout.write('\r') # the exact output you're looking for: sys.stdout.write("Reading camera {}/{}".format(idx+1, len(cam_extrinsics))) sys.stdout.flush() extr = cam_extrinsics[key] intr = cam_intrinsics[extr.camera_id] height = intr.height width = intr.width uid = intr.id R = np.transpose(qvec2rotmat(extr.qvec)) T = np.array(extr.tvec) if intr.model=="SIMPLE_PINHOLE": focal_length_x = intr.params[0] FovY = focal2fov(focal_length_x, height) FovX = focal2fov(focal_length_x, width) elif intr.model=="PINHOLE": focal_length_x = intr.params[0] focal_length_y = intr.params[1] FovY = focal2fov(focal_length_y, height) FovX = focal2fov(focal_length_x, width) else: assert False, "Colmap camera model not handled: only undistorted datasets (PINHOLE or SIMPLE_PINHOLE cameras) supported!" 
image_path = os.path.join(images_folder, os.path.basename(extr.name)) image_name = os.path.basename(image_path).split(".")[0] image = Image.open(image_path) cam_info = CameraInfo(uid=uid, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, width=width, height=height) cam_infos.append(cam_info) sys.stdout.write('\n') return cam_infos def fetchPly(path): plydata = PlyData.read(path) vertices = plydata['vertex'] positions = np.vstack([vertices['x'], vertices['y'], vertices['z']]).T colors = np.vstack([vertices['red'], vertices['green'], vertices['blue']]).T / 255.0 normals = np.vstack([vertices['nx'], vertices['ny'], vertices['nz']]).T return BasicPointCloud(points=positions, colors=colors, normals=normals) def storePly(path, xyz, rgb): # Define the dtype for the structured array dtype = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('nx', 'f4'), ('ny', 'f4'), ('nz', 'f4'), ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')] normals = np.zeros_like(xyz) elements = np.empty(xyz.shape[0], dtype=dtype) attributes = np.concatenate((xyz, normals, rgb), axis=1) elements[:] = list(map(tuple, attributes)) # Create the PlyData object and write to file vertex_element = PlyElement.describe(elements, 'vertex') ply_data = PlyData([vertex_element]) ply_data.write(path) def readColmapSceneInfo(path, images, eval, llffhold=8): try: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.bin") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.bin") cam_extrinsics = read_extrinsics_binary(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_binary(cameras_intrinsic_file) except: cameras_extrinsic_file = os.path.join(path, "sparse/0", "images.txt") cameras_intrinsic_file = os.path.join(path, "sparse/0", "cameras.txt") cam_extrinsics = read_extrinsics_text(cameras_extrinsic_file) cam_intrinsics = read_intrinsics_text(cameras_intrinsic_file) reading_dir = "images" if images == None else images cam_infos_unsorted = readColmapCameras(cam_extrinsics=cam_extrinsics, cam_intrinsics=cam_intrinsics, images_folder=os.path.join(path, reading_dir)) cam_infos = sorted(cam_infos_unsorted.copy(), key = lambda x : x.image_name) if eval: train_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold != 0] test_cam_infos = [c for idx, c in enumerate(cam_infos) if idx % llffhold == 0] else: train_cam_infos = cam_infos test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "sparse/0/points3D.ply") bin_path = os.path.join(path, "sparse/0/points3D.bin") txt_path = os.path.join(path, "sparse/0/points3D.txt") if not os.path.exists(ply_path): print("Converting point3d.bin to .ply, will happen only the first time you open the scene.") try: xyz, rgb, _ = read_points3D_binary(bin_path) except: xyz, rgb, _ = read_points3D_text(txt_path) storePly(ply_path, xyz, rgb) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info def readCamerasFromTransforms(path, transformsfile, white_background, extension=".png"): cam_infos = [] with open(os.path.join(path, transformsfile)) as json_file: contents = json.load(json_file) fovx = contents["camera_angle_x"] frames = contents["frames"] for idx, frame in enumerate(frames[:20]): cam_name = os.path.join(path, frame["file_path"] + extension) # NeRF 'transform_matrix' is a camera-to-world transform c2w = 
np.array(frame["transform_matrix"]) # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) c2w[:3, 1:3] *= -1 # get the world-to-camera transform and set R, T w2c = np.linalg.inv(c2w) R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] image_path = os.path.join(path, cam_name) image_name = Path(cam_name).stem image = Image.open(image_path) im_data = np.array(image.convert("RGBA")) bg = np.array([1,1,1]) if white_background else np.array([0, 0, 0]) norm_data = im_data / 255.0 arr = norm_data[:,:,:3] * norm_data[:, :, 3:4] + bg * (1 - norm_data[:, :, 3:4]) image = Image.fromarray(np.array(arr*255.0, dtype=np.byte), "RGB") fovy = focal2fov(fov2focal(fovx, image.size[0]), image.size[1]) FovY = fovy FovX = fovx cam_infos.append(CameraInfo(uid=idx, R=R, T=T, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=None, bound_mask=None, width=image.size[0], height=image.size[1])) return cam_infos def readNerfSyntheticInfo(path, white_background, eval, extension=".png"): print("Reading Training Transforms") train_cam_infos = readCamerasFromTransforms(path, "transforms_train.json", white_background, extension) print("Reading Test Transforms") test_cam_infos = readCamerasFromTransforms(path, "transforms_test.json", white_background, extension) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) ply_path = os.path.join(path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = np.random.random((num_pts, 3)) * 2.6 - 1.3 shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## MonoCap ################################## def get_camera_extrinsics_monocap(view_index, val=False, camera_view_num=36): def norm_np_arr(arr): return arr / np.linalg.norm(arr) def lookat(eye, at, up): zaxis = norm_np_arr(at - eye) xaxis = norm_np_arr(np.cross(zaxis, up)) yaxis = np.cross(xaxis, zaxis) _viewMatrix = np.array([ [xaxis[0], xaxis[1], xaxis[2], -np.dot(xaxis, eye)], [yaxis[0], yaxis[1], yaxis[2], -np.dot(yaxis, eye)], [-zaxis[0], -zaxis[1], -zaxis[2], np.dot(zaxis, eye)], [0 , 0 , 0 , 1 ] ]) return _viewMatrix def fix_eye(phi, theta): camera_distance = 3 return np.array([ camera_distance * np.sin(theta) * np.cos(phi), camera_distance * np.sin(theta) * np.sin(phi), camera_distance * np.cos(theta) ]) if val: at = np.array([0, 0.8, 0]).astype(np.float32) eye = fix_eye(np.pi + np.pi/12 + 1e-6, -np.pi/2 + 2 * np.pi * view_index / camera_view_num + 1e-6).astype(np.float32) + at extrinsics = lookat(eye, at, np.array([0, 1, 0])).astype(np.float32) return extrinsics def readCamerasMonoCapdata(path, output_view, white_background, image_scaling=1.0, split='train', novel_view_vis=False): cam_infos = [] if 'olek_images0812' in path or 'vlad_images1011' in path: pose_start = 1 else: pose_start = 0 if split == 'train': pose_interval = 5 pose_num = 100 elif split == 
'test': pose_interval = 30 pose_num = 17 annot_path = os.path.join(path, 'annots.npy') annots = np.load(annot_path, allow_pickle=True).item() cam = annots['cams'] # load SMPL model smpl_model = SMPL(sex='neutral', model_dir='assets/SMPL_NEUTRAL_renderpeople.pkl') # SMPL in canonical space big_pose_smpl_param = {} big_pose_smpl_param['R'] = np.eye(3).astype(np.float32) big_pose_smpl_param['Th'] = np.zeros((1,3)).astype(np.float32) big_pose_smpl_param['shapes'] = np.zeros((1,10)).astype(np.float32) big_pose_smpl_param['poses'] = np.zeros((1,72)).astype(np.float32) big_pose_smpl_param['poses'][0, 5] = 45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 8] = -45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 23] = -30/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 26] = 30/180*np.array(np.pi) big_pose_xyz, _ = smpl_model(big_pose_smpl_param['poses'], big_pose_smpl_param['shapes'].reshape(-1)) big_pose_xyz = (np.matmul(big_pose_xyz, big_pose_smpl_param['R'].transpose()) + big_pose_smpl_param['Th']).astype(np.float32) # obtain the original bounds for point sampling big_pose_min_xyz = np.min(big_pose_xyz, axis=0) big_pose_max_xyz = np.max(big_pose_xyz, axis=0) big_pose_min_xyz -= 0.05 big_pose_max_xyz += 0.05 big_pose_world_bound = np.stack([big_pose_min_xyz, big_pose_max_xyz], axis=0) idx = 0 for pose_index in range(pose_start, pose_start+pose_num*pose_interval, pose_interval): for view_index in output_view: if novel_view_vis: view_index_look_at = view_index view_index = 0 # Load image, mask, K, D, R, T if 'olek_images0812' in path: image_path = os.path.join(path, 'images', str(view_index).zfill(2), str(pose_index).zfill(6)+'.jpg') msk_path = os.path.join(path, 'mask', str(view_index).zfill(2), str(pose_index).zfill(6)+'.png') elif 'vlad_images1011' in path: image_path = os.path.join(path, 'images', str(view_index).zfill(3), str(pose_index).zfill(6)+'.jpg') msk_path = os.path.join(path, 'mask', str(view_index).zfill(3), str(pose_index).zfill(6)+'.jpg') else: image_path = os.path.join(path, 'images', str(view_index).zfill(2), str(pose_index).zfill(4)+'.jpg') msk_path = os.path.join(path, 'mask', str(view_index).zfill(2), str(pose_index).zfill(4)+'.png') image_name = view_index image = np.array(imageio.imread(image_path).astype(np.float32) / 255.) 
msk = imageio.imread(msk_path).astype(np.float32) / 255 if msk.shape[-1] == 3: msk = msk[:,:,0] if not novel_view_vis: cam_id = view_index K = cam['K'][cam_id] D = cam['D'][cam_id] R = cam["R"][cam_id] T = cam["T"][cam_id][...,None].reshape(-1, 1) / 1000 # undistort image and mask image = cv2.undistort(image, K, D) msk = cv2.undistort(msk, K, D) else: pose = np.matmul(np.array([[1,0,0,0], [0,-1,0,0], [0,0,-1,0], [0,0,0,1]]), get_camera_extrinsics_monocap(view_index_look_at, val=True)) R = pose[:3,:3] T = pose[:3, 3].reshape(-1, 1) cam_id = view_index K = cam['K'][cam_id] # mask image if 'olek_images0812' in path or 'vlad_images1011' in path: image = image * msk[...,None].repeat(3, axis=2) else: image[msk == 0] = 1 if white_background else 0 # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) w2c = np.eye(4) w2c[:3,:3] = R w2c[:3,3:4] = T # get the world-to-camera transform and set R, T R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] # Reduce the image resolution by ratio, then remove the back ground ratio = image_scaling if ratio != 1.0: H, W = int(image.shape[0] * ratio), int(image.shape[1] * ratio) image = cv2.resize(image, (W, H), interpolation=cv2.INTER_AREA) msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) K[:2] = K[:2] * ratio image = Image.fromarray(np.array(image*255.0, dtype=np.byte), "RGB") focalX = K[0,0] focalY = K[1,1] FovX = focal2fov(focalX, image.size[0]) FovY = focal2fov(focalY, image.size[1]) # load smpl data params_path = os.path.join(path, 'params', '{}.npy'.format(pose_index)) params = np.load(params_path, allow_pickle=True).item() Rh = params['Rh'].astype(np.float32) Th = params['Th'].astype(np.float32) smpl_param = {} smpl_param['shapes'] = np.array(params['shapes']).astype(np.float32) smpl_param['poses'] = np.array(params["poses"]).astype(np.float32).reshape(1,72) smpl_param['R'] = cv2.Rodrigues(Rh)[0].astype(np.float32) #np.eye(3).astype(np.float32) smpl_param['Th'] = Th #np.array(params["Th"]).astype(np.float32) xyz, _ = smpl_model(smpl_param['poses'], smpl_param['shapes'].reshape(-1)) xyz = (np.matmul(xyz, smpl_param['R'].transpose()) + smpl_param['Th']).astype(np.float32) # obtain the original bounds for point sampling min_xyz = np.min(xyz, axis=0) max_xyz = np.max(xyz, axis=0) min_xyz -= 0.1 max_xyz += 0.1 world_bound = np.stack([min_xyz, max_xyz], axis=0) # get bounding mask and bcakground mask bound_mask = get_bound_2d_mask(world_bound, K, w2c[:3], image.size[1], image.size[0]) bound_mask = Image.fromarray(np.array(bound_mask*255.0, dtype=np.byte)) bkgd_mask = Image.fromarray(np.array(msk*255.0, dtype=np.byte)) cam_infos.append(CameraInfo(uid=idx, pose_id=pose_index, R=R, T=T, K=K, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=bkgd_mask, bound_mask=bound_mask, width=image.size[0], height=image.size[1], smpl_param=smpl_param, world_vertex=xyz, world_bound=world_bound, big_pose_smpl_param=big_pose_smpl_param, big_pose_world_vertex=big_pose_xyz, big_pose_world_bound=big_pose_world_bound)) idx += 1 return cam_infos def readMonoCapdataInfo(path, white_background, output_path, eval): if 'olek_images0812' in path: train_view = [44] test_view = [0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 49] test_view = [45] elif 'vlad_images1011' in path: train_view = [66] test_view = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100] else: train_view = [0] test_view = range(1,11) print("Reading Training Transforms") train_cam_infos = 
readCamerasMonoCapdata(path, train_view, white_background, split='train') print("Reading Test Transforms") test_cam_infos = readCamerasMonoCapdata(path, test_view, white_background, split='test', novel_view_vis=False) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) if len(train_view) == 1: nerf_normalization['radius'] = 1 # ply_path = os.path.join(path, "points3d.ply") ply_path = os.path.join('output', output_path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 6890 #100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = train_cam_infos[0].big_pose_world_vertex shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## ZJUMoCapRefine ################################## def get_camera_extrinsics_zju_mocap_refine(view_index, val=False, camera_view_num=36): def norm_np_arr(arr): return arr / np.linalg.norm(arr) def lookat(eye, at, up): zaxis = norm_np_arr(at - eye) xaxis = norm_np_arr(np.cross(zaxis, up)) yaxis = np.cross(xaxis, zaxis) _viewMatrix = np.array([ [xaxis[0], xaxis[1], xaxis[2], -np.dot(xaxis, eye)], [yaxis[0], yaxis[1], yaxis[2], -np.dot(yaxis, eye)], [-zaxis[0], -zaxis[1], -zaxis[2], np.dot(zaxis, eye)], [0 , 0 , 0 , 1 ] ]) return _viewMatrix def fix_eye(phi, theta): camera_distance = 3 return np.array([ camera_distance * np.sin(theta) * np.cos(phi), camera_distance * np.sin(theta) * np.sin(phi), camera_distance * np.cos(theta) ]) if val: eye = fix_eye(np.pi + 2 * np.pi * view_index / camera_view_num + 1e-6, np.pi/2 + np.pi/12 + 1e-6).astype(np.float32) + np.array([0, 0, -0.8]).astype(np.float32) at = np.array([0, 0, -0.8]).astype(np.float32) extrinsics = lookat(eye, at, np.array([0, 0, -1])).astype(np.float32) return extrinsics def readCamerasZJUMoCapRefine(path, output_view, white_background, image_scaling=0.5, split='train', novel_view_vis=False): cam_infos = [] pose_start = 0 if split == 'train': pose_interval = 5 pose_num = 100 elif split == 'test': pose_start = 0 pose_interval = 30 pose_num = 17 ann_file = os.path.join(path, 'annots.npy') annots = np.load(ann_file, allow_pickle=True).item() cams = annots['cams'] ims = np.array([ np.array(ims_data['ims'])[output_view] for ims_data in annots['ims'][pose_start:pose_start + pose_num * pose_interval][::pose_interval] ]) cam_inds = np.array([ np.arange(len(ims_data['ims']))[output_view] for ims_data in annots['ims'][pose_start:pose_start + pose_num * pose_interval][::pose_interval] ]) if 'CoreView_313' in path or 'CoreView_315' in path: for i in range(ims.shape[0]): ims[i] = [x.split('/')[0] + '/' + x.split('/')[1].split('_')[4] + '.jpg' for x in ims[i]] smpl_model = SMPL(sex='neutral', model_dir='assets/SMPL_NEUTRAL_renderpeople.pkl') # SMPL in canonical space big_pose_smpl_param = {} big_pose_smpl_param['R'] = np.eye(3).astype(np.float32) big_pose_smpl_param['Th'] = np.zeros((1,3)).astype(np.float32) big_pose_smpl_param['shapes'] = np.zeros((1,10)).astype(np.float32) big_pose_smpl_param['poses'] = 
np.zeros((1,72)).astype(np.float32) big_pose_smpl_param['poses'][0, 5] = 45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 8] = -45/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 23] = -30/180*np.array(np.pi) big_pose_smpl_param['poses'][0, 26] = 30/180*np.array(np.pi) big_pose_xyz, _ = smpl_model(big_pose_smpl_param['poses'], big_pose_smpl_param['shapes'].reshape(-1)) big_pose_xyz = (np.matmul(big_pose_xyz, big_pose_smpl_param['R'].transpose()) + big_pose_smpl_param['Th']).astype(np.float32) # obtain the original bounds for point sampling big_pose_min_xyz = np.min(big_pose_xyz, axis=0) big_pose_max_xyz = np.max(big_pose_xyz, axis=0) big_pose_min_xyz -= 0.05 big_pose_max_xyz += 0.05 big_pose_world_bound = np.stack([big_pose_min_xyz, big_pose_max_xyz], axis=0) idx = 0 for pose_index in range(pose_num): for view_index in range(len(output_view)): if novel_view_vis: view_index_look_at = view_index view_index = 0 # Load image, mask, K, D, R, T image_path = os.path.join(path, ims[pose_index][view_index].replace('\\', '/')) image_name = ims[pose_index][view_index].split('.')[0] image = np.array(imageio.imread(image_path).astype(np.float32)/255.) msk_path = image_path.replace('images', 'mask').replace('jpg', 'png') msk = imageio.imread(msk_path) msk = (msk != 0).astype(np.uint8) if not novel_view_vis: cam_ind = cam_inds[pose_index][view_index] K = np.array(cams['K'][cam_ind]) D = np.array(cams['D'][cam_ind]) R = np.array(cams['R'][cam_ind]) T = np.array(cams['T'][cam_ind]) / 1000. image = cv2.undistort(image, K, D) msk = cv2.undistort(msk, K, D) else: pose = np.matmul(np.array([[1,0,0,0], [0,-1,0,0], [0,0,-1,0], [0,0,0,1]]), get_camera_extrinsics_zju_mocap_refine(view_index_look_at, val=True)) R = pose[:3,:3] T = pose[:3, 3].reshape(-1, 1) cam_ind = cam_inds[pose_index][view_index] K = np.array(cams['K'][cam_ind]) image[msk == 0] = 1 if white_background else 0 # change from OpenGL/Blender camera axes (Y up, Z back) to COLMAP (Y down, Z forward) w2c = np.eye(4) w2c[:3,:3] = R w2c[:3,3:4] = T # get the world-to-camera transform and set R, T R = np.transpose(w2c[:3,:3]) # R is stored transposed due to 'glm' in CUDA code T = w2c[:3, 3] # Reduce the image resolution by ratio, then remove the back ground ratio = image_scaling if ratio != 1.: H, W = int(image.shape[0] * ratio), int(image.shape[1] * ratio) image = cv2.resize(image, (W, H), interpolation=cv2.INTER_AREA) msk = cv2.resize(msk, (W, H), interpolation=cv2.INTER_NEAREST) K[:2] = K[:2] * ratio image = Image.fromarray(np.array(image*255.0, dtype=np.byte), "RGB") focalX = K[0,0] focalY = K[1,1] FovX = focal2fov(focalX, image.size[0]) FovY = focal2fov(focalY, image.size[1]) # load smpl data i = int(os.path.basename(image_path)[:-4]) vertices_path = os.path.join(path, 'smpl_vertices', '{}.npy'.format(i)) xyz = np.load(vertices_path).astype(np.float32) smpl_param_path = os.path.join(path, "smpl_params", '{}.npy'.format(i)) smpl_param = np.load(smpl_param_path, allow_pickle=True).item() Rh = smpl_param['Rh'] smpl_param['R'] = cv2.Rodrigues(Rh)[0].astype(np.float32) smpl_param['Th'] = smpl_param['Th'].astype(np.float32) smpl_param['shapes'] = smpl_param['shapes'].astype(np.float32) smpl_param['poses'] = smpl_param['poses'].astype(np.float32) # obtain the original bounds for point sampling min_xyz = np.min(xyz, axis=0) max_xyz = np.max(xyz, axis=0) min_xyz -= 0.05 max_xyz += 0.05 world_bound = np.stack([min_xyz, max_xyz], axis=0) # get bounding mask and bcakground mask bound_mask = get_bound_2d_mask(world_bound, K, w2c[:3], image.size[1], 
image.size[0]) bound_mask = Image.fromarray(np.array(bound_mask*255.0, dtype=np.byte)) bkgd_mask = Image.fromarray(np.array(msk*255.0, dtype=np.byte)) cam_infos.append(CameraInfo(uid=idx, pose_id=pose_index, R=R, T=T, K=K, FovY=FovY, FovX=FovX, image=image, image_path=image_path, image_name=image_name, bkgd_mask=bkgd_mask, bound_mask=bound_mask, width=image.size[0], height=image.size[1], smpl_param=smpl_param, world_vertex=xyz, world_bound=world_bound, big_pose_smpl_param=big_pose_smpl_param, big_pose_world_vertex=big_pose_xyz, big_pose_world_bound=big_pose_world_bound)) idx += 1 return cam_infos def readZJUMoCapRefineInfo(path, white_background, output_path, eval): train_view = [4] test_view = [i for i in range(0, 23)] test_view.remove(train_view[0]) print("Reading Training Transforms") train_cam_infos = readCamerasZJUMoCapRefine(path, train_view, white_background, split='train') print("Reading Test Transforms") test_cam_infos = readCamerasZJUMoCapRefine(path, test_view, white_background, split='test', novel_view_vis=False) if not eval: train_cam_infos.extend(test_cam_infos) test_cam_infos = [] nerf_normalization = getNerfppNorm(train_cam_infos) if len(train_view) == 1: nerf_normalization['radius'] = 1 ply_path = os.path.join('output', output_path, "points3d.ply") if not os.path.exists(ply_path): # Since this data set has no colmap data, we start with random points num_pts = 6890 #100_000 print(f"Generating random point cloud ({num_pts})...") # We create random points inside the bounds of the synthetic Blender scenes xyz = train_cam_infos[0].big_pose_world_vertex shs = np.random.random((num_pts, 3)) / 255.0 pcd = BasicPointCloud(points=xyz, colors=SH2RGB(shs), normals=np.zeros((num_pts, 3))) storePly(ply_path, xyz, SH2RGB(shs) * 255) try: pcd = fetchPly(ply_path) except: pcd = None scene_info = SceneInfo(point_cloud=pcd, train_cameras=train_cam_infos, test_cameras=test_cam_infos, nerf_normalization=nerf_normalization, ply_path=ply_path) return scene_info ################################## DNARendering ################################## def readCamerasDNARendering(path, output_view, white_background, image_scaling=0.5, split='train'): cam_infos = [] if split == 'train': pose_start = 0 pose_interval = 1 pose_num = 100 else: pose_start = 0 pose_interval = 5 pose_num = 20 smc_reader = SMCReader(path) annots_file_path = path.replace('main', 'annotations').split('.')[0] + '_annots.smc' smc_annots_reader = SMCReader(annots_file_path) gender = smc_reader.actor_info['gender'] smpl_model = {}
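All three readers build the same 4x4 world-to-camera matrix from the per-view `R`, `T` and then store `R` transposed, with the recurring comment about the 'glm' convention of the CUDA rasterizer. The numpy sketch below makes that convention concrete and shows how the camera centre that `getNerfppNorm` averages falls out of it; `getWorld2View2` itself lives in `utils/graphics_utils.py` and is not reproduced in this record, so the direct matrix inverse is used instead.

```python
# Sketch of the camera convention used by the readers above:
# the annotations give x_cam = R @ x_world + T, CameraInfo keeps R transposed
# (column-major 'glm' layout expected by the CUDA rasterizer), and the camera
# centre in world space is the translation column of the camera-to-world matrix.
import numpy as np

rng = np.random.default_rng(0)
R_annot, _ = np.linalg.qr(rng.normal(size=(3, 3)))   # some orthonormal rotation
T_annot = rng.normal(size=(3, 1))

w2c = np.eye(4)
w2c[:3, :3] = R_annot
w2c[:3, 3:4] = T_annot

R_stored = np.transpose(w2c[:3, :3])   # what the readers put into CameraInfo.R
T_stored = w2c[:3, 3]                  # what the readers put into CameraInfo.T

# Camera centre two ways: invert the world-to-camera matrix (as getNerfppNorm
# effectively does via getWorld2View2), or use C = -R^T T directly.
C_from_inverse = np.linalg.inv(w2c)[:3, 3]
C_closed_form = -R_stored @ T_stored
assert np.allclose(C_from_inverse, C_closed_form)
```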
smpl_model[gender] = SMPLX('assets/models/smplx/', smpl_type='smplx',
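The `next_line` field above is the deliberately truncated gold continuation and is left untouched here. Purely for orientation, the hedged sketch below exercises the SMPLX constructor arguments documented in this record's context snippet (`model_path`, `gender`, `ext`, `num_expression_coeffs`, `use_face_contour`, `batch_size`); it is not the dataset's ground-truth completion, the argument values are arbitrary, and the model directory is a placeholder expected to hold the `SMPLX_{GENDER}.npz` files.

```python
# NOT the gold completion of next_line -- only an illustration of the SMPLX
# constructor parameters documented in this record's context snippet.
# 'assets/models/smplx/' is a placeholder folder expected to hold SMPLX_*.npz.
from smplx.body_models import SMPLX

gender = 'neutral'   # in readCamerasDNARendering this comes from smc_reader.actor_info['gender']
smpl_model = {
    gender: SMPLX(
        model_path='assets/models/smplx/',
        gender=gender,
        ext='npz',
        num_expression_coeffs=10,   # default documented in the snippet
        use_face_contour=True,      # arbitrary choice for this sketch
        batch_size=1,
    )
}
```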
13
2023-11-29 07:10:39+00:00
24k
UX-Decoder/LLaVA-Grounding
llava/model/language_model/llava_llama_gd.py
[ { "identifier": "LlavaMetaModel", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaModel:\n\n def __init__(self, config):\n super(LlavaMetaModel, self).__init__(config)\n\n if hasattr(config, \"mm_vision_tower\"):\n self.vision_tower = build_vision_tower(config, delay_load=True)\n self.mm_projector = nn.Linear(config.mm_hidden_size, config.hidden_size)\n\n def get_vision_tower(self):\n vision_tower = getattr(self, 'vision_tower', None)\n if type(vision_tower) is list:\n vision_tower = vision_tower[0]\n return vision_tower\n\n def initialize_vision_modules(self, model_args, fsdp=None):\n vision_tower = model_args.vision_tower\n mm_vision_select_layer = model_args.mm_vision_select_layer\n mm_vision_select_feature = model_args.mm_vision_select_feature\n pretrain_mm_mlp_adapter = model_args.pretrain_mm_mlp_adapter\n\n self.config.mm_vision_tower = vision_tower\n\n vision_tower = build_vision_tower(model_args)\n\n if fsdp is not None and len(fsdp) > 0:\n self.vision_tower = [vision_tower]\n else:\n self.vision_tower = vision_tower\n\n self.config.use_mm_proj = True\n self.config.mm_hidden_size = vision_tower.hidden_size\n self.config.mm_vision_select_layer = mm_vision_select_layer\n self.config.mm_vision_select_feature = mm_vision_select_feature\n\n if not hasattr(self, 'mm_projector'):\n self.mm_projector = nn.Linear(self.config.mm_hidden_size, self.config.hidden_size)\n\n if pretrain_mm_mlp_adapter is not None:\n mm_projector_weights = torch.load(pretrain_mm_mlp_adapter, map_location='cpu')\n def get_w(weights, keyword):\n return {k.split(keyword + '.')[1]: v for k, v in weights.items() if keyword in k}\n\n # self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))\n self.mm_projector.load_state_dict(get_w(mm_projector_weights, 'mm_projector'))" }, { "identifier": "LlavaMetaForCausalLM", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features)\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # 
st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]).detach())\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids).detach())\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x 
in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = 
mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n # print(\"Emb length:\", len(self.get_input_embeddings().weight.data))\n # if len(self.get_input_embeddings().weight.data) > 0:\n # if len(self.get_input_embeddings().weight.data) > 0:\n # self.get_input_embeddings().weight.data[-num_new_tokens:] = inp_embs\n # self.get_output_embeddings().weight.data[-num_new_tokens:] = out_embs\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True" }, { "identifier": "LlavaMetaForCausalLM_gd", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] 
for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n 
cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = 
output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" }, { "identifier": "LlavaMetaForCausalLM_gd_interactive", "path": "llava/model/llava_arch.py", "snippet": "class LlavaMetaForCausalLM_gd_interactive(ABC):\n\n @abstractmethod\n def get_model(self):\n pass\n\n def get_vision_tower(self):\n return self.get_model().get_vision_tower()\n\n def encode_images(self, images):\n image_features = self.get_model().get_vision_tower()(images)\n image_features = self.get_model().mm_projector(image_features.to(self.get_model().mm_projector.state_dict()[\"weight\"].dtype))\n return image_features\n\n def prepare_inputs_labels_for_multimodal(\n self, input_ids, attention_mask, past_key_values, labels, images,obj_feats=None,num_it=0\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and images is not None and input_ids.shape[1] == 1:\n attention_mask = 
torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = 
cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if batch_idx >= len(input_ids) - num_it:\n obj_idx = cur_input_ids == 1273\n idx_in_inter=batch_idx-(len(input_ids)-num_it)\n cur_new_input_embeds[-1][obj_idx] = obj_feats[idx_in_inter].to(cur_new_input_embeds[-1].dtype)\n if labels is not None:\n cur_labels[cur_labels==1273]=IGNORE_INDEX\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n def prepare_inputs_labels_for_multimodal_NoInter(\n self, input_ids, attention_mask, past_key_values, labels, images\n ):\n vision_tower = self.get_vision_tower()\n if vision_tower is None or images is None or input_ids.shape[1] == 1:\n if past_key_values is not None and vision_tower is not None and 
images is not None and input_ids.shape[1] == 1:\n attention_mask = torch.ones((attention_mask.shape[0], past_key_values[-1][-1].shape[-2] + 1), dtype=attention_mask.dtype, device=attention_mask.device)\n return input_ids, attention_mask, past_key_values, None, labels\n\n if type(images) is list or images.ndim == 5:\n concat_images = torch.cat([image for image in images], dim=0)\n image_features = self.encode_images(concat_images)\n split_sizes = [image.shape[0] for image in images]\n image_features = torch.split(image_features, split_sizes, dim=0)\n image_features = [x.flatten(0, 1) for x in image_features]\n else:\n image_features = self.encode_images(images)\n\n new_input_embeds = []\n new_labels = [] if labels is not None else None\n cur_image_idx = 0\n orig_embeds_params = getattr(self, 'orig_embeds_params', None)\n if orig_embeds_params is not None:\n orig_embeds_params_in = orig_embeds_params[0]\n orig_embeds_params_out = orig_embeds_params[1]\n # st_inp=self.tokenizer.encode(grounding_start)[1]\n # st_out=self.tokenizer.encode(grounding_start)[1]\n with torch.no_grad():\n self.get_input_embeddings().weight[:-3] = orig_embeds_params_in[:-3].data\n # if self.tokenizer.decode([len(self.tokenizer)-1])=='<seg>':\n self.get_output_embeddings().weight[:-3] = orig_embeds_params_out[:-3].data\n\n for batch_idx, cur_input_ids in enumerate(input_ids):\n if (cur_input_ids == IMAGE_TOKEN_INDEX).sum() == 0:\n # multimodal LLM, but the current sample is not multimodal\n cur_input_embeds = self.get_model().embed_tokens(cur_input_ids)\n cur_input_embeds = cur_input_embeds + (0. * self.get_model().mm_projector(vision_tower.dummy_feature)).sum()\n new_input_embeds.append(cur_input_embeds)\n if labels is not None:\n new_labels.append(labels[batch_idx])\n cur_image_idx += 1\n continue\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n cur_new_input_embeds = []\n if labels is not None:\n cur_labels = labels[batch_idx]\n cur_new_labels = []\n assert cur_labels.shape == cur_input_ids.shape\n while image_token_indices.numel() > 0:\n cur_image_features = image_features[cur_image_idx]\n image_token_start = image_token_indices[0]\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start-1]))\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start-1:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[image_token_start+1:image_token_start+2]))\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_new_labels.append(cur_labels[image_token_start:image_token_start+1])\n cur_labels = cur_labels[image_token_start+2:]\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids[:image_token_start]))\n cur_new_input_embeds.append(cur_image_features)\n if labels is not None:\n cur_new_labels.append(cur_labels[:image_token_start])\n cur_new_labels.append(torch.full((cur_image_features.shape[0],), IGNORE_INDEX, device=labels.device, dtype=labels.dtype))\n cur_labels = cur_labels[image_token_start+1:]\n cur_image_idx += 1\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_input_ids = 
cur_input_ids[image_token_start+2:]\n else:\n cur_input_ids = cur_input_ids[image_token_start+1:]\n image_token_indices = torch.where(cur_input_ids == IMAGE_TOKEN_INDEX)[0]\n if cur_input_ids.numel() > 0:\n if getattr(self.config, 'tune_mm_mlp_adapter', False) and getattr(self.config, 'mm_use_im_start_end', False):\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n else:\n cur_new_input_embeds.append(self.get_model().embed_tokens(cur_input_ids))\n if labels is not None:\n cur_new_labels.append(cur_labels)\n cur_new_input_embeds = [x.to(device=self.device) for x in cur_new_input_embeds]\n cur_new_input_embeds = torch.cat(cur_new_input_embeds, dim=0)\n new_input_embeds.append(cur_new_input_embeds)\n if labels is not None:\n cur_new_labels = torch.cat(cur_new_labels, dim=0)\n new_labels.append(cur_new_labels)\n\n if any(x.shape != new_input_embeds[0].shape for x in new_input_embeds):\n max_len = max(x.shape[0] for x in new_input_embeds)\n\n new_input_embeds_align = []\n for cur_new_embed in new_input_embeds:\n cur_new_embed = torch.cat((cur_new_embed, torch.zeros((max_len - cur_new_embed.shape[0], cur_new_embed.shape[1]), dtype=cur_new_embed.dtype, device=cur_new_embed.device)), dim=0)\n new_input_embeds_align.append(cur_new_embed)\n new_input_embeds = torch.stack(new_input_embeds_align, dim=0)\n\n if labels is not None:\n new_labels_align = []\n _new_labels = new_labels\n for cur_new_label in new_labels:\n cur_new_label = torch.cat((cur_new_label, torch.full((max_len - cur_new_label.shape[0],), IGNORE_INDEX, dtype=cur_new_label.dtype, device=cur_new_label.device)), dim=0)\n new_labels_align.append(cur_new_label)\n new_labels = torch.stack(new_labels_align, dim=0)\n\n if attention_mask is not None:\n new_attention_mask = []\n for cur_attention_mask, cur_new_labels, cur_new_labels_align in zip(attention_mask, _new_labels, new_labels):\n new_attn_mask_pad_left = torch.full((cur_new_labels.shape[0] - labels.shape[1],), True, dtype=attention_mask.dtype, device=attention_mask.device)\n new_attn_mask_pad_right = torch.full((cur_new_labels_align.shape[0] - cur_new_labels.shape[0],), False, dtype=attention_mask.dtype, device=attention_mask.device)\n cur_new_attention_mask = torch.cat((new_attn_mask_pad_left, cur_attention_mask, new_attn_mask_pad_right), dim=0)\n new_attention_mask.append(cur_new_attention_mask)\n attention_mask = torch.stack(new_attention_mask, dim=0)\n assert attention_mask.shape == new_labels.shape\n else:\n new_input_embeds = torch.stack(new_input_embeds, dim=0)\n if labels is not None:\n new_labels = torch.stack(new_labels, dim=0)\n\n if attention_mask is not None:\n new_attn_mask_pad_left = torch.full((attention_mask.shape[0], new_input_embeds.shape[1] - input_ids.shape[1]), True, dtype=attention_mask.dtype, device=attention_mask.device)\n attention_mask = torch.cat((new_attn_mask_pad_left, attention_mask), dim=1)\n assert attention_mask.shape == new_input_embeds.shape[:2]\n\n return None, attention_mask, past_key_values, new_input_embeds, new_labels\n\n def initialize_vision_tokenizer(self, model_args, tokenizer):\n if model_args.mm_use_im_patch_token:\n tokenizer.add_tokens([DEFAULT_IMAGE_PATCH_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if model_args.mm_use_im_start_end:\n num_new_tokens = tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN, grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = 
self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n if model_args.pretrain_mm_mlp_adapter:\n mm_projector_weights = torch.load(model_args.pretrain_mm_mlp_adapter, map_location='cpu')\n embed_tokens_weight = mm_projector_weights['model.embed_tokens.weight']\n assert num_new_tokens == 2\n if input_embeddings.shape == embed_tokens_weight.shape:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight[-num_new_tokens:]\n elif embed_tokens_weight.shape[0] == num_new_tokens:\n input_embeddings[-num_new_tokens:] = embed_tokens_weight\n else:\n raise ValueError(f\"Unexpected embed_tokens_weight shape. Pretrained: {embed_tokens_weight.shape}. Current: {input_embeddings.shape}. Numer of new tokens: {num_new_tokens}.\")\n elif model_args.mm_use_im_patch_token:\n if model_args.tune_mm_mlp_adapter:\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = False\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = False\n else:\n # import pdb; pdb.set_trace()\n num_new_tokens = tokenizer.add_tokens([grounding_start, grounding_end, SEG_TOKEN], special_tokens=True)\n inits=['[',']','.']\n nums=[tokenizer.encode(init)[1] for init in inits]\n # inp_embs = self.get_input_embeddings().weight.data[nums]\n # out_embs = self.get_output_embeddings().weight.data[nums]\n self.resize_token_embeddings(len(tokenizer))\n\n if num_new_tokens > 0:\n input_embeddings = self.get_input_embeddings().weight.data\n output_embeddings = self.get_output_embeddings().weight.data\n #\n input_embeddings_avg = input_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n output_embeddings_avg = output_embeddings[:-num_new_tokens].mean(\n dim=0, keepdim=True)\n #\n input_embeddings[-num_new_tokens:] = input_embeddings_avg\n output_embeddings[-num_new_tokens:] = output_embeddings_avg\n\n if model_args.tune_mm_mlp_adapter:\n self.orig_embeds_params = [self.get_input_embeddings().weight.data.clone().cuda(),\n self.get_output_embeddings().weight.data.clone().cuda()]\n\n for p in self.get_input_embeddings().parameters():\n p.requires_grad = True\n for p in self.get_output_embeddings().parameters():\n p.requires_grad = True\n\n def initialize_seg_modules(self, cfg):\n seg_model = BaseModel(cfg, build_model(cfg))\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.seg_model = seg_model\n\n def initialize_interactive_modules(self, cfg):\n from .semsam.BaseModel import BaseModel as SemSamBaseModel\n from .semsam import build_model as build_semsam_model\n\n seg_model = SemSamBaseModel(cfg, build_semsam_model(cfg))\n if not (cfg.MODEL.WEIGHTS == \"None\"):\n seg_model = seg_model.from_pretrained(cfg.MODEL.WEIGHTS)\n self.interactive_model = seg_model\n def freeze_seg_modules(self):\n for p in self.seg_model.parameters():\n p.requires_grad = False" } ]
from typing import List, Optional, Tuple, Union

from torch.nn import CrossEntropyLoss

from transformers import AutoConfig, AutoModelForCausalLM, \
    LlamaConfig, LlamaModel, LlamaForCausalLM

from transformers.modeling_outputs import CausalLMOutputWithPast

from ..llava_arch import LlavaMetaModel, LlavaMetaForCausalLM, LlavaMetaForCausalLM_gd,LlavaMetaForCausalLM_gd_interactive

import torch
import torch.nn as nn
import transformers
15,136
llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) flickr_len = len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] if self.seg_model.model.detach_seg: hidden_states = outputs[0].detach() else: hidden_states = outputs[0] hidden_states_flickr = hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['flickr_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) ##########coco coco_len = len(seg_inputs['coco']) ground_idx_coco = ground_idx[flickr_len:flickr_len+coco_len] if len(ground_idx_coco) > 0: for i, (idx, data) in enumerate(zip(ground_idx_coco, seg_inputs['coco'])): mask = data['grounding_mask'] ground_idx_coco[i] = idx[mask[:len(idx)]] padded_ground_idx_coco = torch.nn.utils.rnn.pad_sequence(ground_idx_coco, batch_first=True, padding_value=-1) padded_mask_coco = padded_ground_idx_coco != -1 padded_ground_idx_coco[padded_ground_idx_coco == -1] = 0 # hidden_states = outputs[0] hidden_states_coco = hidden_states[flickr_len:flickr_len+coco_len] ground_hs_coco = torch.gather(hidden_states_coco, 1, padded_ground_idx_coco[..., None].repeat(1, 1, hidden_states_coco.shape[ -1])) seg_inputs['coco_text_embeddings'] = (ground_hs_coco, padded_mask_coco) ground_loss = self.seg_model(seg_inputs) hidden_states_ = outputs[0] if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: logits = self.lm_head(hidden_states_[flickr_len:]) else: logits = self.lm_head(hidden_states_) ############################################################ loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if 
self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) ignore_list=[f'_{i}' for i in range(1,10)] ignore_list.append('interm') for key in list(ground_loss.keys()): if not key.endswith('_0') and key!='llava' and key !='loss_total': ground_loss.pop(key) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
# Copyright 2023 Haotian Liu # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. IGNORE_INDEX=-100 # @dataclass class DataCollatorForSupervisedDataset(object): """Collate examples for supervised fine-tuning.""" # tokenizer: transformers.PreTrainedTokenizer def __call__(self, instances,tokenizer): input_ids, labels = tuple([instance[key] for instance in instances] for key in ("input_ids", "labels")) input_ids = torch.nn.utils.rnn.pad_sequence( input_ids, batch_first=True, padding_value=tokenizer.pad_token_id) labels = torch.nn.utils.rnn.pad_sequence(labels, batch_first=True, padding_value=IGNORE_INDEX) input_ids = input_ids[:, :tokenizer.model_max_length] labels = labels[:, :tokenizer.model_max_length] batch = dict( input_ids=input_ids, labels=labels, attention_mask=input_ids.ne(tokenizer.pad_token_id), ) if 'image_clip' in instances[0]: images = [instance['image_clip'] for instance in instances] if all(x is not None and x.shape == images[0].shape for x in images): batch['images'] = torch.stack(images) else: batch['images'] = images return batch class LlavaConfig(LlamaConfig): model_type = "llava" class LlavaLlamaModel(LlavaMetaModel, LlamaModel): config_class = LlavaConfig def __init__(self, config: LlamaConfig): super(LlavaLlamaModel, self).__init__(config) class LlavaLlamaForCausalLM(LlamaForCausalLM, LlavaMetaForCausalLM): config_class = LlavaConfig def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = LlavaLlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def forward( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict input_ids, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=input_ids, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) hidden_states = outputs[0] logits = self.lm_head(hidden_states) loss = 
None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output return CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs class LlavaLlamaForCausalLM_gd(LlamaForCausalLM, LlavaMetaForCausalLM_gd): config_class = LlavaConfig def __init__(self, config): super(LlamaForCausalLM, self).__init__(config) self.model = LlavaLlamaModel(config) self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False) # Initialize weights and apply final processing self.post_init() def get_model(self): return self.model def forward(self,**batched_inputs): # print(kwargs.keys()) # images_for_llava=torch.stack([inp['image_clip'] for inp in batched_inputs['flickr']]) collator=DataCollatorForSupervisedDataset() if 'refcoco' in batched_inputs: if 'vg' in batched_inputs: llava_inputs = collator(batched_inputs['vg']+batched_inputs['refcoco'], tokenizer=batched_inputs['refcoco'][0]['tokenizer']) else: llava_inputs = collator( batched_inputs['refcoco'], tokenizer=batched_inputs['refcoco'][0]['tokenizer']) elif 'coco' in batched_inputs: llava_inputs=collator(batched_inputs['flickr']+batched_inputs['coco'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) else: llava_inputs=collator(batched_inputs['flickr'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, 
past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) ground_idx_coco=[] ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] if 'refcoco' in seg_inputs: if 'vg' in seg_inputs: vg_len=len(seg_inputs['vg']) ground_idx_flickr = ground_idx[:vg_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr = hidden_states[:vg_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['vg_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) flickr_len = len(seg_inputs['refcoco']) ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[vg_len:vg_len+flickr_len] if 'vg' in seg_inputs else ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr = hidden_states[vg_len:vg_len+flickr_len] if 'vg' in seg_inputs else hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['refcoco_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) # seg_inputs['flickr']=seg_inputs['refcoco'] else: flickr_len=len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] zero_mask = [0 if len(idx) == 0 else 1 for idx in ground_idx] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr=ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr=padded_ground_idx_flickr!=-1 padded_ground_idx_flickr[padded_ground_idx_flickr==-1]=0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr=hidden_states[:flickr_len] ground_hs_flickr=torch.gather(hidden_states_flickr,1,padded_ground_idx_flickr[...,None].repeat(1,1,hidden_states_flickr.shape[-1])) seg_inputs['flickr_text_embeddings']=(ground_hs_flickr,padded_mask_flickr) ##########coco ground_idx_coco = ground_idx[flickr_len:] if len(ground_idx_coco)>0: for i,(idx,data) in enumerate(zip(ground_idx_coco,seg_inputs['coco'])): mask=data['grounding_mask'] ground_idx_coco[i]=idx[mask[:len(idx)]] padded_ground_idx_coco = torch.nn.utils.rnn.pad_sequence(ground_idx_coco, batch_first=True, padding_value=-1) padded_mask_coco = padded_ground_idx_coco != -1 padded_ground_idx_coco[padded_ground_idx_coco == -1] = 0 hidden_states = outputs[0] hidden_states_coco = hidden_states[flickr_len:] ground_hs_coco = torch.gather(hidden_states_coco, 1, padded_ground_idx_coco[..., None].repeat(1, 1, hidden_states_coco.shape[ -1])) 
seg_inputs['coco_text_embeddings'] = (ground_hs_coco, padded_mask_coco) ground_loss=self.seg_model(seg_inputs) if self.seg_model.model.coco_only and len(ground_idx_coco)>0: logits = self.lm_head(hidden_states_coco) else: logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) def prepare_inputs_for_generation( self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs ): if past_key_values: input_ids = input_ids[:, -1:] # if `inputs_embeds` are passed, we only want to use them in the 1st generation step if inputs_embeds is not None and past_key_values is None: model_inputs = {"inputs_embeds": inputs_embeds} else: model_inputs = {"input_ids": input_ids} model_inputs.update( { "past_key_values": past_key_values, "use_cache": kwargs.get("use_cache"), "attention_mask": attention_mask, "images": kwargs.get("images", None), } ) return model_inputs def forward_eval(self, inputs): collator=DataCollatorForSupervisedDataset() llava_inputs=collator(inputs,tokenizer=inputs[0]['tokenizer']) llava_inputs['seg_inputs']=inputs return self.forward_inner_eval(**llava_inputs) def forward_inner_eval( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) output_ids, seg_hidden_states = self.auto_regressive_generate(attention_mask, past_key_values, inputs_embeds, output_attentions, seg_inputs[0]["tokenizer"], return_dict) output_text = seg_inputs[0]["tokenizer"].batch_decode([output_ids], skip_special_tokens=True)[0] if len(seg_hidden_states)==0: return output_text, [], [] seg_tokens = torch.cat(seg_hidden_states, dim=1) padded_mask = seg_tokens.new_ones(seg_tokens.shape[:2]) > 0 predicted_boxes, 
predicted_masks=self.seg_model.model.forward_eval(seg_inputs, (seg_tokens,padded_mask)) return output_text, predicted_boxes, predicted_masks def auto_regressive_generate(self, attention_mask, past_key_values, inputs_embeds, output_attentions, tokenizer, return_dict, temporature=0.0 ): ######## # llm_inputs['obj_num'] = False seg_token = tokenizer.encode("<seg>")[1] seg_token_list = [] output_ids = [] output_logits = [] length = inputs_embeds.shape[1] for i in range(1000): # import pdb;pdb.set_trace() if i == 0: results = self.model( input_ids=None, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=True, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict ) else: attention_mask = cur_hidden.new_ones( 1, past_key_values[0][0].shape[-2] + 1, device="cuda") # print("Attention mask shape: ", attention_mask.shape) results = self.model( input_ids=torch.as_tensor([[cur_id]], device=inputs_embeds.device), attention_mask=attention_mask, past_key_values=past_key_values, # inputs_embeds=cur_hidden, use_cache=True, output_attentions=output_attentions, output_hidden_states=True, return_dict=return_dict ) cur_hidden = results.hidden_states[-1][:, -1:] # last layer last token logits = self.lm_head(results[0]) cur_logits = logits[0][-1] cur_id = int(torch.argmax(cur_logits)) if temporature < 1e-4: cur_id = int(torch.argmax(cur_logits)) else: probs = torch.softmax(cur_logits / temporature, dim=-1) cur_id = int(torch.multinomial(probs, num_samples=1)) past_key_values = results.past_key_values length += 1 if cur_id==seg_token: seg_token_list.append(cur_hidden) output_ids.append(cur_id) output_logits.append(cur_logits) if tokenizer.decode(output_ids).find("</s>")!=-1: break return output_ids,seg_token_list class LlavaLlamaForCausalLM_joint(LlavaLlamaForCausalLM_gd): def forward(self,**batched_inputs): # print(kwargs.keys()) # images_for_llava=torch.stack([inp['image_clip'] for inp in batched_inputs['flickr']]) collator=DataCollatorForSupervisedDataset() assert 'refcoco' in batched_inputs and 'flickr' in batched_inputs and 'llava' in batched_inputs for data in batched_inputs['llava']: data['image_clip']=data['image'] llava_inputs = collator( batched_inputs['flickr']+batched_inputs['refcoco']+batched_inputs['llava'], tokenizer=batched_inputs['refcoco'][0]['tokenizer']) # if 'refcoco' in batched_inputs: # llava_inputs = collator( batched_inputs['refcoco'], # tokenizer=batched_inputs['refcoco'][0]['tokenizer']) # elif 'coco' in batched_inputs: # llava_inputs=collator(batched_inputs['flickr']+batched_inputs['coco'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) # else: # llava_inputs=collator(batched_inputs['flickr'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if 
output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) ground_idx_coco=[] # if 'refcoco' in seg_inputs: flickr_len = len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] hidden_states = outputs[0] hidden_states_flickr = hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['flickr_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) # seg_inputs['flickr']=seg_inputs['refcoco'] # else: ################################################# ################################################# refcoco_len=len(seg_inputs['refcoco']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr ground_idx_refcoco=ground_idx[flickr_len:flickr_len+refcoco_len] padded_ground_idx_refcoco = torch.nn.utils.rnn.pad_sequence(ground_idx_refcoco, batch_first=True, padding_value=-1) padded_mask_refcoco=padded_ground_idx_refcoco!=-1 padded_ground_idx_refcoco[padded_ground_idx_refcoco==-1]=0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] # hidden_states = outputs[0] hidden_states_refcoco=hidden_states[flickr_len:flickr_len+refcoco_len] ground_hs_refcoco=torch.gather(hidden_states_refcoco,1,padded_ground_idx_refcoco[...,None].repeat(1,1,hidden_states_refcoco.shape[-1])) seg_inputs['refcoco_text_embeddings']=(ground_hs_refcoco,padded_mask_refcoco) ground_loss=self.seg_model(seg_inputs) # if self.seg_model.model.coco_only and len(ground_idx_coco)>0: # logits = self.lm_head(hidden_states_coco) # else: logits = self.lm_head(hidden_states) loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, 
attentions=outputs.attentions, ) class LlavaLlamaForCausalLM_joint_2st(LlavaLlamaForCausalLM_gd): def forward(self,**batched_inputs): # print(kwargs.keys()) # images_for_llava=torch.stack([inp['image_clip'] for inp in batched_inputs['flickr']]) collator=DataCollatorForSupervisedDataset() assert 'coco' in batched_inputs and 'flickr' in batched_inputs and 'llava' in batched_inputs for data in batched_inputs['llava']: data['image_clip']=data['image'] llava_inputs = collator( batched_inputs['flickr']+batched_inputs['coco']+batched_inputs['llava'], tokenizer=batched_inputs['coco'][0]['tokenizer']) # if 'refcoco' in batched_inputs: # llava_inputs = collator( batched_inputs['refcoco'], # tokenizer=batched_inputs['refcoco'][0]['tokenizer']) # elif 'coco' in batched_inputs: # llava_inputs=collator(batched_inputs['flickr']+batched_inputs['coco'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) # else: # llava_inputs=collator(batched_inputs['flickr'],tokenizer=batched_inputs['flickr'][0]['tokenizer']) llava_inputs['seg_inputs']=batched_inputs return self.forward_inner(**llava_inputs) def forward_inner( self, input_ids: torch.LongTensor = None, attention_mask: Optional[torch.Tensor] = None, past_key_values: Optional[List[torch.FloatTensor]] = None, inputs_embeds: Optional[torch.FloatTensor] = None, labels: Optional[torch.LongTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, images: Optional[torch.FloatTensor] = None, seg_inputs: Optional[torch.FloatTensor] = None, return_dict: Optional[bool] = None, ) -> Union[Tuple, CausalLMOutputWithPast]: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict _, attention_mask, past_key_values, inputs_embeds, labels = self.prepare_inputs_labels_for_multimodal(input_ids, attention_mask, past_key_values, labels, images) # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn) outputs = self.model( input_ids=None, attention_mask=attention_mask, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict ) flickr_len = len(seg_inputs['flickr']) ground_idx = [torch.argwhere(lb == 32002)[:, 0] for lb in labels] ##########flickr # if self.seg_model.model.coco_only: ground_idx_flickr = ground_idx[:flickr_len] padded_ground_idx_flickr = torch.nn.utils.rnn.pad_sequence(ground_idx_flickr, batch_first=True, padding_value=-1) padded_mask_flickr = padded_ground_idx_flickr != -1 padded_ground_idx_flickr[padded_ground_idx_flickr == -1] = 0 # ground_idx=[[-1] if len(idx)==0 else idx for idx in ground_idx] if self.seg_model.model.detach_seg: hidden_states = outputs[0].detach() else: hidden_states = outputs[0] hidden_states_flickr = hidden_states[:flickr_len] ground_hs_flickr = torch.gather(hidden_states_flickr, 1, padded_ground_idx_flickr[..., None].repeat(1, 1, hidden_states_flickr.shape[ -1])) seg_inputs['flickr_text_embeddings'] = (ground_hs_flickr, padded_mask_flickr) ##########coco coco_len = len(seg_inputs['coco']) ground_idx_coco = ground_idx[flickr_len:flickr_len+coco_len] if len(ground_idx_coco) > 0: for i, (idx, data) in enumerate(zip(ground_idx_coco, seg_inputs['coco'])): mask = 
data['grounding_mask'] ground_idx_coco[i] = idx[mask[:len(idx)]] padded_ground_idx_coco = torch.nn.utils.rnn.pad_sequence(ground_idx_coco, batch_first=True, padding_value=-1) padded_mask_coco = padded_ground_idx_coco != -1 padded_ground_idx_coco[padded_ground_idx_coco == -1] = 0 # hidden_states = outputs[0] hidden_states_coco = hidden_states[flickr_len:flickr_len+coco_len] ground_hs_coco = torch.gather(hidden_states_coco, 1, padded_ground_idx_coco[..., None].repeat(1, 1, hidden_states_coco.shape[ -1])) seg_inputs['coco_text_embeddings'] = (ground_hs_coco, padded_mask_coco) ground_loss = self.seg_model(seg_inputs) hidden_states_ = outputs[0] if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: logits = self.lm_head(hidden_states_[flickr_len:]) else: logits = self.lm_head(hidden_states_) ############################################################ loss = None if labels is not None: # Shift so that tokens < n predict n shift_logits = logits[..., :-1, :].contiguous() if self.seg_model.model.coco_only and len(ground_idx_coco) > 0: shift_labels = labels[..., 1:][flickr_len:].contiguous() else: shift_labels = labels[..., 1:].contiguous() # Flatten the tokens loss_fct = CrossEntropyLoss() shift_logits = shift_logits.view(-1, self.config.vocab_size) shift_labels = shift_labels.view(-1) # Enable model/pipeline parallelism shift_labels = shift_labels.to(shift_logits.device) loss = loss_fct(shift_logits, shift_labels) if not return_dict: output = (logits,) + outputs[1:] return (loss,) + output if loss is not None else output ground_loss['llava']=loss ground_loss['loss_total']=sum(ground_loss.values()) ignore_list=[f'_{i}' for i in range(1,10)] ignore_list.append('interm') for key in list(ground_loss.keys()): if not key.endswith('_0') and key!='llava' and key !='loss_total': ground_loss.pop(key) return CausalLMOutputWithPast( loss=ground_loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, )
class LlavaLlamaForCausalLM_joint_2st_it_only_ref_instr(LlamaForCausalLM, LlavaMetaForCausalLM_gd_interactive):
3
2023-12-04 10:59:21+00:00
24k
Vchitect/VBench
vbench/third_party/umt/datasets/build.py
[ { "identifier": "TubeMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class TubeMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n self.frames, self.height, self.width = input_size\n self.num_patches_per_frame = self.height * self.width\n self.total_patches = self.frames * self.num_patches_per_frame \n self.num_masks_per_frame = int(mask_ratio * self.num_patches_per_frame)\n self.total_masks = self.frames * self.num_masks_per_frame\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.total_patches, self.total_masks\n )\n return repr_str\n\n def __call__(self):\n mask_per_frame = np.hstack([\n np.zeros(self.num_patches_per_frame - self.num_masks_per_frame),\n np.ones(self.num_masks_per_frame),\n ])\n np.random.shuffle(mask_per_frame)\n mask = np.tile(mask_per_frame, (self.frames, 1)).flatten()\n return mask " }, { "identifier": "RandomMaskingGenerator", "path": "vbench/third_party/umt/datasets/masking_generator.py", "snippet": "class RandomMaskingGenerator:\n def __init__(self, input_size, mask_ratio):\n if not isinstance(input_size, tuple):\n input_size = (input_size, ) * 3\n\n self.frames, self.height, self.width = input_size\n\n self.num_patches = self.frames * self.height * self.width # 8x14x14\n self.num_mask = int(mask_ratio * self.num_patches)\n\n def __repr__(self):\n repr_str = \"Maks: total patches {}, mask patches {}\".format(\n self.num_patches, self.num_mask)\n return repr_str\n\n def __call__(self):\n mask = np.hstack([\n np.zeros(self.num_patches - self.num_mask),\n np.ones(self.num_mask),\n ])\n np.random.shuffle(mask)\n return mask # [196*8]" }, { "identifier": "VideoMAE", "path": "vbench/third_party/umt/datasets/mae.py", "snippet": "class VideoMAE(torch.utils.data.Dataset):\n \"\"\"Load your own video classification dataset.\n Parameters\n ----------\n root : str, required.\n Path to the root folder storing the dataset.\n setting : str, required.\n A text file describing the dataset, each line per video sample.\n There are three items in each line: (1) video path; (2) video length and (3) video label.\n prefix : str, required.\n The prefix for loading data.\n split : str, required.\n The split character for metadata.\n train : bool, default True.\n Whether to load the training or validation set.\n test_mode : bool, default False.\n Whether to perform evaluation on the test set.\n Usually there is three-crop or ten-crop evaluation strategy involved.\n name_pattern : str, default None.\n The naming pattern of the decoded video frames.\n For example, img_00012.jpg.\n video_ext : str, default 'mp4'.\n If video_loader is set to True, please specify the video format accordinly.\n is_color : bool, default True.\n Whether the loaded image is color or grayscale.\n modality : str, default 'rgb'.\n Input modalities, we support only rgb video frames for now.\n Will add support for rgb difference image and optical flow image later.\n num_segments : int, default 1.\n Number of segments to evenly divide the video into clips.\n A useful technique to obtain global video-level information.\n Limin Wang, etal, Temporal Segment Networks: Towards Good Practices for Deep Action Recognition, ECCV 2016.\n num_crop : int, default 1.\n Number of crops for each image. default is 1.\n Common choices are three crops and ten crops during evaluation.\n new_length : int, default 1.\n The length of input video clip. 
Default is a single image, but it can be multiple video frames.\n For example, new_length=16 means we will extract a video clip of consecutive 16 frames.\n new_step : int, default 1.\n Temporal sampling rate. For example, new_step=1 means we will extract a video clip of consecutive frames.\n new_step=2 means we will extract a video clip of every other frame.\n temporal_jitter : bool, default False.\n Whether to temporally jitter if new_step > 1.\n video_loader : bool, default False.\n Whether to use video loader to load data.\n use_decord : bool, default True.\n Whether to use Decord video loader to load data. Otherwise load image.\n transform : function, default None.\n A function that takes data and label and transforms them.\n data_aug : str, default 'v1'.\n Different types of data augmentation auto. Supports v1, v2, v3 and v4.\n lazy_init : bool, default False.\n If set to True, build a dataset instance without loading any dataset.\n \"\"\"\n def __init__(self,\n root,\n setting,\n prefix='',\n split=' ',\n train=True,\n test_mode=False,\n name_pattern='img_%05d.jpg',\n video_ext='mp4',\n is_color=True,\n modality='rgb',\n num_segments=1,\n num_crop=1,\n new_length=1,\n new_step=1,\n transform=None,\n temporal_jitter=False,\n video_loader=False,\n use_decord=True,\n lazy_init=False,\n num_sample=1,\n ):\n\n super(VideoMAE, self).__init__()\n self.root = root\n self.setting = setting\n self.prefix = prefix\n self.split = split\n self.train = train\n self.test_mode = test_mode\n self.is_color = is_color\n self.modality = modality\n self.num_segments = num_segments\n self.num_crop = num_crop\n self.new_length = new_length\n self.new_step = new_step\n self.skip_length = self.new_length * self.new_step\n self.temporal_jitter = temporal_jitter\n self.name_pattern = name_pattern\n self.video_loader = video_loader\n self.video_ext = video_ext\n self.use_decord = use_decord\n self.transform = transform\n self.lazy_init = lazy_init\n self.num_sample = num_sample\n\n # sparse sampling, num_segments != 1\n if self.num_segments != 1:\n print('Use sparse sampling, change frame and stride')\n self.new_length = self.num_segments\n self.skip_length = 1\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if not self.lazy_init:\n self.clips = self._make_dataset(root, setting)\n if len(self.clips) == 0:\n raise(RuntimeError(\"Found 0 video clips in subfolders of: \" + root + \"\\n\"\n \"Check your data directory (opt.data-dir).\"))\n\n def __getitem__(self, index):\n while True:\n try:\n images = None\n if self.use_decord:\n directory, target = self.clips[index]\n if self.video_loader:\n if '.' 
in directory.split('/')[-1]:\n # data in the \"setting\" file already have extension, e.g., demo.mp4\n video_name = directory\n else:\n # data in the \"setting\" file do not have extension, e.g., demo\n # So we need to provide extension (i.e., .mp4) to complete the file name.\n video_name = '{}.{}'.format(directory, self.video_ext)\n\n video_name = os.path.join(self.prefix, video_name)\n if video_name.startswith('s3'):\n video_bytes = self.client.get(video_name)\n decord_vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n decord_vr = decord.VideoReader(video_name, num_threads=1, ctx=cpu(0))\n duration = len(decord_vr)\n \n segment_indices, skip_offsets = self._sample_train_indices(duration)\n images = self._video_TSN_decord_batch_loader(directory, decord_vr, duration, segment_indices, skip_offsets)\n \n else:\n video_name, total_frame, target = self.clips[index]\n video_name = os.path.join(self.prefix, video_name)\n\n segment_indices, skip_offsets = self._sample_train_indices(total_frame)\n frame_id_list = self._get_frame_id_list(total_frame, segment_indices, skip_offsets)\n images = []\n for idx in frame_id_list:\n frame_fname = os.path.join(video_name, self.name_pattern.format(idx))\n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n images.append(Image.fromarray(img)) \n if images is not None:\n break\n except Exception as e:\n print(\"Failed to load video from {} with error {}\".format(\n video_name, e))\n index = random.randint(0, len(self.clips) - 1)\n \n if self.num_sample > 1:\n process_data_list = []\n mask_list = []\n for _ in range(self.num_sample):\n process_data, mask = self.transform((images, None))\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0, 1)\n process_data_list.append(process_data)\n mask_list.append(mask)\n return process_data_list, mask_list\n else:\n process_data, mask = self.transform((images, None)) # T*C,H,W\n process_data = process_data.view((self.new_length, 3) + process_data.size()[-2:]).transpose(0, 1) # T*C,H,W -> T,C,H,W -> C,T,H,W\n return (process_data, mask)\n\n def __len__(self):\n return len(self.clips)\n\n def _make_dataset(self, directory, setting):\n if not os.path.exists(setting):\n raise(RuntimeError(\"Setting file %s doesn't exist. Check opt.train-list and opt.val-list. \" % (setting)))\n clips = []\n\n print(f'Load dataset using decord: {self.use_decord}')\n with open(setting) as split_f:\n data = split_f.readlines()\n for line in data:\n line_info = line.split(self.split)\n if len(line_info) < 2:\n raise(RuntimeError('Video input format is not correct, missing one or more element. 
%s' % line))\n if self.use_decord:\n # line format: video_path, video_label\n clip_path = os.path.join(line_info[0])\n target = int(line_info[1])\n item = (clip_path, target)\n else:\n # line format: video_path, video_duration, video_label\n clip_path = os.path.join(line_info[0])\n total_frame = int(line_info[1])\n target = int(line_info[2])\n item = (clip_path, total_frame, target)\n clips.append(item)\n return clips\n\n def _sample_train_indices(self, num_frames):\n average_duration = (num_frames - self.skip_length + 1) // self.num_segments\n if average_duration > 0:\n offsets = np.multiply(list(range(self.num_segments)),\n average_duration)\n offsets = offsets + np.random.randint(average_duration,\n size=self.num_segments)\n elif num_frames > max(self.num_segments, self.skip_length):\n offsets = np.sort(np.random.randint(\n num_frames - self.skip_length + 1,\n size=self.num_segments))\n else:\n offsets = np.zeros((self.num_segments,))\n\n if self.temporal_jitter:\n skip_offsets = np.random.randint(\n self.new_step, size=self.skip_length // self.new_step)\n else:\n skip_offsets = np.zeros(\n self.skip_length // self.new_step, dtype=int)\n return offsets + 1, skip_offsets\n\n def _get_frame_id_list(self, duration, indices, skip_offsets):\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n return frame_id_list\n\n def _video_TSN_decord_batch_loader(self, directory, video_reader, duration, indices, skip_offsets):\n sampled_list = []\n frame_id_list = []\n for seg_ind in indices:\n offset = int(seg_ind)\n for i, _ in enumerate(range(0, self.skip_length, self.new_step)):\n if offset + skip_offsets[i] <= duration:\n frame_id = offset + skip_offsets[i] - 1\n else:\n frame_id = offset - 1\n frame_id_list.append(frame_id)\n if offset + self.new_step < duration:\n offset += self.new_step\n try:\n video_data = video_reader.get_batch(frame_id_list).asnumpy()\n sampled_list = [Image.fromarray(video_data[vid, :, :, :]).convert('RGB') for vid, _ in enumerate(frame_id_list)]\n except:\n raise RuntimeError('Error occured in reading frames {} from video {} of duration {}.'.format(frame_id_list, directory, duration))\n return sampled_list" }, { "identifier": "VideoClsDataset", "path": "vbench/third_party/umt/datasets/kinetics.py", "snippet": "class VideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,\n args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n assert num_segment == 1\n if self.mode in ['train']:\n self.aug = 
True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) / 2\n spatial_start 
= int(spatial_step)\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[:, spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[:, :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1, chunk_nb=0):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n\n # handle temporal segments\n converted_len = int(self.clip_len * self.frame_sample_rate)\n seg_len = len(vr) // self.num_segment\n\n if self.mode == 'test':\n temporal_step = max(1.0 * (len(vr) - converted_len) / (self.test_num_segment - 1), 0)\n temporal_start = int(chunk_nb * temporal_step)\n\n bound = min(temporal_start + converted_len, len(vr))\n all_index = [x for x in range(temporal_start, bound, self.frame_sample_rate)]\n while len(all_index) < self.clip_len:\n all_index.append(all_index[-1])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n all_index = []\n for i in range(self.num_segment):\n if seg_len <= converted_len:\n index = np.linspace(0, seg_len, num=seg_len // self.frame_sample_rate)\n index = np.concatenate((index, np.ones(self.clip_len - seg_len // self.frame_sample_rate) * seg_len))\n index = np.clip(index, 0, seg_len - 1).astype(np.int64)\n 
else:\n if self.mode == 'validation':\n end_idx = (seg_len - converted_len) // 2\n else:\n end_idx = np.random.randint(converted_len, seg_len)\n str_idx = end_idx - converted_len\n index = np.linspace(str_idx, end_idx, num=self.clip_len)\n index = np.clip(index, str_idx, end_idx - 1).astype(np.int64)\n index = index + i*seg_len\n all_index.extend(list(index))\n\n all_index = all_index[::int(sample_rate_scale)]\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "VideoClsDataset_sparse", "path": "vbench/third_party/umt/datasets/kinetics_sparse.py", "snippet": "class VideoClsDataset_sparse(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n frame_sample_rate=2, crop_size=224, short_side_size=256,\n new_height=256, new_width=340, keep_aspect_ratio=True,\n num_segment=1, num_crop=1, test_num_segment=10, test_num_crop=3,\n args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.frame_sample_rate = frame_sample_rate\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n assert num_segment == 1\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=-1) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = 
self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=-1)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=0)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=0)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample, chunk_nb=chunk_nb)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n if self.test_num_crop == 1:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) / 2\n spatial_start = int(spatial_step)\n else:\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[:, spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[:, :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True ,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n 
num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def _get_seq_frames(self, video_size, num_frames, clip_idx=-1):\n seg_size = max(0., float(video_size - 1) / num_frames)\n max_frame = int(video_size) - 1\n seq = []\n # index from 1, must add 1\n if clip_idx == -1:\n for i in range(num_frames):\n start = int(np.round(seg_size * i))\n end = int(np.round(seg_size * (i + 1)))\n idx = min(random.randint(start, end), max_frame)\n seq.append(idx)\n else:\n num_segment = 1\n if self.mode == 'test':\n num_segment = self.test_num_segment\n duration = seg_size / (num_segment + 1)\n for i in range(num_frames):\n start = int(np.round(seg_size * i))\n frame_index = start + int(duration * (clip_idx + 1))\n idx = min(frame_index, max_frame)\n seq.append(idx)\n return seq\n\n def loadvideo_decord(self, sample, chunk_nb=0):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n\n all_index = self._get_seq_frames(len(vr), self.clip_len, clip_idx=chunk_nb)\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "SSVideoClsDataset", "path": "vbench/third_party/umt/datasets/ssv2.py", "snippet": "class SSVideoClsDataset(Dataset):\n \"\"\"Load your own video classification dataset.\"\"\"\n\n def __init__(self, anno_path, prefix='', split=' ', mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256,\n new_width=340, keep_aspect_ratio=True, num_segment=1,\n num_crop=1, test_num_segment=10, test_num_crop=3, args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.args = args\n self.aug = False\n self.rand_erase = False\n \n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.label_array = list(cleaned.values[:, 1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size, 
interpolation='bilinear'),\n CenterCrop(size=(self.crop_size, self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size), interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n sample_label = self.label_array[idx]\n self.test_label_array.append(sample_label)\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_seg.append((ck, cp))\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args \n scale_t = 1\n\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during training\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample, sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n \n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\"video {} not correctly loaded during validation\".format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.loadvideo_decord(sample)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.loadvideo_decord(sample)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb # 0/1\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::2, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::2, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = 
create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [\n transforms.ToPILImage()(frame) for frame in buffer\n ]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C \n \n # T H W C \n buffer = tensor_normalize(\n buffer, [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n )\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False\n )\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n\n def loadvideo_decord(self, sample, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n try:\n if self.keep_aspect_ratio:\n if fname.startswith('s3'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, num_threads=1, ctx=cpu(0))\n else:\n if fname.startswith('s3:'):\n video_bytes = self.client.get(fname)\n vr = VideoReader(io.BytesIO(video_bytes),\n width=self.new_width,\n height=self.new_height,\n num_threads=1,\n ctx=cpu(0))\n else:\n vr = VideoReader(fname, width=self.new_width, height=self.new_height,\n num_threads=1, ctx=cpu(0))\n except:\n print(\"video cannot be loaded by decord: \", fname)\n return []\n\n if self.mode == 'test':\n tick = len(vr) / float(self.num_segment)\n all_index = list(np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)] +\n [int(tick * x) for x in range(self.num_segment)]))\n while len(all_index) < (self.num_segment * self.test_num_segment):\n all_index.append(all_index[-1])\n all_index = np.sort(np.array(all_index))\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n elif self.mode == 'validation':\n tick = len(vr) / float(self.num_segment)\n all_index = np.array([int(tick / 2.0 + tick * x) for x in range(self.num_segment)])\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n # handle temporal segments\n average_duration = len(vr) // self.num_segment\n if average_duration > 0:\n all_index = list(np.multiply(list(range(self.num_segment)), average_duration) + np.random.randint(average_duration,\n size=self.num_segment))\n elif len(vr) > self.num_segment:\n all_index = list(np.sort(np.random.randint(len(vr), size=self.num_segment)))\n else:\n all_index = list(np.zeros((self.num_segment,)))\n vr.seek(0)\n buffer = vr.get_batch(all_index).asnumpy()\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" }, { "identifier": "SSRawFrameClsDataset", "path": "vbench/third_party/umt/datasets/ssv2.py", "snippet": "class SSRawFrameClsDataset(Dataset):\n \"\"\"Load your own raw frame classification dataset.\"\"\"\n\n def __init__(self, 
anno_path, prefix='', split=' ', mode='train', clip_len=8,\n crop_size=224, short_side_size=256, new_height=256, new_width=340,\n keep_aspect_ratio=True, num_segment=1, num_crop=1, test_num_segment=10,\n test_num_crop=3, filename_tmpl='img_{:05}.jpg', args=None):\n self.anno_path = anno_path\n self.prefix = prefix\n self.split = split\n self.mode = mode\n self.clip_len = clip_len\n self.crop_size = crop_size\n self.short_side_size = short_side_size\n self.new_height = new_height\n self.new_width = new_width\n self.keep_aspect_ratio = keep_aspect_ratio\n self.num_segment = num_segment\n self.test_num_segment = test_num_segment\n self.num_crop = num_crop\n self.test_num_crop = test_num_crop\n self.filename_tmpl = filename_tmpl\n self.args = args\n self.aug = False\n self.rand_erase = False\n\n self.client = None\n if has_client:\n self.client = Client('~/petreloss.conf')\n\n if self.mode in ['train']:\n self.aug = True\n if self.args.reprob > 0:\n self.rand_erase = True\n if VideoReader is None:\n raise ImportError(\n \"Unable to import `decord` which is required to read videos.\")\n\n import pandas as pd\n cleaned = pd.read_csv(self.anno_path, header=None, delimiter=self.split)\n self.dataset_samples = list(cleaned.values[:, 0])\n self.total_frames = list(cleaned.values[:, 1])\n self.label_array = list(cleaned.values[:, -1])\n\n if (mode == 'train'):\n pass\n\n elif (mode == 'validation'):\n self.data_transform = Compose([\n Resize(self.short_side_size,\n interpolation='bilinear'),\n CenterCrop(size=(self.crop_size,\n self.crop_size)),\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n elif mode == 'test':\n self.data_resize = Compose([\n Resize(size=(short_side_size),\n interpolation='bilinear')\n ])\n self.data_transform = Compose([\n ClipToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n ])\n self.test_seg = []\n self.test_dataset = []\n self.test_total_frames = []\n self.test_label_array = []\n for ck in range(self.test_num_segment):\n for cp in range(self.test_num_crop):\n for idx in range(len(self.label_array)):\n self.test_seg.append((ck, cp))\n self.test_dataset.append(self.dataset_samples[idx])\n self.test_total_frames.append(self.total_frames[idx])\n self.test_label_array.append(self.label_array[idx])\n\n def __getitem__(self, index):\n if self.mode == 'train':\n args = self.args\n scale_t = 1\n\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample,\n total_frame,\n sample_rate_scale=scale_t) # T H W C\n if len(buffer) == 0:\n while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during training\".format(\n sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample,\n total_frame,\n sample_rate_scale=scale_t)\n\n if args.num_sample > 1:\n frame_list = []\n label_list = []\n index_list = []\n for _ in range(args.num_sample):\n new_frames = self._aug_frame(buffer, args)\n label = self.label_array[index]\n frame_list.append(new_frames)\n label_list.append(label)\n index_list.append(index)\n return frame_list, label_list, index_list, {}\n else:\n buffer = self._aug_frame(buffer, args)\n\n return buffer, self.label_array[index], index, {}\n\n elif self.mode == 'validation':\n sample = self.dataset_samples[index]\n total_frame = self.total_frames[index]\n buffer = self.load_frame(sample, total_frame)\n if len(buffer) == 0:\n 
while len(buffer) == 0:\n warnings.warn(\n \"video {} not correctly loaded during validation\".\n format(sample))\n index = np.random.randint(self.__len__())\n sample = self.dataset_samples[index]\n buffer = self.load_frame(sample, total_frame)\n buffer = self.data_transform(buffer)\n return buffer, self.label_array[index], sample.split(\n \"/\")[-1].split(\".\")[0]\n\n elif self.mode == 'test':\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n while len(buffer) == 0:\n warnings.warn(\"video {}, temporal {}, spatial {} not found during testing\".format(\\\n str(self.test_dataset[index]), chunk_nb, split_nb))\n index = np.random.randint(self.__len__())\n sample = self.test_dataset[index]\n total_frame = self.test_total_frames[index]\n chunk_nb, split_nb = self.test_seg[index]\n buffer = self.load_frame(sample, total_frame)\n\n buffer = self.data_resize(buffer)\n if isinstance(buffer, list):\n buffer = np.stack(buffer, 0)\n\n spatial_step = 1.0 * (max(buffer.shape[1], buffer.shape[2]) - self.short_side_size) \\\n / (self.test_num_crop - 1)\n temporal_start = chunk_nb\n spatial_start = int(split_nb * spatial_step)\n if buffer.shape[1] >= buffer.shape[2]:\n buffer = buffer[temporal_start::self.test_num_segment, \\\n spatial_start:spatial_start + self.short_side_size, :, :]\n else:\n buffer = buffer[temporal_start::self.test_num_segment, \\\n :, spatial_start:spatial_start + self.short_side_size, :]\n\n buffer = self.data_transform(buffer)\n return buffer, self.test_label_array[index], sample.split(\"/\")[-1].split(\".\")[0], \\\n chunk_nb, split_nb\n else:\n raise NameError('mode {} unkown'.format(self.mode))\n\n def _aug_frame(\n self,\n buffer,\n args,\n ):\n\n aug_transform = create_random_augment(\n input_size=(self.crop_size, self.crop_size),\n auto_augment=args.aa,\n interpolation=args.train_interpolation,\n )\n\n buffer = [transforms.ToPILImage()(frame) for frame in buffer]\n\n buffer = aug_transform(buffer)\n\n buffer = [transforms.ToTensor()(img) for img in buffer]\n buffer = torch.stack(buffer) # T C H W\n buffer = buffer.permute(0, 2, 3, 1) # T H W C\n\n # T H W C\n buffer = tensor_normalize(buffer, [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225])\n # T H W C -> C T H W.\n buffer = buffer.permute(3, 0, 1, 2)\n # Perform data augmentation.\n scl, asp = (\n [0.08, 1.0],\n [0.75, 1.3333],\n )\n\n buffer = spatial_sampling(\n buffer,\n spatial_idx=-1,\n min_scale=256,\n max_scale=320,\n crop_size=self.crop_size,\n random_horizontal_flip=False if args.data_set == 'SSV2' else True,\n inverse_uniform_sampling=False,\n aspect_ratio=asp,\n scale=scl,\n motion_shift=False)\n\n if self.rand_erase:\n erase_transform = RandomErasing(\n args.reprob,\n mode=args.remode,\n max_count=args.recount,\n num_splits=args.recount,\n device=\"cpu\",\n )\n buffer = buffer.permute(1, 0, 2, 3)\n buffer = erase_transform(buffer)\n buffer = buffer.permute(1, 0, 2, 3)\n\n return buffer\n\n def load_frame(self, sample, num_frames, sample_rate_scale=1):\n \"\"\"Load video content using Decord\"\"\"\n fname = sample\n fname = os.path.join(self.prefix, fname)\n\n if self.mode == 'test':\n tick = num_frames / float(self.num_segment)\n all_index = []\n for t_seg in range(self.test_num_segment):\n tmp_index = [\n int(t_seg * tick / self.test_num_segment + tick * x)\n for x in range(self.num_segment)\n ]\n all_index.extend(tmp_index)\n all_index = list(np.sort(np.array(all_index)))\n imgs = []\n 
for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1)) \n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n # handle temporal segments\n average_duration = num_frames // self.num_segment\n all_index = []\n if average_duration > 0:\n if self.mode == 'validation':\n all_index = list(\n np.multiply(list(range(self.num_segment)),\n average_duration) +\n np.ones(self.num_segment, dtype=int) *\n (average_duration // 2))\n else:\n all_index = list(\n np.multiply(list(range(self.num_segment)),\n average_duration) +\n np.random.randint(average_duration, size=self.num_segment))\n elif num_frames > self.num_segment:\n if self.mode == 'validation':\n all_index = list(range(self.num_segment))\n else:\n all_index = list(\n np.sort(\n np.random.randint(num_frames, size=self.num_segment)))\n else:\n all_index = [0] * (self.num_segment - num_frames) + list(\n range(num_frames))\n all_index = list(np.array(all_index))\n imgs = []\n for idx in all_index:\n frame_fname = os.path.join(fname, self.filename_tmpl.format(idx + 1))\n img_bytes = self.client.get(frame_fname)\n img_np = np.frombuffer(img_bytes, np.uint8)\n img = cv2.imdecode(img_np, cv2.IMREAD_COLOR)\n cv2.cvtColor(img, cv2.COLOR_BGR2RGB, img)\n imgs.append(img)\n buffer = np.array(imgs)\n return buffer\n\n def __len__(self):\n if self.mode != 'test':\n return len(self.dataset_samples)\n else:\n return len(self.test_dataset)" } ]
import os
from torchvision import transforms
from .transforms import *
from .masking_generator import TubeMaskingGenerator, RandomMaskingGenerator
from .mae import VideoMAE
from .kinetics import VideoClsDataset
from .kinetics_sparse import VideoClsDataset_sparse
from .ssv2 import SSVideoClsDataset, SSRawFrameClsDataset
14,564
class DataAugmentationForVideoMAE(object):
    def __init__(self, args):
        self.input_mean = [0.485, 0.456, 0.406]  # IMAGENET_DEFAULT_MEAN
        self.input_std = [0.229, 0.224, 0.225]  # IMAGENET_DEFAULT_STD
        normalize = GroupNormalize(self.input_mean, self.input_std)
        self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66])
        if args.color_jitter > 0:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupColorJitter(args.color_jitter),
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        else:
            self.transform = transforms.Compose([
                self.train_augmentation,
                GroupRandomHorizontalFlip(flip=args.flip),
                Stack(roll=False),
                ToTorchFormatTensor(div=True),
                normalize,
            ])
        if args.mask_type == 'tube':
            self.masked_position_generator = TubeMaskingGenerator(
                args.window_size, args.mask_ratio
            )
        elif args.mask_type == 'random':
            self.masked_position_generator = RandomMaskingGenerator(
                args.window_size, args.mask_ratio
            )
        elif args.mask_type in 'attention':
            self.masked_position_generator = None

    def __call__(self, images):
        process_data, _ = self.transform(images)
        if self.masked_position_generator is None:
            return process_data, -1
        else:
            return process_data, self.masked_position_generator()

    def __repr__(self):
        repr = "(DataAugmentationForVideoMAE,\n"
        repr += " transform = %s,\n" % str(self.transform)
        repr += " Masked position generator = %s,\n" % str(self.masked_position_generator)
        repr += ")"
        return repr


def build_pretraining_dataset(args):
    transform = DataAugmentationForVideoMAE(args)
class DataAugmentationForVideoMAE(object): def __init__(self, args): self.input_mean = [0.485, 0.456, 0.406] # IMAGENET_DEFAULT_MEAN self.input_std = [0.229, 0.224, 0.225] # IMAGENET_DEFAULT_STD normalize = GroupNormalize(self.input_mean, self.input_std) self.train_augmentation = GroupMultiScaleCrop(args.input_size, [1, .875, .75, .66]) if args.color_jitter > 0: self.transform = transforms.Compose([ self.train_augmentation, GroupColorJitter(args.color_jitter), GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) else: self.transform = transforms.Compose([ self.train_augmentation, GroupRandomHorizontalFlip(flip=args.flip), Stack(roll=False), ToTorchFormatTensor(div=True), normalize, ]) if args.mask_type == 'tube': self.masked_position_generator = TubeMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type == 'random': self.masked_position_generator = RandomMaskingGenerator( args.window_size, args.mask_ratio ) elif args.mask_type in 'attention': self.masked_position_generator = None def __call__(self, images): process_data, _ = self.transform(images) if self.masked_position_generator is None: return process_data, -1 else: return process_data, self.masked_position_generator() def __repr__(self): repr = "(DataAugmentationForVideoMAE,\n" repr += " transform = %s,\n" % str(self.transform) repr += " Masked position generator = %s,\n" % str(self.masked_position_generator) repr += ")" return repr def build_pretraining_dataset(args): transform = DataAugmentationForVideoMAE(args)
dataset = VideoMAE(
2
2023-11-27 12:41:46+00:00
24k
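Before the next record: the load_frame snippet in the context above samples one frame per temporal segment — the clip is split into num_segment equal chunks, taking the chunk midpoint for validation and a random offset within the chunk for training, with fallbacks when the clip is shorter than the number of segments. A minimal stand-alone sketch of that index selection (hypothetical helper name sample_segment_indices, not part of the record) could look like:

import numpy as np

def sample_segment_indices(num_frames: int, num_segment: int, training: bool) -> np.ndarray:
    """Pick one frame index per temporal segment, mirroring the sampling in load_frame."""
    average_duration = num_frames // num_segment
    if average_duration > 0:
        base = np.arange(num_segment) * average_duration
        if training:
            offsets = np.random.randint(average_duration, size=num_segment)  # random offset per chunk
        else:
            offsets = np.full(num_segment, average_duration // 2)  # chunk midpoint
        return base + offsets
    if num_frames > num_segment:
        if training:
            return np.sort(np.random.randint(num_frames, size=num_segment))
        return np.arange(num_segment)
    # clip shorter than num_segment: pad with index 0, then use every available frame once
    return np.array([0] * (num_segment - num_frames) + list(range(num_frames)))

# e.g. sample_segment_indices(300, 16, training=False) returns 16 evenly spaced midpoints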
cswry/SeeSR
test_seesr.py
[ { "identifier": "StableDiffusionControlNetPipeline", "path": "pipelines/pipeline_seesr.py", "snippet": "class StableDiffusionControlNetPipeline(DiffusionPipeline, TextualInversionLoaderMixin):\n r\"\"\"\n Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.\n\n This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the\n library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)\n\n In addition the pipeline inherits the following loading methods:\n - *Textual-Inversion*: [`loaders.TextualInversionLoaderMixin.load_textual_inversion`]\n\n Args:\n vae ([`AutoencoderKL`]):\n Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.\n text_encoder ([`CLIPTextModel`]):\n Frozen text-encoder. Stable Diffusion uses the text portion of\n [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically\n the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.\n tokenizer (`CLIPTokenizer`):\n Tokenizer of class\n [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).\n unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.\n controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):\n Provides additional conditioning to the unet during the denoising process. If you set multiple ControlNets\n as a list, the outputs from each ControlNet are added together to create one combined additional\n conditioning.\n scheduler ([`SchedulerMixin`]):\n A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of\n [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].\n safety_checker ([`StableDiffusionSafetyChecker`]):\n Classification module that estimates whether generated images could be considered offensive or harmful.\n Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details.\n feature_extractor ([`CLIPImageProcessor`]):\n Model that extracts features from generated images to be used as inputs for the `safety_checker`.\n \"\"\"\n _optional_components = [\"safety_checker\", \"feature_extractor\"]\n\n def __init__(\n self,\n vae: AutoencoderKL,\n text_encoder: CLIPTextModel,\n tokenizer: CLIPTokenizer,\n unet: UNet2DConditionModel,\n controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],\n scheduler: KarrasDiffusionSchedulers,\n safety_checker: StableDiffusionSafetyChecker,\n feature_extractor: CLIPImageProcessor,\n requires_safety_checker: bool = True,\n ):\n super().__init__()\n\n if safety_checker is None and requires_safety_checker:\n logger.warning(\n f\"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure\"\n \" that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered\"\n \" results in services or applications open to the public. Both the diffusers team and Hugging Face\"\n \" strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling\"\n \" it only for use-cases that involve analyzing network behavior or auditing its results. 
For more\"\n \" information, please have a look at https://github.com/huggingface/diffusers/pull/254 .\"\n )\n\n if safety_checker is not None and feature_extractor is None:\n raise ValueError(\n \"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety\"\n \" checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead.\"\n )\n\n if isinstance(controlnet, (list, tuple)):\n controlnet = MultiControlNetModel(controlnet)\n\n self.register_modules(\n vae=vae,\n text_encoder=text_encoder,\n tokenizer=tokenizer,\n unet=unet,\n controlnet=controlnet,\n scheduler=scheduler,\n safety_checker=safety_checker,\n feature_extractor=feature_extractor,\n )\n self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)\n self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)\n self.register_to_config(requires_safety_checker=requires_safety_checker)\n\n def _init_tiled_vae(self,\n encoder_tile_size = 256,\n decoder_tile_size = 256,\n fast_decoder = False,\n fast_encoder = False,\n color_fix = False,\n vae_to_gpu = True):\n # save original forward (only once)\n if not hasattr(self.vae.encoder, 'original_forward'):\n setattr(self.vae.encoder, 'original_forward', self.vae.encoder.forward)\n if not hasattr(self.vae.decoder, 'original_forward'):\n setattr(self.vae.decoder, 'original_forward', self.vae.decoder.forward)\n\n encoder = self.vae.encoder\n decoder = self.vae.decoder\n\n self.vae.encoder.forward = VAEHook(\n encoder, encoder_tile_size, is_decoder=False, fast_decoder=fast_decoder, fast_encoder=fast_encoder, color_fix=color_fix, to_gpu=vae_to_gpu)\n self.vae.decoder.forward = VAEHook(\n decoder, decoder_tile_size, is_decoder=True, fast_decoder=fast_decoder, fast_encoder=fast_encoder, color_fix=color_fix, to_gpu=vae_to_gpu)\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_slicing\n def enable_vae_slicing(self):\n r\"\"\"\n Enable sliced VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several\n steps. This is useful to save some memory and allow larger batch sizes.\n \"\"\"\n self.vae.enable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_slicing\n def disable_vae_slicing(self):\n r\"\"\"\n Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_slicing()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_vae_tiling\n def enable_vae_tiling(self):\n r\"\"\"\n Enable tiled VAE decoding.\n\n When this option is enabled, the VAE will split the input tensor into tiles to compute decoding and encoding in\n several steps. This is useful to save a large amount of memory and to allow the processing of larger images.\n \"\"\"\n self.vae.enable_tiling()\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.disable_vae_tiling\n def disable_vae_tiling(self):\n r\"\"\"\n Disable tiled VAE decoding. 
If `enable_vae_tiling` was previously invoked, this method will go back to\n computing decoding in one step.\n \"\"\"\n self.vae.disable_tiling()\n\n def enable_sequential_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,\n text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a\n `torch.device('meta') and loaded to GPU only when their specific submodule has its `forward` method called.\n Note that offloading happens on a submodule basis. Memory savings are higher than with\n `enable_model_cpu_offload`, but performance is lower.\n \"\"\"\n if is_accelerate_available():\n from accelerate import cpu_offload\n else:\n raise ImportError(\"Please install accelerate via `pip install accelerate`\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:\n cpu_offload(cpu_offloaded_model, device)\n\n if self.safety_checker is not None:\n cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)\n\n def enable_model_cpu_offload(self, gpu_id=0):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n device = torch.device(f\"cuda:{gpu_id}\")\n\n hook = None\n for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:\n _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)\n\n if self.safety_checker is not None:\n # the safety checker can offload the vae again\n _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)\n\n # control net hook has be manually offloaded as it alternates with unet\n cpu_offload_with_hook(self.controlnet, device)\n\n # We'll offload the last model manually.\n self.final_offload_hook = hook\n\n @property\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. 
After calling\n `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module\n hooks.\n \"\"\"\n if not hasattr(self.unet, \"_hf_hook\"):\n return self.device\n for module in self.unet.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt\n def _encode_prompt(\n self,\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt=None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n ram_encoder_hidden_states: Optional[torch.FloatTensor] = None,\n ):\n r\"\"\"\n Encodes the prompt into text encoder hidden states.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n prompt to be encoded\n device: (`torch.device`):\n torch device\n num_images_per_prompt (`int`):\n number of images that should be generated per prompt\n do_classifier_free_guidance (`bool`):\n whether to use classifier free guidance or not\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. 
If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n \"\"\"\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n if prompt_embeds is None:\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n prompt = self.maybe_convert_prompt(prompt, self.tokenizer)\n\n text_inputs = self.tokenizer(\n prompt,\n padding=\"max_length\",\n max_length=self.tokenizer.model_max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n text_input_ids = text_inputs.input_ids\n untruncated_ids = self.tokenizer(prompt, padding=\"longest\", return_tensors=\"pt\").input_ids\n\n if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(\n text_input_ids, untruncated_ids\n ):\n removed_text = self.tokenizer.batch_decode(\n untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]\n )\n logger.warning(\n \"The following part of your input was truncated because CLIP can only handle sequences up to\"\n f\" {self.tokenizer.model_max_length} tokens: {removed_text}\"\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = text_inputs.attention_mask.to(device)\n else:\n attention_mask = None\n\n prompt_embeds = self.text_encoder(\n text_input_ids.to(device),\n attention_mask=attention_mask,\n )\n prompt_embeds = prompt_embeds[0]\n\n prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n bs_embed, seq_len, _ = prompt_embeds.shape\n # duplicate text embeddings for each generation per prompt, using mps friendly method\n prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)\n prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)\n\n # get unconditional embeddings for classifier free guidance\n if do_classifier_free_guidance and negative_prompt_embeds is None:\n uncond_tokens: List[str]\n if negative_prompt is None:\n uncond_tokens = [\"\"] * batch_size\n elif prompt is not None and type(prompt) is not type(negative_prompt):\n raise TypeError(\n f\"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=\"\n f\" {type(prompt)}.\"\n )\n elif isinstance(negative_prompt, str):\n uncond_tokens = [negative_prompt]\n elif batch_size != len(negative_prompt):\n raise ValueError(\n f\"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:\"\n f\" {prompt} has batch size {batch_size}. 
Please make sure that passed `negative_prompt` matches\"\n \" the batch size of `prompt`.\"\n )\n else:\n uncond_tokens = negative_prompt\n\n # textual inversion: procecss multi-vector tokens if necessary\n if isinstance(self, TextualInversionLoaderMixin):\n uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)\n\n max_length = prompt_embeds.shape[1]\n uncond_input = self.tokenizer(\n uncond_tokens,\n padding=\"max_length\",\n max_length=max_length,\n truncation=True,\n return_tensors=\"pt\",\n )\n\n if hasattr(self.text_encoder.config, \"use_attention_mask\") and self.text_encoder.config.use_attention_mask:\n attention_mask = uncond_input.attention_mask.to(device)\n else:\n attention_mask = None\n\n negative_prompt_embeds = self.text_encoder(\n uncond_input.input_ids.to(device),\n attention_mask=attention_mask,\n )\n negative_prompt_embeds = negative_prompt_embeds[0]\n\n if do_classifier_free_guidance:\n # duplicate unconditional embeddings for each generation per prompt, using mps friendly method\n seq_len = negative_prompt_embeds.shape[1]\n\n negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)\n\n negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)\n negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)\n\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n ram_encoder_hidden_states = torch.cat([ram_encoder_hidden_states, ram_encoder_hidden_states])\n\n return prompt_embeds, ram_encoder_hidden_states\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker\n def run_safety_checker(self, image, device, dtype):\n if self.safety_checker is None:\n has_nsfw_concept = None\n else:\n if torch.is_tensor(image):\n feature_extractor_input = self.image_processor.postprocess(image, output_type=\"pil\")\n else:\n feature_extractor_input = self.image_processor.numpy_to_pil(image)\n safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors=\"pt\").to(device)\n image, has_nsfw_concept = self.safety_checker(\n images=image, clip_input=safety_checker_input.pixel_values.to(dtype)\n )\n return image, has_nsfw_concept\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents\n def decode_latents(self, latents):\n warnings.warn(\n \"The decode_latents method is deprecated and will be removed in a future version. 
Please\"\n \" use VaeImageProcessor instead\",\n FutureWarning,\n )\n latents = 1 / self.vae.config.scaling_factor * latents\n image = self.vae.decode(latents, return_dict=False)[0]\n image = (image / 2 + 0.5).clamp(0, 1)\n # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16\n image = image.cpu().permute(0, 2, 3, 1).float().numpy()\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs\n def prepare_extra_step_kwargs(self, generator, eta):\n # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature\n # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.\n # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502\n # and should be between [0, 1]\n\n accepts_eta = \"eta\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n extra_step_kwargs = {}\n if accepts_eta:\n extra_step_kwargs[\"eta\"] = eta\n\n # check if the scheduler accepts generator\n accepts_generator = \"generator\" in set(inspect.signature(self.scheduler.step).parameters.keys())\n if accepts_generator:\n extra_step_kwargs[\"generator\"] = generator\n #extra_step_kwargs[\"generator\"] = generator\n return extra_step_kwargs\n\n def check_inputs(\n self,\n prompt,\n image,\n height,\n width,\n callback_steps,\n negative_prompt=None,\n prompt_embeds=None,\n negative_prompt_embeds=None,\n controlnet_conditioning_scale=1.0,\n ):\n if height % 8 != 0 or width % 8 != 0:\n raise ValueError(f\"`height` and `width` have to be divisible by 8 but are {height} and {width}.\")\n\n if (callback_steps is None) or (\n callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)\n ):\n raise ValueError(\n f\"`callback_steps` has to be a positive integer but is {callback_steps} of type\"\n f\" {type(callback_steps)}.\"\n )\n\n if prompt is not None and prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to\"\n \" only forward one of the two.\"\n )\n elif prompt is None and prompt_embeds is None:\n raise ValueError(\n \"Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined.\"\n )\n elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):\n raise ValueError(f\"`prompt` has to be of type `str` or `list` but is {type(prompt)}\")\n\n if negative_prompt is not None and negative_prompt_embeds is not None:\n raise ValueError(\n f\"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:\"\n f\" {negative_prompt_embeds}. Please make sure to only forward one of the two.\"\n )\n\n if prompt_embeds is not None and negative_prompt_embeds is not None:\n if prompt_embeds.shape != negative_prompt_embeds.shape:\n raise ValueError(\n \"`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but\"\n f\" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`\"\n f\" {negative_prompt_embeds.shape}.\"\n )\n\n # `prompt` needs more sophisticated handling when there are multiple\n # conditionings.\n if isinstance(self.controlnet, MultiControlNetModel):\n if isinstance(prompt, list):\n logger.warning(\n f\"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}\"\n \" prompts. 
The conditionings will be fixed across the prompts.\"\n )\n\n # Check `image`\n is_compiled = hasattr(F, \"scaled_dot_product_attention\") and isinstance(\n self.controlnet, torch._dynamo.eval_frame.OptimizedModule\n )\n if (\n isinstance(self.controlnet, ControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, ControlNetModel)\n ):\n self.check_image(image, prompt, prompt_embeds)\n elif (\n isinstance(self.controlnet, MultiControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, MultiControlNetModel)\n ):\n if not isinstance(image, list):\n raise TypeError(\"For multiple controlnets: `image` must be type `list`\")\n\n # When `image` is a nested list:\n # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])\n elif any(isinstance(i, list) for i in image):\n raise ValueError(\"A single batch of multiple conditionings are supported at the moment.\")\n elif len(image) != len(self.controlnet.nets):\n raise ValueError(\n \"For multiple controlnets: `image` must have the same length as the number of controlnets.\"\n )\n\n for image_ in image:\n self.check_image(image_, prompt, prompt_embeds)\n else:\n assert False\n\n # Check `controlnet_conditioning_scale`\n if (\n isinstance(self.controlnet, ControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, ControlNetModel)\n ):\n if not isinstance(controlnet_conditioning_scale, float):\n raise TypeError(\"For single controlnet: `controlnet_conditioning_scale` must be type `float`.\")\n elif (\n isinstance(self.controlnet, MultiControlNetModel)\n or is_compiled\n and isinstance(self.controlnet._orig_mod, MultiControlNetModel)\n ):\n if isinstance(controlnet_conditioning_scale, list):\n if any(isinstance(i, list) for i in controlnet_conditioning_scale):\n raise ValueError(\"A single batch of multiple conditionings are supported at the moment.\")\n elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(\n self.controlnet.nets\n ):\n raise ValueError(\n \"For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have\"\n \" the same length as the number of controlnets\"\n )\n else:\n assert False\n\n def check_image(self, image, prompt, prompt_embeds):\n image_is_pil = isinstance(image, PIL.Image.Image)\n image_is_tensor = isinstance(image, torch.Tensor)\n image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)\n image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)\n\n if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:\n raise TypeError(\n \"image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors\"\n )\n\n if image_is_pil:\n image_batch_size = 1\n elif image_is_tensor:\n image_batch_size = image.shape[0]\n elif image_is_pil_list:\n image_batch_size = len(image)\n elif image_is_tensor_list:\n image_batch_size = len(image)\n\n if prompt is not None and isinstance(prompt, str):\n prompt_batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n prompt_batch_size = len(prompt)\n elif prompt_embeds is not None:\n prompt_batch_size = prompt_embeds.shape[0]\n\n if image_batch_size != 1 and image_batch_size != prompt_batch_size:\n raise ValueError(\n f\"If image batch size is not 1, image batch size must be same as prompt batch size. 
image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}\"\n )\n\n def prepare_image(\n self,\n image,\n width,\n height,\n batch_size,\n num_images_per_prompt,\n device,\n dtype,\n do_classifier_free_guidance=False,\n guess_mode=False,\n ):\n if not isinstance(image, torch.Tensor):\n if isinstance(image, PIL.Image.Image):\n image = [image]\n\n if isinstance(image[0], PIL.Image.Image):\n images = []\n\n for image_ in image:\n image_ = image_.convert(\"RGB\")\n #image_ = image_.resize((width, height), resample=PIL_INTERPOLATION[\"lanczos\"])\n image_ = np.array(image_)\n image_ = image_[None, :]\n images.append(image_)\n\n image = images\n\n image = np.concatenate(image, axis=0)\n image = np.array(image).astype(np.float32) / 255.0\n image = image.transpose(0, 3, 1, 2)\n image = torch.from_numpy(image)#.flip(1)\n elif isinstance(image[0], torch.Tensor):\n image = torch.cat(image, dim=0)\n\n image_batch_size = image.shape[0]\n\n if image_batch_size == 1:\n repeat_by = batch_size\n else:\n # image batch size is the same as prompt batch size\n repeat_by = num_images_per_prompt\n\n image = image.repeat_interleave(repeat_by, dim=0)\n\n image = image.to(device=device, dtype=dtype)\n\n if do_classifier_free_guidance and not guess_mode:\n image = torch.cat([image] * 2)\n\n return image\n\n # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents\n def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):\n shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)\n if isinstance(generator, list) and len(generator) != batch_size:\n raise ValueError(\n f\"You have passed a list of generators of length {len(generator)}, but requested an effective batch\"\n f\" size of {batch_size}. 
Make sure the batch size matches the length of the generators.\"\n )\n\n if latents is None:\n latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)\n #latents = randn_tensor(shape, generator=None, device=device, dtype=dtype)\n #offset_noise = torch.randn(batch_size, num_channels_latents, 1, 1, device=device)\n #latents = latents + 0.1 * offset_noise\n else:\n latents = latents.to(device)\n\n # scale the initial noise by the standard deviation required by the scheduler\n latents = latents * self.scheduler.init_noise_sigma\n return latents\n\n def _default_height_width(self, height, width, image):\n # NOTE: It is possible that a list of images have different\n # dimensions for each image, so just checking the first image\n # is not _exactly_ correct, but it is simple.\n while isinstance(image, list):\n image = image[0]\n\n if height is None:\n if isinstance(image, PIL.Image.Image):\n height = image.height\n elif isinstance(image, torch.Tensor):\n height = image.shape[2]\n\n height = (height // 8) * 8 # round down to nearest multiple of 8\n\n if width is None:\n if isinstance(image, PIL.Image.Image):\n width = image.width\n elif isinstance(image, torch.Tensor):\n width = image.shape[3]\n\n width = (width // 8) * 8 # round down to nearest multiple of 8\n\n return height, width\n\n # override DiffusionPipeline\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n safe_serialization: bool = False,\n variant: Optional[str] = None,\n ):\n if isinstance(self.controlnet, ControlNetModel):\n super().save_pretrained(save_directory, safe_serialization, variant)\n else:\n raise NotImplementedError(\"Currently, the `save_pretrained()` is not implemented for Multi-ControlNet.\")\n \n def _gaussian_weights(self, tile_width, tile_height, nbatches):\n \"\"\"Generates a gaussian mask of weights for tile contributions\"\"\"\n from numpy import pi, exp, sqrt\n import numpy as np\n\n latent_width = tile_width\n latent_height = tile_height\n\n var = 0.01\n midpoint = (latent_width - 1) / 2 # -1 because index goes from 0 to latent_width - 1\n x_probs = [exp(-(x-midpoint)*(x-midpoint)/(latent_width*latent_width)/(2*var)) / sqrt(2*pi*var) for x in range(latent_width)]\n midpoint = latent_height / 2\n y_probs = [exp(-(y-midpoint)*(y-midpoint)/(latent_height*latent_height)/(2*var)) / sqrt(2*pi*var) for y in range(latent_height)]\n\n weights = np.outer(y_probs, x_probs)\n return torch.tile(torch.tensor(weights, device=self.device), (nbatches, self.unet.config.in_channels, 1, 1))\n\n @perfcount\n @torch.no_grad()\n @replace_example_docstring(EXAMPLE_DOC_STRING)\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n conditioning_scale: Union[float, 
List[float]] = 1.0,\n guess_mode: bool = False,\n image_sr = None,\n start_steps = 999,\n start_point = 'noise',\n ram_encoder_hidden_states=None,\n latent_tiled_size=320,\n latent_tiled_overlap=4,\n args=None\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]`, `List[PIL.Image.Image]`,\n `List[List[torch.FloatTensor]]`, or `List[List[PIL.Image.Image]]`):\n The ControlNet input condition. ControlNet uses this input condition to generate guidance to Unet. If\n the type is specified as `Torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can\n also be accepted as an image. The dimensions of the output image defaults to `image`'s dimensions. If\n height and/or width are passed, `image` is resized according to them. If multiple ControlNets are\n specified in init, images must be passed as a list such that each element of the list can be correctly\n batched for input to a single controlnet.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated image.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated image.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality image at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,\n usually at the expense of lower image quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the image generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n num_images_per_prompt (`int`, *optional*, defaults to 1):\n The number of images to generate per prompt.\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. 
If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"pil\"`):\n The output format of the generate image. Choose between\n [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):\n The outputs of the controlnet are multiplied by `conditioning_scale` before they are added\n to the residual in the original unet. If multiple ControlNets are specified in init, you can set the\n corresponding scale as a list.\n guess_mode (`bool`, *optional*, defaults to `False`):\n In this mode, the ControlNet encoder will try best to recognize the content of the input image even if\n you remove all prompts. The `guidance_scale` between 3.0 and 5.0 is recommended.\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated images, and the second element is a\n list of `bool`s denoting whether the corresponding generated image likely represents \"not-safe-for-work\"\n (nsfw) content, according to the `safety_checker`.\n \"\"\"\n # 0. Default height and width to unet\n height, width = self._default_height_width(height, width, image)\n \n # 1. Check inputs. Raise error if not correct\n \"\"\"\n self.check_inputs(\n prompt,\n image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n conditioning_scale,\n )\n \"\"\"\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n \"\"\"\n if isinstance(controlnet, MultiControlNetModel) and isinstance(conditioning_scale, float):\n conditioning_scale = [conditioning_scale] * len(controlnet.nets)\n \n global_pool_conditions = (\n controlnet.config.global_pool_conditions\n if isinstance(controlnet, ControlNetModel)\n else controlnet.nets[0].config.global_pool_conditions\n )\n \n guess_mode = guess_mode or global_pool_conditions\n \"\"\"\n\n # 3. Encode input prompt\n prompt_embeds, ram_encoder_hidden_states = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n ram_encoder_hidden_states=ram_encoder_hidden_states\n )\n\n # 4. Prepare image\n image = self.prepare_image(\n image=image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps = self.scheduler.timesteps\n\n # 6. Prepare latent variables\n num_channels_latents = self.unet.config.in_channels\n latents = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n )\n\n # 6. Prepare the start point\n if start_point == 'noise':\n latents = latents\n elif start_point == 'lr': # LRE Strategy\n latents_condition_image = self.vae.encode(image*2-1).latent_dist.sample()\n latents_condition_image = latents_condition_image * self.vae.config.scaling_factor\n start_steps_tensor = torch.randint(start_steps, start_steps+1, (latents.shape[0],), device=latents.device)\n start_steps_tensor = start_steps_tensor.long()\n latents = self.scheduler.add_noise(latents_condition_image[0:1, ...], latents, start_steps_tensor)\n \n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n \n _, _, h, w = latents.size()\n tile_size, tile_overlap = (latent_tiled_size, latent_tiled_overlap) if args is not None else (256, 8)\n if h*w<=tile_size*tile_size:\n print(f\"[Tiled Latent]: the input size is tiny and unnecessary to tile.\")\n else:\n print(f\"[Tiled Latent]: the input size is {image.shape[-2]}x{image.shape[-1]}, need to tiled\")\n\n for i, t in enumerate(timesteps):\n # pass, if the timestep is larger than start_steps\n if t > start_steps:\n print(f'pass {t} steps.')\n continue\n\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n controlnet_latent_model_input = latents\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n \n else:\n controlnet_latent_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n\n if h*w<=tile_size*tile_size: # tiled latent input\n down_block_res_samples, mid_block_res_sample = [None]*10, None\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n controlnet_latent_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=image,\n conditioning_scale=conditioning_scale,\n guess_mode=guess_mode,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )\n\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )[0]\n else:\n tile_weights = self._gaussian_weights(tile_size, tile_size, 1)\n tile_size = min(tile_size, min(h, w))\n tile_weights = self._gaussian_weights(tile_size, tile_size, 1)\n\n grid_rows = 0\n cur_x = 0\n while cur_x < latent_model_input.size(-1):\n cur_x = max(grid_rows * tile_size-tile_overlap * grid_rows, 0)+tile_size\n grid_rows += 1\n\n grid_cols = 0\n cur_y = 0\n while cur_y < latent_model_input.size(-2):\n cur_y = max(grid_cols * tile_size-tile_overlap * grid_cols, 0)+tile_size\n grid_cols += 1\n\n input_list = []\n cond_list = []\n img_list = []\n noise_preds = []\n for row in range(grid_rows):\n noise_preds_row = []\n for col in range(grid_cols):\n if col < grid_cols-1 or row < grid_rows-1:\n # extract tile from input image\n ofs_x = max(row * tile_size-tile_overlap * row, 0)\n ofs_y = max(col * tile_size-tile_overlap * col, 0)\n # input tile area on total image\n if row == grid_rows-1:\n ofs_x = w - tile_size\n if col == grid_cols-1:\n ofs_y = h - tile_size\n\n input_start_x = ofs_x\n input_end_x = ofs_x + 
tile_size\n input_start_y = ofs_y\n input_end_y = ofs_y + tile_size\n\n # input tile dimensions\n input_tile = latent_model_input[:, :, input_start_y:input_end_y, input_start_x:input_end_x]\n input_list.append(input_tile)\n cond_tile = controlnet_latent_model_input[:, :, input_start_y:input_end_y, input_start_x:input_end_x]\n cond_list.append(cond_tile)\n img_tile = image[:, :, input_start_y*8:input_end_y*8, input_start_x*8:input_end_x*8]\n img_list.append(img_tile)\n\n if len(input_list) == batch_size or col == grid_cols-1:\n input_list_t = torch.cat(input_list, dim=0)\n cond_list_t = torch.cat(cond_list, dim=0)\n img_list_t = torch.cat(img_list, dim=0)\n #print(input_list_t.shape, cond_list_t.shape, img_list_t.shape, fg_mask_list_t.shape)\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n cond_list_t,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=img_list_t,\n conditioning_scale=conditioning_scale,\n guess_mode=guess_mode,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n model_out = self.unet(\n input_list_t,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n image_encoder_hidden_states = ram_encoder_hidden_states,\n )[0]\n\n #for sample_i in range(model_out.size(0)):\n # noise_preds_row.append(model_out[sample_i].unsqueeze(0))\n input_list = []\n cond_list = []\n img_list = []\n\n noise_preds.append(model_out)\n\n # Stitch noise predictions for all tiles\n noise_pred = torch.zeros(latent_model_input.shape, device=latent_model_input.device)\n contributors = torch.zeros(latent_model_input.shape, device=latent_model_input.device)\n # Add each tile contribution to overall latents\n for row in range(grid_rows):\n for col in range(grid_cols):\n if col < grid_cols-1 or row < grid_rows-1:\n # extract tile from input image\n ofs_x = max(row * tile_size-tile_overlap * row, 0)\n ofs_y = max(col * tile_size-tile_overlap * col, 0)\n # input tile area on total image\n if row == grid_rows-1:\n ofs_x = w - tile_size\n if col == grid_cols-1:\n ofs_y = h - tile_size\n\n input_start_x = ofs_x\n input_end_x = ofs_x + tile_size\n input_start_y = ofs_y\n input_end_y = ofs_y + tile_size\n \n noise_pred[:, :, input_start_y:input_end_y, input_start_x:input_end_x] += noise_preds[row*grid_cols + col] * tile_weights\n contributors[:, :, input_start_y:input_end_y, input_start_x:input_end_x] += tile_weights\n # Average overlapping areas with more than 1 contributor\n noise_pred /= contributors\n \n \n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n # call the callback, if 
provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n has_nsfw_concept = None\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]#.flip(1)\n #image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "load_dreambooth_lora", "path": "utils/misc.py", "snippet": "def load_dreambooth_lora(unet, vae=None, model_path=None, alpha=1.0, model_base=\"\"):\n if model_path is None: return unet\n \n if model_path.endswith(\".ckpt\"):\n base_state_dict = torch.load(model_path)['state_dict']\n elif model_path.endswith(\".safetensors\"):\n state_dict = {}\n with safe_open(model_path, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n state_dict[key] = f.get_tensor(key)\n \n is_lora = all(\"lora\" in k for k in state_dict.keys())\n if not is_lora:\n base_state_dict = state_dict\n else:\n base_state_dict = {}\n with safe_open(model_base, framework=\"pt\", device=\"cpu\") as f:\n for key in f.keys():\n base_state_dict[key] = f.get_tensor(key)\n \n converted_unet_checkpoint = convert_ldm_unet_checkpoint(base_state_dict, unet.config)\n unet_state_dict = unet.state_dict()\n for key in converted_unet_checkpoint:\n converted_unet_checkpoint[key] = alpha * converted_unet_checkpoint[key] + (1.0-alpha) * unet_state_dict[key]\n unet.load_state_dict(converted_unet_checkpoint, strict=False)\n\n if vae is not None:\n converted_vae_checkpoint = convert_ldm_vae_checkpoint(base_state_dict, vae.config)\n vae.load_state_dict(converted_vae_checkpoint)\n \n return unet, vae" }, { "identifier": "wavelet_color_fix", "path": "utils/wavelet_color_fix.py", "snippet": "def wavelet_color_fix(target: Image, source: Image):\n # Convert images to tensors\n to_tensor = ToTensor()\n target_tensor = to_tensor(target).unsqueeze(0)\n source_tensor = to_tensor(source).unsqueeze(0)\n\n # Apply wavelet reconstruction\n result_tensor = wavelet_reconstruction(target_tensor, source_tensor)\n\n # Convert tensor back to image\n to_image = ToPILImage()\n result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))\n\n return result_image" }, { "identifier": "adain_color_fix", "path": "utils/wavelet_color_fix.py", "snippet": "def adain_color_fix(target: Image, source: Image):\n # Convert images to tensors\n to_tensor = ToTensor()\n target_tensor = to_tensor(target).unsqueeze(0)\n source_tensor = to_tensor(source).unsqueeze(0)\n\n # Apply adaptive instance normalization\n 
result_tensor = adaptive_instance_normalization(target_tensor, source_tensor)\n\n # Convert tensor back to image\n to_image = ToPILImage()\n result_image = to_image(result_tensor.squeeze(0).clamp_(0.0, 1.0))\n\n return result_image" }, { "identifier": "ram", "path": "ram/models/ram_lora.py", "snippet": "def ram(pretrained='', pretrained_condition='', **kwargs):\n model = RAMLora(**kwargs)\n\n if pretrained:\n if kwargs['vit'] == 'swin_b':\n model, msg = load_checkpoint_swinbase(model, pretrained, kwargs)\n elif kwargs['vit'] == 'swin_l':\n model, msg = load_checkpoint_swinlarge(model, pretrained, kwargs)\n else:\n model, msg = load_checkpoint(model, pretrained)\n print('vit:', kwargs['vit'])\n \n if pretrained_condition:\n model.load_state_dict(torch.load(pretrained_condition), strict=False)\n print(f'load lora weights from {pretrained_condition}')\n\n return model" }, { "identifier": "inference_ram", "path": "ram/inference.py", "snippet": "def inference_ram(image, model):\n\n with torch.no_grad():\n tags, tags_chinese = model.generate_tag(image)\n\n return tags[0],tags_chinese[0]" }, { "identifier": "get_transform", "path": "ram/transform.py", "snippet": "def get_transform(image_size=384):\n return Compose([\n convert_to_rgb,\n Resize((image_size, image_size)),\n ToTensor(),\n Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])" } ]
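The _gaussian_weights helper and the tile-stitching loop in the pipeline snippet above blend per-tile noise predictions with a gaussian mask and then divide by the accumulated weights, so overlapping tile regions average out without seams. A minimal NumPy sketch of that blending idea (hypothetical names; the pipeline itself operates on torch latent tensors) might be:

import numpy as np

def gaussian_tile_weights(tile_h: int, tile_w: int, var: float = 0.01) -> np.ndarray:
    """2D gaussian mask peaking at the tile centre (cf. _gaussian_weights above)."""
    def axis(n: int) -> np.ndarray:
        mid = (n - 1) / 2
        x = np.arange(n)
        return np.exp(-((x - mid) ** 2) / (n * n) / (2 * var)) / np.sqrt(2 * np.pi * var)
    return np.outer(axis(tile_h), axis(tile_w))

def blend_tiles(canvas_shape, tiles, offsets):
    """Accumulate gaussian-weighted tiles and normalise overlaps by the summed weights."""
    out = np.zeros(canvas_shape)
    contrib = np.zeros(canvas_shape)
    for tile, (y, x) in zip(tiles, offsets):
        h, w = tile.shape
        weight = gaussian_tile_weights(h, w)
        out[y:y + h, x:x + w] += tile * weight
        contrib[y:y + h, x:x + w] += weight
    return out / np.maximum(contrib, 1e-8)  # avoid division by zero in uncovered areas

The per-pixel normalisation is what allows adjacent tiles to overlap by a few latents while keeping the stitched noise prediction smooth across tile boundaries.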
import os import sys import cv2 import glob import argparse import numpy as np import torch import torch.utils.checkpoint import torch.nn as nn import torch.nn.functional as F from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers import AutoencoderKL, DDPMScheduler from diffusers.utils import check_min_version from diffusers.utils.import_utils import is_xformers_available from transformers import CLIPTextModel, CLIPTokenizer, CLIPImageProcessor from pipelines.pipeline_seesr import StableDiffusionControlNetPipeline from utils.misc import load_dreambooth_lora from utils.wavelet_color_fix import wavelet_color_fix, adain_color_fix from ram.models.ram_lora import ram from ram import inference_ram as inference from ram import get_transform from typing import Mapping, Any from torchvision import transforms from torchvision import transforms from models.controlnet import ControlNetModel from models.unet_2d_condition import UNet2DConditionModel
16009
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'): model = ram(pretrained='preset/models/ram_swin_large_14m.pth', pretrained_condition=args.ram_ft_path, image_size=384, vit='swin_l') model.eval() model.to(device) return model def get_validation_prompt(args, image, model, device='cuda'): validation_prompt = "" lq = tensor_transforms(image).unsqueeze(0).to(device) lq = ram_transforms(lq) res = inference(lq, model) ram_encoder_hidden_states = model.generate_image_embeds(lq) validation_prompt = f"{res[0]}, {args.prompt}," return validation_prompt, ram_encoder_hidden_states def main(args, enable_xformers_memory_efficient_attention=True,): txt_path = os.path.join(args.output_dir, 'txt') os.makedirs(txt_path, exist_ok=True) accelerator = Accelerator( mixed_precision=args.mixed_precision, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the output folder creation if accelerator.is_main_process: os.makedirs(args.output_dir, exist_ok=True) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("SeeSR") pipeline = load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention) model = load_tag_model(args, accelerator.device) if accelerator.is_main_process: generator = torch.Generator(device=accelerator.device) if args.seed is not None: generator.manual_seed(args.seed) if os.path.isdir(args.image_path): image_names = sorted(glob.glob(f'{args.image_path}/*.*')) else: image_names = [args.image_path] for image_idx, image_name in enumerate(image_names[:]): print(f'================== process {image_idx} imgs... 
===================') validation_image = Image.open(image_name).convert("RGB") validation_prompt, ram_encoder_hidden_states = get_validation_prompt(args, validation_image, model) validation_prompt += args.added_prompt # clean, extremely detailed, best quality, sharp, clean negative_prompt = args.negative_prompt #dirty, messy, low quality, frames, deformed, if args.save_prompts: txt_save_path = f"{txt_path}/{os.path.basename(image_name).split('.')[0]}.txt" file = open(txt_save_path, "w") file.write(validation_prompt) file.close() print(f'{validation_prompt}') ori_width, ori_height = validation_image.size resize_flag = False rscale = args.upscale if ori_width < args.process_size//rscale or ori_height < args.process_size//rscale: scale = (args.process_size//rscale)/min(ori_width, ori_height) tmp_image = validation_image.resize((int(scale*ori_width), int(scale*ori_height))) validation_image = tmp_image resize_flag = True validation_image = validation_image.resize((validation_image.size[0]*rscale, validation_image.size[1]*rscale)) validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8)) width, height = validation_image.size resize_flag = True # print(f'input size: {height}x{width}') for sample_idx in range(args.sample_times): os.makedirs(f'{args.output_dir}/sample{str(sample_idx).zfill(2)}/', exist_ok=True) for sample_idx in range(args.sample_times): with torch.autocast("cuda"): image = pipeline( validation_prompt, validation_image, num_inference_steps=args.num_inference_steps, generator=generator, height=height, width=width, guidance_scale=args.guidance_scale, negative_prompt=negative_prompt, conditioning_scale=args.conditioning_scale, start_point=args.start_point, ram_encoder_hidden_states=ram_encoder_hidden_states, latent_tiled_size=args.latent_tiled_size, latent_tiled_overlap=args.latent_tiled_overlap, args=args, ).images[0] if args.align_method == 'nofix': image = image else: if args.align_method == 'wavelet':
''' * SeeSR: Towards Semantics-Aware Real-World Image Super-Resolution * Modified from diffusers by Rongyuan Wu * 24/12/2023 ''' sys.path.append(os.getcwd()) logger = get_logger(__name__, log_level="INFO") tensor_transforms = transforms.Compose([ transforms.ToTensor(), ]) ram_transforms = transforms.Compose([ transforms.Resize((384, 384)), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) def load_state_dict_diffbirSwinIR(model: nn.Module, state_dict: Mapping[str, Any], strict: bool=False) -> None: state_dict = state_dict.get("state_dict", state_dict) is_model_key_starts_with_module = list(model.state_dict().keys())[0].startswith("module.") is_state_dict_key_starts_with_module = list(state_dict.keys())[0].startswith("module.") if ( is_model_key_starts_with_module and (not is_state_dict_key_starts_with_module) ): state_dict = {f"module.{key}": value for key, value in state_dict.items()} if ( (not is_model_key_starts_with_module) and is_state_dict_key_starts_with_module ): state_dict = {key[len("module."):]: value for key, value in state_dict.items()} model.load_state_dict(state_dict, strict=strict) def load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention): # Load scheduler, tokenizer and models. scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_path, subfolder="scheduler") text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_path, subfolder="text_encoder") tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_path, subfolder="tokenizer") vae = AutoencoderKL.from_pretrained(args.pretrained_model_path, subfolder="vae") feature_extractor = CLIPImageProcessor.from_pretrained(f"{args.pretrained_model_path}/feature_extractor") unet = UNet2DConditionModel.from_pretrained(args.seesr_model_path, subfolder="unet") controlnet = ControlNetModel.from_pretrained(args.seesr_model_path, subfolder="controlnet") # Freeze vae and text_encoder vae.requires_grad_(False) text_encoder.requires_grad_(False) unet.requires_grad_(False) controlnet.requires_grad_(False) if enable_xformers_memory_efficient_attention: if is_xformers_available(): unet.enable_xformers_memory_efficient_attention() controlnet.enable_xformers_memory_efficient_attention() else: raise ValueError("xformers is not available. Make sure it is installed correctly") # Get the validation pipeline validation_pipeline = StableDiffusionControlNetPipeline( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, feature_extractor=feature_extractor, unet=unet, controlnet=controlnet, scheduler=scheduler, safety_checker=None, requires_safety_checker=False, ) validation_pipeline._init_tiled_vae(encoder_tile_size=args.vae_encoder_tiled_size, decoder_tile_size=args.vae_decoder_tiled_size) # For mixed precision training we cast the text_encoder and vae weights to half-precision # as these models are only used for inference, keeping weights in full precision is not required. 
weight_dtype = torch.float32 if accelerator.mixed_precision == "fp16": weight_dtype = torch.float16 elif accelerator.mixed_precision == "bf16": weight_dtype = torch.bfloat16 # Move text_encode and vae to gpu and cast to weight_dtype text_encoder.to(accelerator.device, dtype=weight_dtype) vae.to(accelerator.device, dtype=weight_dtype) unet.to(accelerator.device, dtype=weight_dtype) controlnet.to(accelerator.device, dtype=weight_dtype) return validation_pipeline def load_tag_model(args, device='cuda'): model = ram(pretrained='preset/models/ram_swin_large_14m.pth', pretrained_condition=args.ram_ft_path, image_size=384, vit='swin_l') model.eval() model.to(device) return model def get_validation_prompt(args, image, model, device='cuda'): validation_prompt = "" lq = tensor_transforms(image).unsqueeze(0).to(device) lq = ram_transforms(lq) res = inference(lq, model) ram_encoder_hidden_states = model.generate_image_embeds(lq) validation_prompt = f"{res[0]}, {args.prompt}," return validation_prompt, ram_encoder_hidden_states def main(args, enable_xformers_memory_efficient_attention=True,): txt_path = os.path.join(args.output_dir, 'txt') os.makedirs(txt_path, exist_ok=True) accelerator = Accelerator( mixed_precision=args.mixed_precision, ) # If passed along, set the training seed now. if args.seed is not None: set_seed(args.seed) # Handle the output folder creation if accelerator.is_main_process: os.makedirs(args.output_dir, exist_ok=True) # We need to initialize the trackers we use, and also store our configuration. # The trackers initializes automatically on the main process. if accelerator.is_main_process: accelerator.init_trackers("SeeSR") pipeline = load_seesr_pipeline(args, accelerator, enable_xformers_memory_efficient_attention) model = load_tag_model(args, accelerator.device) if accelerator.is_main_process: generator = torch.Generator(device=accelerator.device) if args.seed is not None: generator.manual_seed(args.seed) if os.path.isdir(args.image_path): image_names = sorted(glob.glob(f'{args.image_path}/*.*')) else: image_names = [args.image_path] for image_idx, image_name in enumerate(image_names[:]): print(f'================== process {image_idx} imgs... 
===================') validation_image = Image.open(image_name).convert("RGB") validation_prompt, ram_encoder_hidden_states = get_validation_prompt(args, validation_image, model) validation_prompt += args.added_prompt # clean, extremely detailed, best quality, sharp, clean negative_prompt = args.negative_prompt #dirty, messy, low quality, frames, deformed, if args.save_prompts: txt_save_path = f"{txt_path}/{os.path.basename(image_name).split('.')[0]}.txt" file = open(txt_save_path, "w") file.write(validation_prompt) file.close() print(f'{validation_prompt}') ori_width, ori_height = validation_image.size resize_flag = False rscale = args.upscale if ori_width < args.process_size//rscale or ori_height < args.process_size//rscale: scale = (args.process_size//rscale)/min(ori_width, ori_height) tmp_image = validation_image.resize((int(scale*ori_width), int(scale*ori_height))) validation_image = tmp_image resize_flag = True validation_image = validation_image.resize((validation_image.size[0]*rscale, validation_image.size[1]*rscale)) validation_image = validation_image.resize((validation_image.size[0]//8*8, validation_image.size[1]//8*8)) width, height = validation_image.size resize_flag = True # print(f'input size: {height}x{width}') for sample_idx in range(args.sample_times): os.makedirs(f'{args.output_dir}/sample{str(sample_idx).zfill(2)}/', exist_ok=True) for sample_idx in range(args.sample_times): with torch.autocast("cuda"): image = pipeline( validation_prompt, validation_image, num_inference_steps=args.num_inference_steps, generator=generator, height=height, width=width, guidance_scale=args.guidance_scale, negative_prompt=negative_prompt, conditioning_scale=args.conditioning_scale, start_point=args.start_point, ram_encoder_hidden_states=ram_encoder_hidden_states, latent_tiled_size=args.latent_tiled_size, latent_tiled_overlap=args.latent_tiled_overlap, args=args, ).images[0] if args.align_method == 'nofix': image = image else: if args.align_method == 'wavelet':
image = wavelet_color_fix(image, validation_image)
2
2023-11-27 08:50:33+00:00
24k
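The gold next line for the record above is `image = wavelet_color_fix(image, validation_image)`. As a rough, self-contained sketch of what such a color fix does (this is not the repository's implementation; the box-blur low-pass, the level count, and the names `_blur` / `simple_color_fix` are invented for illustration), the low-frequency color and illumination of the reference input are transferred onto the generated image while its high-frequency detail is kept:

import torch
import torch.nn.functional as F
from PIL import Image
from torchvision.transforms.functional import to_pil_image, to_tensor

def _blur(x: torch.Tensor, radius: int) -> torch.Tensor:
    # cheap depthwise box blur standing in for the wavelet low-pass filter
    k = 2 * radius + 1
    kernel = torch.ones(x.shape[1], 1, k, k, device=x.device, dtype=x.dtype) / (k * k)
    return F.conv2d(F.pad(x, (radius,) * 4, mode="replicate"), kernel, groups=x.shape[1])

def simple_color_fix(generated: Image.Image, reference: Image.Image, levels: int = 5) -> Image.Image:
    gen = to_tensor(generated).unsqueeze(0)                        # [1, 3, H, W]
    ref = to_tensor(reference.resize(generated.size)).unsqueeze(0)
    gen_low, ref_low = gen, ref
    for i in range(levels):                                        # progressively larger low-pass
        radius = 2 ** i
        gen_low, ref_low = _blur(gen_low, radius), _blur(ref_low, radius)
    high_freq = gen - gen_low                                      # detail from the diffusion output
    return to_pil_image((high_freq + ref_low).clamp(0, 1).squeeze(0))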
xmu-xiaoma666/X-Dreamer
train_x_dreamer.py
[ { "identifier": "DatasetMesh", "path": "dataset/dataset_mesh.py", "snippet": "class DatasetMesh(torch.utils.data.Dataset):\n\n\n def __init__(self, glctx, FLAGS, validate=False, gif=False):\n # Init \n self.glctx = glctx\n self.FLAGS = FLAGS\n self.validate = validate\n self.gif = gif\n self.aspect = FLAGS.train_res[1] / FLAGS.train_res[0]\n self.fovy_range_min = np.deg2rad(FLAGS.fovy_range[0])\n self.fovy_range_max = np.deg2rad(FLAGS.fovy_range[1])\n self.elevation_range_min= np.deg2rad(FLAGS.elevation_range[0])\n self.elevation_range_max= np.deg2rad(FLAGS.elevation_range[1])\n self.angle_front = np.deg2rad(FLAGS.front_threshold)\n \n\n def _gif_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.display_res[1] / self.FLAGS.display_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 100) * np.pi * 2\n rotate_x = np.deg2rad(20)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(-rotate_x) @ util.rotate_y(ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n \n \n\n def _validate_scene(self, itr):\n fovy = np.deg2rad(45)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n ang = (itr / 4) * np.pi * 2\n rotate_x = np.random.uniform(-np.pi/4,np.pi/18)\n prompt_index = 0\n mv = util.translate(0, 0, -3) @ (util.rotate_x(rotate_x) @ util.rotate_y( ang ))\n normal_rotate = util.rotate_y_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(ang), torch.tensor([fovy])\n\n def _train_scene(self, itr):\n fovy = np.random.uniform(self.fovy_range_min, self.fovy_range_max)\n proj_mtx = util.perspective(fovy, self.FLAGS.train_res[1] / self.FLAGS.train_res[0], self.FLAGS.cam_near_far[0], self.FLAGS.cam_near_far[1])\n if self.FLAGS.gpu_number == 8: # All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0,4]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1,5]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2,6]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3,7]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n elif self.FLAGS.gpu_number == 4: #All the results in the paper were generated using 8 3090 GPUs. 
We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n if self.FLAGS.local_rank in [0]:\n rotate_y = np.random.uniform(np.deg2rad(-45), np.deg2rad(45))\n elif self.FLAGS.local_rank in [1]:\n rotate_y = np.random.uniform(np.deg2rad(45), np.deg2rad(135))\n elif self.FLAGS.local_rank in [2]:#back\n rotate_y = np.random.uniform( np.deg2rad(135), np.deg2rad(225))\n elif self.FLAGS.local_rank in [3]:\n rotate_y = np.random.uniform(np.deg2rad(-135), np.deg2rad(-45)) \n if rotate_y > np.pi:\n rotate_y = rotate_y - np.pi*2\n else:\n rotate_y = np.random.uniform(np.deg2rad(-180), np.deg2rad(180)) #All the results in the paper were generated using 8 3090 GPUs. We cannot guarantee that fewer than 8 GPUs can achieve the same effect.\n \n rotate_x = -np.random.uniform(self.elevation_range_min, self.elevation_range_max)\n # angle_front = np.deg2rad(45)\n prompt_index = get_view_direction(thetas= rotate_x, phis = rotate_y, front= self.angle_front)\n cam_radius = 3\n x = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n y = np.random.uniform(-self.FLAGS.camera_random_jitter, self.FLAGS.camera_random_jitter)\n mv = util.translate(x, y, -cam_radius) @ (util.rotate_x(rotate_x) @ util.rotate_y(rotate_y))\n if ((itr+1)/self.FLAGS.batch) <=self.FLAGS.coarse_iter:\n rotate_y1 = np.random.uniform(0,np.pi*2) \n rotate_x1 = np.random.uniform(-np.pi,np.pi)\n normal_rotate = util.rotate_y_1(rotate_y1 )@ util.rotate_x_1(rotate_x1) \n else:\n normal_rotate = util.rotate_y_1(0)@util.rotate_x_1(0)\n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n return mv[None, ...], mvp[None, ...], campos[None, ...], self.FLAGS.display_res, self.FLAGS.spp, normal_rotate[None,...], prompt_index, np.rad2deg(rotate_x), np.rad2deg(rotate_y), torch.tensor([fovy])\n\n def __len__(self):\n if self.gif == True:\n return 100\n else:\n return 4 if self.validate else (self.FLAGS.iter + 1) * self.FLAGS.batch\n\n def __getitem__(self, itr):\n if self.gif:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._gif_scene(itr)\n elif self.validate:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._validate_scene(itr)\n else:\n mv, mvp, campos, iter_res, iter_spp, normal_rotate, prompt_index, elev, azim, fov = self._train_scene(itr)\n\n return {\n 'mv' : mv,\n 'mvp' : mvp,\n 'campos' : campos,\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate': normal_rotate,\n 'prompt_index' : prompt_index,\n 'elev': elev,\n 'azim': azim,\n 'fov': fov\n }\n def collate(self, batch):\n iter_res, iter_spp = batch[0]['resolution'], batch[0]['spp']\n return {\n 'mv' : torch.cat(list([item['mv'] for item in batch]), dim=0),\n 'mvp' : torch.cat(list([item['mvp'] for item in batch]), dim=0),\n 'campos' : torch.cat(list([item['campos'] for item in batch]), dim=0),\n 'resolution' : iter_res,\n 'spp' : iter_spp,\n 'normal_rotate' : torch.cat(list([item['normal_rotate'] for item in batch]), dim=0),\n # 'prompt_index' : torch.cat(list([item['prompt_index'] for item in batch]), dim=0),\n 'prompt_index' : np.array([item['prompt_index'] for item in batch], dtype=np.int32),\n 'elev' : np.array([item['elev'] for item in batch], dtype=np.float16),\n 'azim' : np.array([item['azim'] for item in batch], dtype=np.float16),\n 'fov' : torch.cat(list([item['fov'] for item in batch]), dim=0),\n }" }, { "identifier": "get_camera_params", "path": "dataset/dataset_mesh.py", "snippet": "def get_camera_params(resolution= 512, fov=45, 
elev_angle=-20, azim_angle=0):\n fovy = np.deg2rad(fov) \n elev = np.radians( elev_angle )\n azim = np.radians( azim_angle ) \n proj_mtx = util.perspective(fovy, resolution /resolution, 1, 50)\n mv = util.translate(0, 0, -3) @ (util.rotate_x(elev) @ util.rotate_y(azim))\n normal_rotate = util.rotate_y_1(-azim ) @ util.rotate_x_1(-elev) \n # nomral_rotate = util.rotate_y_1(0) @ util.rotate_x_1(0) \n mvp = proj_mtx @ mv\n campos = torch.linalg.inv(mv)[:3, 3]\n bkgs = torch.ones(1, resolution, resolution, 3, dtype=torch.float32, device='cuda')\n return {\n 'mvp' : mvp[None, ...].cuda(),\n 'mv' : mv[None, ...].cuda(),\n 'campos' : campos[None, ...].cuda(),\n 'resolution' : [resolution, resolution], \n 'spp' : 1,\n 'background' : bkgs,\n 'normal_rotate' : normal_rotate[None,...].cuda(),\n 'elev_angle' : torch.tensor(elev_angle).cuda(),\n 'azim_angle' : torch.tensor(azim_angle).cuda(),\n 'fov' : torch.tensor(fovy).cuda(),\n }" }, { "identifier": "DMTetGeometry", "path": "geometry/dmtet_x_dreamer.py", "snippet": "class DMTetGeometry(torch.nn.Module):\n def __init__(self, grid_res, scale, FLAGS):\n super(DMTetGeometry, self).__init__()\n\n self.FLAGS = FLAGS\n self.grid_res = grid_res\n self.marching_tets = DMTet()\n \n tets = np.load('data/tets/{}_tets.npz'.format(self.grid_res))\n self.verts = torch.tensor(tets['vertices'], dtype=torch.float32, device='cuda') * scale\n print(\"tet grid min/max\", torch.min(self.verts).item(), torch.max(self.verts).item())\n self.decoder = Decoder(multires=0 , AABB= self.getAABB(), mesh_scale= scale)\n self.indices = torch.tensor(tets['indices'], dtype=torch.long, device='cuda')\n self.generate_edges()\n self.pos_encoder = CameraEncoder().to(self.verts.device)\n\n def generate_edges(self):\n with torch.no_grad():\n edges = torch.tensor([0,1,0,2,0,3,1,2,1,3,2,3], dtype = torch.long, device = \"cuda\")\n all_edges = self.indices[:,edges].reshape(-1,2) \n all_edges_sorted = torch.sort(all_edges, dim=1)[0]\n self.all_edges = torch.unique(all_edges_sorted, dim=0)\n\n @torch.no_grad()\n def getAABB(self):\n return torch.min(self.verts, dim=0).values, torch.max(self.verts, dim=0).values\n\n def getMesh(self, material):\n pred= self.decoder(self.verts)\n \n self.sdf , self.deform = pred[:, 0], pred[:, 1:] \n v_deformed = self.verts + 1 / (self.grid_res ) * torch.tanh(self.deform)\n verts, faces = self.marching_tets(v_deformed, self.sdf, self.indices)\n \n imesh = mesh.Mesh(verts, faces, material=material)\n imesh = mesh.auto_normals(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None, if_normal=False, mode = 'geometry_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material) \n return render.render_mesh(glctx, \n opt_mesh, \n target['mvp'], \n target['campos'], \n lgt, \n target['resolution'], \n spp=target['spp'], \n msaa= True,\n background= target['background'],\n bsdf= bsdf,\n if_normal= if_normal,\n normal_rotate= target['normal_rotate'],\n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n \n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal= if_normal, mode = 
mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]]) # [B*2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z]) # [B * 2, 77, 1024]\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n \n if iteration <=self.FLAGS.coarse_iter:\n t = torch.randint( guidance.min_step_early, guidance.max_step_early + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n pred_rgb_512 = buffers['shaded'][..., 0:4].permute(0, 3, 1, 2).contiguous() # [B, 4, 64, 64]\n latents = F.interpolate(pred_rgb_512, (64, 64), mode='bilinear', align_corners=False)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n \n else:\n t = torch.randint(guidance.min_step_late, guidance.max_step_late + 1, [self.FLAGS.batch], dtype=torch.long, device='cuda')\n srgb = buffers['shaded'][...,0:3] #* buffers['shaded'][..., 3:4] # normal * mask\n # \n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [B, 3, 512, 512]\n latents = guidance.encode_imgs(pred_rgb_512)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n\n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states=text_embeddings, index=indexs, came_posfeat=came_posfeat)\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred =noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond) # [B, 4, 64, 64]\n if iteration <= self.FLAGS.coarse_iter:\n w = (1 - guidance.alphas[t]) # [B]\n else:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w * (noise_pred - noise ) #*w1\n grad = torch.nan_to_num(grad)\n \n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n\n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), 
(32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask_sizes[i], mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "DLMesh", "path": "geometry/dlmesh_x_dreamer.py", "snippet": "class DLMesh(torch.nn.Module):\n def __init__(self, initial_guess, FLAGS):\n super(DLMesh, self).__init__()\n self.FLAGS = FLAGS\n self.initial_guess = initial_guess\n self.mesh = initial_guess.clone()\n self.pos_encoder = CameraEncoder().cuda()\n print(\"Base mesh has %d triangles and %d vertices.\" % (self.mesh.t_pos_idx.shape[0], self.mesh.v_pos.shape[0]))\n \n @torch.no_grad()\n def getAABB(self):\n return mesh.aabb(self.mesh)\n\n def getMesh(self, material):\n self.mesh.material = material\n\n imesh = mesh.Mesh(base=self.mesh)\n # Compute normals and tangent space\n imesh = mesh.auto_normals(imesh)\n imesh = mesh.compute_tangents(imesh)\n return imesh\n\n def render(self, glctx, target, lgt, opt_material, bsdf=None,if_normal=False, mode = 'appearance_modeling', if_flip_the_normal = False, if_use_bump = False):\n opt_mesh = self.getMesh(opt_material)\n return render.render_mesh(glctx, \n opt_mesh,\n target['mvp'],\n target['campos'],\n lgt,\n target['resolution'], \n spp=target['spp'], \n msaa=True,\n background= target['background'] ,\n bsdf= bsdf,\n if_normal=if_normal,\n normal_rotate=target['normal_rotate'], \n mode = mode,\n if_flip_the_normal = if_flip_the_normal,\n if_use_bump = if_use_bump\n )\n\n def tick(self, glctx, target, lgt, opt_material, iteration, if_normal, guidance, mode, if_flip_the_normal, if_use_bump):\n # ==============================================================================================\n # Render optimizable object with identical conditions\n # ==============================================================================================\n buffers= self.render(glctx, target, lgt, opt_material, if_normal = if_normal, mode = mode, if_flip_the_normal = if_flip_the_normal, if_use_bump = if_use_bump)\n if self.FLAGS.add_directional_text:\n text_embeddings = torch.cat([guidance.uncond_z[target['prompt_index']], guidance.text_z[target['prompt_index']]])\n indexs = torch.cat([guidance.uncond_index[target['prompt_index']], guidance.index[target['prompt_index']]]) # [B*2, 77, 1024]\n else:\n text_embeddings = torch.cat([guidance.uncond_z, guidance.text_z])\n indexs = torch.cat([guidance.uncond_index, guidance.index]) # [B*2, 77, 1024]\n\n\n if iteration <= self.FLAGS.coarse_iter:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_early, guidance.max_step_early+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n else:\n srgb = buffers['shaded'][...,0:3]\n srgb = util.rgb_to_srgb(srgb)\n mask = (buffers['shaded'][..., 3:4]).permute(0, 3, 1, 
2).contiguous()\n mask2 = mask.squeeze()\n t = torch.randint( guidance.min_step_late, guidance.max_step_late+1, [self.FLAGS.batch], dtype=torch.long, device='cuda') # [B]\n\n pred_rgb_512 = srgb.permute(0, 3, 1, 2).contiguous() # [1, 3, H, W]\n latents = guidance.encode_imgs(pred_rgb_512)\n \n ### calculate camera pos feature\n came_pos = torch.cat([target['campos'],torch.from_numpy(target['elev']).unsqueeze(-1).cuda(),torch.from_numpy(target['azim']).cuda().unsqueeze(-1),target['fov'].unsqueeze(-1)],dim=-1)\n came_pos = torch.cat([came_pos,came_pos],dim=0) #bs*2, 5\n came_pos = normalize_camera(came_pos,self.FLAGS)\n came_posfeat = self.pos_encoder(came_pos)\n\n\n # add noise\n noise = torch.randn_like(latents)\n latents_noisy = guidance.scheduler.add_noise(latents, noise, t)\n # pred noise\n latent_model_input = torch.cat([latents_noisy] * 2)\n tt = torch.cat([t] * 2)\n noise_pred, attention_map = guidance.unet(latent_model_input, tt, encoder_hidden_states= text_embeddings, index=indexs, came_posfeat=came_posfeat)#.sample######################\n noise_pred = noise_pred.sample\n\n attention_map[0] = attention_map[0].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n attention_map[1] = attention_map[1].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[2] = attention_map[2].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[3] = attention_map[3].reshape(self.FLAGS.batch*2, 8 , 8 ).contiguous()\n attention_map[4] = attention_map[4].reshape(self.FLAGS.batch*2, 16, 16).contiguous()\n attention_map[5] = attention_map[5].reshape(self.FLAGS.batch*2, 32, 32).contiguous()\n attention_map[6] = attention_map[6].reshape(self.FLAGS.batch*2, 64, 64).contiguous()\n\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance.guidance_weight * (noise_pred_text - noise_pred_uncond)\n \n if guidance.sds_weight_strategy == 0:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 1:\n w = 1 / (1 - guidance.alphas[t])\n elif guidance.sds_weight_strategy == 2:\n if iteration <= self.FLAGS.coarse_iter:\n w = guidance.alphas[t] ** 0.5 * (1 - guidance.alphas[t])\n else:\n w = 1 / (1 - guidance.alphas[t])\n w = w[:, None, None, None] # [B, 1, 1, 1]\n grad = w* (noise_pred -noise) \n grad = torch.nan_to_num(grad)\n sds_loss = SpecifyGradient.apply(latents, grad) \n img_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n reg_loss = torch.tensor([0], dtype=torch.float32, device=\"cuda\")\n \n attention_loss = 0\n mask_sizes = [(64, 64), (32,32), (16,16), (8,8), (16,16), (32,32), (64,64)]\n for i in range(7):\n _, attention_map_text = attention_map[i].chunk(2)\n if(self.FLAGS.batch==1):\n mask2 = F.interpolate(mask2.unsqueeze(0).unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n else:\n mask2 = F.interpolate(mask2.unsqueeze(0), mask_sizes[i], mode='bilinear').squeeze()\n attention_map_text = (attention_map_text - attention_map_text.min())/(attention_map_text.max() - attention_map_text.min()+1e-6)\n attention_map_text = F.interpolate(attention_map_text.unsqueeze(0), size=mask2.shape, mode='bilinear', align_corners=False).squeeze()\n attention_loss = 0.1*F.l1_loss(mask2.float(), attention_map_text.float(), reduction=\"mean\") #0.1 1 10\n attention_loss = attention_loss/7\n \n return sds_loss, img_loss, reg_loss, attention_loss" }, { "identifier": "obj", "path": "render/obj.py", "snippet": "def _find_mat(materials, name):\ndef load_obj(filename, clear_ks=True, mtl_override=None):\ndef 
write_obj(folder, mesh, save_material=True):" }, { "identifier": "material", "path": "render/material.py", "snippet": "class Material(torch.nn.Module):\n def __init__(self, mat_dict):\n def __contains__(self, key):\n def __getitem__(self, key):\n def __setitem__(self, key, val):\n def __delitem__(self, key):\n def keys(self):\ndef load_mtl(fn, clear_ks=True):\ndef save_mtl(fn, material):\ndef _upscale_replicate(x, full_res):\ndef merge_materials(materials, texcoords, tfaces, mfaces):" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B = y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "mesh", "path": "render/mesh.py", "snippet": "class Mesh:\n def __init__(self, v_pos=None, t_pos_idx=None, v_nrm=None, t_nrm_idx=None, v_tex=None, t_tex_idx=None, v_tng=None, t_tng_idx=None, material=None, base=None):\n def copy_none(self, other):\n def clone(self):\ndef load_mesh(filename, mtl_override=None):\ndef aabb(mesh):\ndef compute_edges(attr_idx, 
return_inverse=False):\ndef compute_edge_to_face_mapping(attr_idx, return_inverse=False):\ndef unit_size(mesh):\ndef center_by_reference(base_mesh, ref_aabb, scale):\ndef auto_normals(imesh):\ndef compute_tangents(imesh):" }, { "identifier": "texture", "path": "render/texture.py", "snippet": "class texture2d_mip(torch.autograd.Function):\nclass Texture2D(torch.nn.Module):\n def forward(ctx, texture):\n def backward(ctx, dout):\n def __init__(self, init, min_max=None):\n def sample(self, texc, texc_deriv, filter_mode='linear-mipmap-linear'):\n def getRes(self):\n def getChannels(self):\n def getMips(self):\n def clamp_(self):\n def normalize_(self):\ndef create_trainable(init, res=None, auto_mipmaps=True, min_max=None):\ndef srgb_to_rgb(texture):\ndef rgb_to_srgb(texture):\ndef _load_mip2D(fn, lambda_fn=None, channels=None):\ndef load_texture2D(fn, lambda_fn=None, channels=None):\ndef _save_mip2D(fn, mip, mipidx, lambda_fn):\ndef save_texture2D(fn, tex, lambda_fn=None):" }, { "identifier": "mlptexture", "path": "render/mlptexture.py", "snippet": "class _MLP(torch.nn.Module):\nclass MLPTexture3D(torch.nn.Module):\n def __init__(self, cfg, loss_scale=1.0):\n def forward(self, x):\n def _init_weights(m):\n def __init__(self, AABB, channels = 3, internal_dims = 32, hidden = 1, min_max = None):\n def sample(self, texc):\n def clamp_(self):\n def cleanup(self):" }, { "identifier": "light", "path": "render/light.py", "snippet": "class cubemap_mip(torch.autograd.Function):\nclass EnvironmentLight(torch.nn.Module):\n def forward(ctx, cubemap):\n def backward(ctx, dout):\n def __init__(self, base):\n def xfm(self, mtx):\n def clone(self):\n def clamp_(self, min=None, max=None):\n def get_mip(self, roughness):\n def build_mips(self, cutoff=0.99):\n def regularizer(self):\n def shade(self, gb_pos, gb_normal, kd, ks, view_pos, specular=True):\ndef _load_env_hdr(fn, scale=1.0):\ndef load_env(fn, scale=1.0):\ndef save_env_map(fn, light):\ndef create_trainable_env_rnd(base_res, scale=0.5, bias=0.25):\n LIGHT_MIN_RES = 16\n MIN_ROUGHNESS = 0.08\n MAX_ROUGHNESS = 0.5" }, { "identifier": "render", "path": "render/render.py", "snippet": "def interpolate(attr, rast, attr_idx, rast_db=None):\ndef shade(\n gb_pos,\n gb_geometric_normal,\n gb_normal,\n gb_tangent,\n gb_texc,\n gb_texc_deriv,\n view_pos,\n lgt,\n material,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_layer(\n rast,\n rast_deriv,\n mesh,\n view_pos,\n lgt,\n resolution,\n spp,\n msaa,\n bsdf,\n if_normal,\n normal_rotate,\n mode,\n if_flip_the_normal,\n if_use_bump\n ):\ndef render_mesh(\n ctx,\n mesh,\n mtx_in,\n view_pos,\n lgt,\n resolution,\n spp = 1,\n num_layers = 1,\n msaa = False,\n background = None, \n bsdf = None,\n if_normal = False,\n normal_rotate = None,\n mode = 'geometry_modeling',\n if_flip_the_normal = False,\n if_use_bump = False\n ):\n def prepare_input_vector(x):\n def composite_buffer(key, layers, background, antialias):\ndef render_uv(ctx, mesh, resolution, mlp_texture):\ndef uv_padding(image, hole_mask, padding = 2, uv_padding_block = 4):\ndef render_uv1(ctx, mesh, resolution, mlp_texture, uv_padding_block):" }, { "identifier": "StableDiffusion", "path": "sd_cglora.py", "snippet": "class StableDiffusion(nn.Module):\n def __init__(self, \n device, \n mode='geometry', \n text= '', \n add_directional_text= False, \n batch = 1, \n guidance_weight = 100, \n sds_weight_strategy = 0,\n early_time_step_range = [0.02, 0.5],\n late_time_step_range = [0.02, 0.5]):\n 
super().__init__()\n\n self.device = device\n self.mode = mode\n self.text= text\n self.add_directional_text = add_directional_text\n self.batch = batch \n print(f'[INFO] loading stable diffusion...')\n model_key = \"stabilityai/stable-diffusion-2-1-base\"\n self.vae = AutoencoderKL.from_pretrained(model_key, subfolder=\"vae\",torch_dtype=torch.float16).to(self.device)\n self.tokenizer = CLIPTokenizer.from_pretrained(model_key, subfolder=\"tokenizer\",torch_dtype=torch.float16)\n self.text_encoder = CLIPTextModel.from_pretrained(model_key, subfolder=\"text_encoder\",torch_dtype=torch.float16).to(self.device)\n self.unet = UNet2DConditionModel.from_pretrained(model_key, subfolder=\"unet\",torch_dtype=torch.float16).to(self.device)\n if is_xformers_available():\n self.unet.enable_xformers_memory_efficient_attention()\n self.negative_text = ''\n if add_directional_text:\n self.text_z = []\n self.uncond_z = []\n self.index = []\n self.uncond_index = []\n for d in ['front', 'side', 'back', 'side']:\n text = f\"{self.text}, {d} view\"\n # text = f\"{d} view of {self.text}\"\n negative_text = f\"{self.negative_text}\"\n # if d == 'back': negative_text += \"face\"\n text_z, index = self.get_text_embeds([text], batch = 1)\n uncond_z, uncond_index =self.get_uncond_embeds([negative_text], batch = 1)\n self.text_z.append(text_z)\n self.uncond_z.append(uncond_z)\n self.index.append(index)\n self.uncond_index.append(uncond_index)\n self.text_z = torch.cat(self.text_z)\n self.uncond_z = torch.cat(self.uncond_z)\n self.index = torch.cat(self.index)\n self.uncond_index = torch.cat(self.uncond_index)\n else: \n self.text_z, self.index = self.get_text_embeds([self.text], batch = self.batch)\n self.uncond_z =self.get_uncond_embeds([self.negative_text], batch = self.batch)\n # del self.text_encoder\n self.scheduler = DPMSolverMultistepScheduler.from_pretrained(model_key, subfolder=\"scheduler\", torch_dtype=torch.float16)\n self.num_train_timesteps = self.scheduler.config.num_train_timesteps\n self.min_step_early = int(self.num_train_timesteps * early_time_step_range[0])\n self.max_step_early = int(self.num_train_timesteps * early_time_step_range[1])\n self.min_step_late = int(self.num_train_timesteps * late_time_step_range[0])\n self.max_step_late = int(self.num_train_timesteps * late_time_step_range[1])\n self.alphas = self.scheduler.alphas_cumprod.to(self.device) # for convenience\n self.guidance_weight = guidance_weight\n self.sds_weight_strategy = sds_weight_strategy\n print(f'[INFO] loaded stable diffusion!')\n\n for p in self.parameters():\n p.requires_grad_(False)\n self.unet_lora_params, self.names = inject_trainable_cglora(self.unet) # This will\n\n\n def get_text_embeds_global(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n \n global_embedding = text_embeddings[:,text_input['input_ids'].argmax(dim=-1),:].squeeze()\n \n return global_embedding\n\n\n def get_text_embeds(self, prompt, batch=1):\n text_input = self.tokenizer(prompt, padding='max_length', max_length=self.tokenizer.model_max_length, truncation=True, return_tensors='pt')\n with torch.no_grad():\n text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]\n if batch > 1:\n text_embeddings = text_embeddings.repeat(batch, 1, 1)\n 
###################################################################\n index = text_input['input_ids'].argmax(dim=-1)\n #global_embedding = text_embeddings[:, index, :].squeeze()\n ##################################################################\n \n return text_embeddings, index\n \n def get_uncond_embeds(self, negative_prompt, batch):\n uncond_input = self.tokenizer(negative_prompt, padding='max_length', max_length=self.tokenizer.model_max_length, return_tensors='pt')\n with torch.no_grad():\n uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]\n \n if batch > 1:\n uncond_embeddings = uncond_embeddings.repeat(batch, 1, 1)\n ###################################################################\n index = uncond_input['input_ids'].argmax(dim=-1)\n # global_embedding = uncond_embeddings[:, index, :].squeeze()\n ##################################################################\n return uncond_embeddings,index\n\n def encode_imgs(self, imgs):\n # imgs: [B, 3, H, W]\n if self.mode == 'appearance_modeling':\n \n imgs = 2 * imgs - 1\n\n posterior = self.vae.encode(imgs).latent_dist\n latents = posterior.sample() * 0.18215\n\n return latents" }, { "identifier": "util", "path": "render/util.py", "snippet": "def dot(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\ndef reflect(x: torch.Tensor, n: torch.Tensor) -> torch.Tensor:\ndef length(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef safe_normalize(x: torch.Tensor, eps: float =1e-20) -> torch.Tensor:\ndef to_hvec(x: torch.Tensor, w: float) -> torch.Tensor:\ndef _rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef rgb_to_srgb(f: torch.Tensor) -> torch.Tensor:\ndef _srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef srgb_to_rgb(f: torch.Tensor) -> torch.Tensor:\ndef reinhard(f: torch.Tensor) -> torch.Tensor:\ndef mse_to_psnr(mse):\ndef psnr_to_mse(psnr):\ndef get_miplevels(texture: np.ndarray) -> float:\ndef tex_2d(tex_map : torch.Tensor, coords : torch.Tensor, filter='nearest') -> torch.Tensor:\ndef cube_to_dir(s, x, y):\ndef latlong_to_cubemap(latlong_map, res):\ndef cubemap_to_latlong(cubemap, res):\ndef scale_img_hwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef scale_img_nhwc(x : torch.Tensor, size, mag='bilinear', min='area') -> torch.Tensor:\ndef avg_pool_nhwc(x : torch.Tensor, size) -> torch.Tensor:\ndef segment_sum(data: torch.Tensor, segment_ids: torch.Tensor) -> torch.Tensor:\ndef fovx_to_fovy(fovx, aspect):\ndef focal_length_to_fovy(focal_length, sensor_height):\ndef perspective(fovy=0.7854, aspect=1.0, n=0.1, f= 1000.0, device=None):\ndef perspective_offcenter(fovy, fraction, rx, ry, aspect=1.0, n=0.1, f=1000.0, device=None):\ndef translate(x, y, z, device=None):\ndef rotate_x(a, device=None):\ndef rotate_x_1(a, device=None):\ndef rotate_y(a, device=None):\ndef rotate_y_1(a, device=None):\ndef rotate_y_2(a, device=None):\ndef rotate_x_2(a, device=None):\ndef scale(s, device=None):\ndef lookAt(eye, at, up):\ndef random_rotation_translation(t, device=None):\ndef random_rotation(device=None):\ndef lines_focal(o, d):\ndef cosine_sample(N, size=None):\ndef bilinear_downsample(x : torch.tensor) -> torch.Tensor:\ndef bilinear_downsample(x : torch.tensor, spp) -> torch.Tensor:\ndef init_glfw():\ndef save_image(fn, x : np.ndarray):\ndef save_image_raw(fn, x : np.ndarray):\ndef load_image_raw(fn) -> np.ndarray:\ndef load_image(fn) -> np.ndarray:\ndef time_to_text(x):\ndef checkerboard(res, checker_size) -> np.ndarray:\ndef get_random_bg(h, w):\n R, L = aspect*y, -aspect*y\n T, B 
= y, -y\n I = torch.eye(3, dtype=o.dtype, device=o.device)\n S = torch.sum(d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...], dim=0)\n C = torch.sum((d[..., None] @ torch.transpose(d[..., None], 1, 2) - I[None, ...]) @ o[..., None], dim=0).squeeze(1)\n N = N/torch.linalg.norm(N)" }, { "identifier": "Video", "path": "render/video.py", "snippet": "class Video():\n def __init__(self, path, name='video_log.mp4', mode='I', fps=30, codec='libx264', bitrate='16M') -> None:\n \n if path[-1] != \"/\":\n path += \"/\"\n \n self.writer = imageio.get_writer(path+name, mode=mode, fps=fps, codec=codec, bitrate=bitrate)\n \n def ready_image(self, image, write_video=True):\n # assuming channels last - as renderer returns it\n if len(image.shape) == 4: \n image = image.squeeze(0)[..., :3].detach().cpu().numpy()\n else:\n image = image[..., :3].detach().cpu().numpy()\n\n image = np.clip(np.rint(image*255.0), 0, 255).astype(np.uint8)\n\n if write_video:\n self.writer.append_data(image)\n\n return image\n\n def close(self):\n self.writer.close()" } ]
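The `tick()` snippets in the context above push a hand-computed gradient `w * (noise_pred - noise)` into autograd via `SpecifyGradient.apply(latents, grad)`. A generic sketch of that gradient-injection pattern from score distillation sampling (not the repository's exact class; the name `InjectGradient` is made up here) is:

import torch

class InjectGradient(torch.autograd.Function):
    """Route an externally computed gradient into `input_tensor` on backward."""

    @staticmethod
    def forward(ctx, input_tensor: torch.Tensor, precomputed_grad: torch.Tensor):
        ctx.save_for_backward(precomputed_grad)
        # the forward value is a dummy scalar; only the backward path matters
        return torch.zeros(1, device=input_tensor.device, dtype=input_tensor.dtype)

    @staticmethod
    def backward(ctx, grad_output):
        (precomputed_grad,) = ctx.saved_tensors
        # gradient w.r.t. input_tensor, none w.r.t. the precomputed gradient itself
        return grad_output * precomputed_grad, None

# usage sketch: sds_loss = InjectGradient.apply(latents, w * (noise_pred - noise)); sds_loss.backward()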
import os import time import argparse import json import math import numpy as np import torch import nvdiffrast.torch as dr import itertools import xatlas import open3d as o3d import random import imageio import os.path as osp import pickle from dataset.dataset_mesh import DatasetMesh from dataset.dataset_mesh import get_camera_params from geometry.dmtet_x_dreamer import DMTetGeometry from geometry.dlmesh_x_dreamer import DLMesh from render import obj from render import material from render import util from render import mesh from render import texture from render import mlptexture from render import light from render import render from sd_cglora import StableDiffusion from tqdm import tqdm from render import util from render.video import Video
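For reference, the recurring line `noise_pred_uncond + guidance_weight * (noise_pred_text - noise_pred_uncond)` in the guidance snippets is plain classifier-free guidance. A minimal stand-alone version (shapes and the weight are illustrative, not taken from any config in this record) could look like:

import torch

def cfg_combine(noise_pred: torch.Tensor, guidance_weight: float) -> torch.Tensor:
    # noise_pred stacks [unconditional; text-conditioned] along dim 0, matching the
    # torch.cat([latents_noisy] * 2) duplication used in the snippets above
    noise_uncond, noise_text = noise_pred.chunk(2)
    return noise_uncond + guidance_weight * (noise_text - noise_uncond)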
15,112
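The `get_camera_params` and `_train_scene` snippets above build a model-view-projection as `proj_mtx @ translate(0, 0, -3) @ (rotate_x(elev) @ rotate_y(azim))`. A hedged NumPy sketch of that composition (OpenGL-style symmetric perspective; sign and handedness conventions may differ from the repository's `render.util`) is:

import numpy as np

def camera_mvp(elev_deg: float, azim_deg: float, radius: float = 3.0,
               fov_deg: float = 45.0, aspect: float = 1.0,
               near: float = 1.0, far: float = 50.0) -> np.ndarray:
    e, a, fov = np.deg2rad([elev_deg, azim_deg, fov_deg])
    rot_x = np.array([[1, 0, 0, 0],
                      [0, np.cos(e), -np.sin(e), 0],
                      [0, np.sin(e), np.cos(e), 0],
                      [0, 0, 0, 1]])
    rot_y = np.array([[np.cos(a), 0, np.sin(a), 0],
                      [0, 1, 0, 0],
                      [-np.sin(a), 0, np.cos(a), 0],
                      [0, 0, 0, 1]])
    trans = np.eye(4)
    trans[2, 3] = -radius                                # camera pushed back along -z
    t = np.tan(fov / 2)
    proj = np.array([[1 / (aspect * t), 0, 0, 0],
                     [0, 1 / t, 0, 0],
                     [0, 0, -(far + near) / (far - near), -2 * far * near / (far - near)],
                     [0, 0, -1, 0]])
    return proj @ trans @ rot_x @ rot_y                  # mvp = proj @ mv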
FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! 
(geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' : geometry = DMTetGeometry(FLAGS.dmtet_grid, FLAGS.mesh_scale, FLAGS) mat = initial_guness_material(geometry, True, FLAGS) # Run optimization geometry, mat = optimize_mesh(glctx, geometry, mat, lgt, dataset_train, dataset_validate, FLAGS, optimize_light=FLAGS.learn_light,optimize_geometry= not FLAGS.lock_pos, guidance= guidance, scene_and_vertices= scene_and_vertices) if FLAGS.local_rank == 0 and FLAGS.validate: validate(glctx, geometry, mat, lgt, dataset_gif, os.path.join(FLAGS.out_dir, "validate"), FLAGS) # Create textured mesh from result if FLAGS.local_rank == 0: base_mesh = xatlas_uvmap(glctx, geometry, mat, FLAGS) # # Free temporaries / cached memory torch.cuda.empty_cache() mat['kd_ks_normal'].cleanup() del mat['kd_ks_normal'] if FLAGS.local_rank == 0: # Dump mesh for debugging. os.makedirs(os.path.join(FLAGS.out_dir, "dmtet_mesh"), exist_ok=True) obj.write_obj(os.path.join(FLAGS.out_dir, "dmtet_mesh/"), base_mesh) elif FLAGS.mode == 'appearance_modeling': # ============================================================================================== # Train with fixed topology (mesh) # ============================================================================================== if FLAGS.base_mesh is None: assert False, "[Error] The path of custom mesh is invalid ! (appearance modeling)" base_mesh = mesh.load_mesh(FLAGS.base_mesh)
############################################################################### # Mix background into a dataset image ############################################################################### @torch.no_grad() def prepare_batch(target, background= 'black'): target['mv'] = target['mv'].cuda() target['mvp'] = target['mvp'].cuda() target['campos'] = target['campos'].cuda() target['fov'] = target['fov'].cuda() target['normal_rotate'] = target['normal_rotate'].cuda() batch_size = target['mv'].shape[0] resolution = target['resolution'] if background == 'white': target['background']= torch.ones(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') if background == 'black': target['background'] = torch.zeros(batch_size, resolution[0], resolution[1], 3, dtype=torch.float32, device='cuda') return target ############################################################################### # UV - map geometry & convert to a mesh ############################################################################### @torch.no_grad() def xatlas_uvmap(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) # Create uvs with xatlas v_pos = eval_mesh.v_pos.detach().cpu().numpy() t_pos_idx = eval_mesh.t_pos_idx.detach().cpu().numpy() vmapping, indices, uvs = xatlas.parametrize(v_pos, t_pos_idx) # Convert to tensors indices_int64 = indices.astype(np.uint64, casting='same_kind').view(np.int64) uvs = torch.tensor(uvs, dtype=torch.float32, device='cuda') faces = torch.tensor(indices_int64, dtype=torch.int64, device='cuda') new_mesh = mesh.Mesh(v_tex=uvs, t_tex_idx=faces, base=eval_mesh) mask, kd, ks, normal = render.render_uv(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal']) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh @torch.no_grad() def xatlas_uvmap1(glctx, geometry, mat, FLAGS): eval_mesh = geometry.getMesh(mat) new_mesh = mesh.Mesh( base=eval_mesh) mask, kd, ks, normal = render.render_uv1(glctx, new_mesh, FLAGS.texture_res, eval_mesh.material['kd_ks_normal'], FLAGS.uv_padding_block) if FLAGS.layers > 1: kd = torch.cat((kd, torch.rand_like(kd[...,0:1])), dim=-1) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') new_mesh.material = material.Material({ 'bsdf' : mat['bsdf'], 'kd' : texture.Texture2D(kd, min_max=[kd_min, kd_max]), 'ks' : texture.Texture2D(ks, min_max=[ks_min, ks_max]), 'normal' : texture.Texture2D(normal, min_max=[nrm_min, nrm_max]) }) return new_mesh 
############################################################################### # Utility functions for material ############################################################################### def get_normalize_mesh(pro_path): mesh = o3d.io.read_triangle_mesh(pro_path) vertices = np.asarray(mesh.vertices) shift = np.mean(vertices,axis=0) scale = np.max(np.linalg.norm(vertices-shift, ord=2, axis=1)) vertices = (vertices-shift) / scale mesh.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) return mesh def initial_guness_material(geometry, mlp, FLAGS, init_mat=None): # ipdb.set_trace(()) kd_min, kd_max = torch.tensor(FLAGS.kd_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.kd_max, dtype=torch.float32, device='cuda') ks_min, ks_max = torch.tensor(FLAGS.ks_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.ks_max, dtype=torch.float32, device='cuda') nrm_min, nrm_max = torch.tensor(FLAGS.nrm_min, dtype=torch.float32, device='cuda'), torch.tensor(FLAGS.nrm_max, dtype=torch.float32, device='cuda') if mlp: mlp_min = torch.cat((kd_min[0:3], ks_min, nrm_min), dim=0) mlp_max = torch.cat((kd_max[0:3], ks_max, nrm_max), dim=0) mlp_map_opt = mlptexture.MLPTexture3D(geometry.getAABB(), channels=9, min_max=[mlp_min, mlp_max]) mat = material.Material({'kd_ks_normal' : mlp_map_opt}) else: # Setup Kd (albedo) and Ks (x, roughness, metalness) textures if FLAGS.random_textures or init_mat is None: num_channels = 4 if FLAGS.layers > 1 else 3 kd_init = torch.rand(size=FLAGS.texture_res + [num_channels], device='cuda') * (kd_max - kd_min)[None, None, 0:num_channels] + kd_min[None, None, 0:num_channels] kd_map_opt = texture.create_trainable(kd_init , FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ksR = np.random.uniform(size=FLAGS.texture_res + [1], low=0.0, high=0.01) ksG = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[1].cpu(), high=ks_max[1].cpu()) ksB = np.random.uniform(size=FLAGS.texture_res + [1], low=ks_min[2].cpu(), high=ks_max[2].cpu()) ks_map_opt = texture.create_trainable(np.concatenate((ksR, ksG, ksB), axis=2), FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) else: kd_map_opt = texture.create_trainable(init_mat['kd'], FLAGS.texture_res, not FLAGS.custom_mip, [kd_min, kd_max]) ks_map_opt = texture.create_trainable(init_mat['ks'], FLAGS.texture_res, not FLAGS.custom_mip, [ks_min, ks_max]) # Setup normal map if FLAGS.random_textures or init_mat is None or 'normal' not in init_mat: normal_map_opt = texture.create_trainable(np.array([0, 0, 1]), FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) else: normal_map_opt = texture.create_trainable(init_mat['normal'], FLAGS.texture_res, not FLAGS.custom_mip, [nrm_min, nrm_max]) mat = material.Material({ 'kd' : kd_map_opt, 'ks' : ks_map_opt, 'normal' : normal_map_opt }) if init_mat is not None: mat['bsdf'] = init_mat['bsdf'] else: mat['bsdf'] = 'pbr' return mat ############################################################################### # Validation & testing ############################################################################### # @torch.no_grad() def validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight = None): result_dict = {} with torch.no_grad(): if FLAGS.mode == 'appearance_modeling': with torch.no_grad(): lgt.build_mips() if FLAGS.camera_space_light: lgt.xfm(target['mv']) if relight != None: relight.build_mips() buffers = geometry.render(glctx, target, lgt, opt_material, if_use_bump = FLAGS.if_use_bump) result_dict['shaded'] = buffers['shaded'][0, ..., 
0:3] result_dict['shaded'] = util.rgb_to_srgb(result_dict['shaded']) if relight != None: result_dict['relight'] = geometry.render(glctx, target, relight, opt_material, if_use_bump = FLAGS.if_use_bump)['shaded'][0, ..., 0:3] result_dict['relight'] = util.rgb_to_srgb(result_dict['relight']) result_dict['mask'] = (buffers['shaded'][0, ..., 3:4]) result_image = result_dict['shaded'] if FLAGS.display is not None : # white_bg = torch.ones_like(target['background']) for layer in FLAGS.display: if 'latlong' in layer and layer['latlong']: if isinstance(lgt, light.EnvironmentLight): result_dict['light_image'] = util.cubemap_to_latlong(lgt.base, FLAGS.display_res) result_image = torch.cat([result_image, result_dict['light_image']], axis=1) elif 'bsdf' in layer: buffers = geometry.render(glctx, target, lgt, opt_material, bsdf=layer['bsdf'], if_use_bump = FLAGS.if_use_bump) if layer['bsdf'] == 'kd': result_dict[layer['bsdf']] = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) elif layer['bsdf'] == 'normal': result_dict[layer['bsdf']] = (buffers['shaded'][0, ..., 0:3] + 1) * 0.5 else: result_dict[layer['bsdf']] = buffers['shaded'][0, ..., 0:3] result_image = torch.cat([result_image, result_dict[layer['bsdf']]], axis=1) return result_image, result_dict def save_gif(dir,fps): imgpath = dir frames = [] for idx in sorted(os.listdir(imgpath)): img = osp.join(imgpath,idx) frames.append(imageio.imread(img)) imageio.mimsave(os.path.join(dir, 'eval.gif'),frames,'GIF',duration=1/fps,loop=0) @torch.no_grad() def validate(glctx, geometry, opt_material, lgt, dataset_validate, out_dir, FLAGS, relight= None): # ============================================================================================== # Validation loop # ============================================================================================== mse_values = [] psnr_values = [] dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_validate.collate) os.makedirs(out_dir, exist_ok=True) shaded_dir = os.path.join(out_dir, "shaded") relight_dir = os.path.join(out_dir, "relight") kd_dir = os.path.join(out_dir, "kd") ks_dir = os.path.join(out_dir, "ks") normal_dir = os.path.join(out_dir, "normal") mask_dir = os.path.join(out_dir, "mask") os.makedirs(shaded_dir, exist_ok=True) os.makedirs(relight_dir, exist_ok=True) os.makedirs(kd_dir, exist_ok=True) os.makedirs(ks_dir, exist_ok=True) os.makedirs(normal_dir, exist_ok=True) os.makedirs(mask_dir, exist_ok=True) print("Running validation") dataloader_validate = tqdm(dataloader_validate) for it, target in enumerate(dataloader_validate): # Mix validation background target = prepare_batch(target, 'white') result_image, result_dict = validate_itr(glctx, target, geometry, opt_material, lgt, FLAGS, relight) for k in result_dict.keys(): np_img = result_dict[k].detach().cpu().numpy() if k == 'shaded': util.save_image(shaded_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'relight': util.save_image(relight_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'kd': util.save_image(kd_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'ks': util.save_image(ks_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'normal': util.save_image(normal_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) elif k == 'mask': util.save_image(mask_dir + '/' + ('val_%06d_%s.png' % (it, k)), np_img) if 'shaded' in result_dict.keys(): save_gif(shaded_dir,30) if 'relight' in result_dict.keys(): save_gif(relight_dir,30) if 'kd' in result_dict.keys(): 
save_gif(kd_dir,30) if 'ks' in result_dict.keys(): save_gif(ks_dir,30) if 'normal' in result_dict.keys(): save_gif(normal_dir,30) return 0 ############################################################################### # Main shape fitter function / optimization loop ############################################################################### class Trainer(torch.nn.Module): def __init__(self, glctx, geometry, lgt, mat, optimize_geometry, optimize_light, FLAGS, guidance): super(Trainer, self).__init__() self.glctx = glctx self.geometry = geometry self.light = lgt self.material = mat self.optimize_geometry = optimize_geometry self.optimize_light = optimize_light self.FLAGS = FLAGS self.guidance = guidance self.if_flip_the_normal = FLAGS.if_flip_the_normal self.if_use_bump = FLAGS.if_use_bump if self.FLAGS.mode == 'appearance_modeling': if not self.optimize_light: with torch.no_grad(): self.light.build_mips() self.params = list(self.material.parameters()) self.params += list(self.geometry.pos_encoder.parameters()) self.params += list(self.light.parameters()) if optimize_light else [] self.geo_params = list(self.geometry.parameters()) if optimize_geometry else [] def forward(self, target, it, if_normal, if_pretrain, scene_and_vertices ): if self.FLAGS.mode == 'appearance_modeling': if self.optimize_light: self.light.build_mips() if self.FLAGS.camera_space_light: self.light.xfm(target['mv']) if if_pretrain: return self.geometry.decoder.pre_train_ellipsoid(it, scene_and_vertices) else: return self.geometry.tick(glctx, target, self.light, self.material, it , if_normal, self.guidance, self.FLAGS.mode, self.if_flip_the_normal, self.if_use_bump) def optimize_mesh( glctx, geometry, opt_material, lgt, dataset_train, dataset_validate, FLAGS, log_interval=10, optimize_light=True, optimize_geometry=True, guidance = None, scene_and_vertices = None, ): dataloader_train = torch.utils.data.DataLoader(dataset_train, batch_size=FLAGS.batch, collate_fn=dataset_train.collate, shuffle=False) dataloader_validate = torch.utils.data.DataLoader(dataset_validate, batch_size=1, collate_fn=dataset_train.collate) model = Trainer(glctx, geometry, lgt, opt_material, optimize_geometry, optimize_light, FLAGS, guidance) if optimize_geometry: optimizer_mesh = torch.optim.AdamW(model.geo_params, lr=0.001, betas=(0.9, 0.99), eps=1e-15) optimizer = torch.optim.AdamW(model.params, lr=0.01, betas=(0.9, 0.99), eps=1e-15) optimizer_lora = torch.optim.SGD(itertools.chain(*guidance.unet_lora_params), lr=1e-5) if FLAGS.multi_gpu: model = model.cuda() model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[FLAGS.local_rank], find_unused_parameters= True ) img_cnt = 0 img_loss_vec = [] reg_loss_vec = [] iter_dur_vec = [] def cycle(iterable): iterator = iter(iterable) while True: try: yield next(iterator) except StopIteration: iterator = iter(iterable) v_it = cycle(dataloader_validate) scaler = torch.cuda.amp.GradScaler(enabled=True) rot_ang = 0 if FLAGS.local_rank == 0: video = Video(FLAGS.out_dir) if FLAGS.local_rank == 0: dataloader_train = tqdm(dataloader_train) for it, target in enumerate(dataloader_train): # Mix randomized background into dataset image target = prepare_batch(target, FLAGS.train_background) # Show/save image before training step (want to get correct rendering of input) if FLAGS.local_rank == 0: save_image = FLAGS.save_interval and (it % FLAGS.save_interval == 0) save_video = FLAGS.video_interval and (it % FLAGS.video_interval == 0) if save_image: result_image, result_dict = validate_itr(glctx, 
prepare_batch(next(v_it), FLAGS.train_background), geometry, opt_material, lgt, FLAGS) #prepare_batch(next(v_it), FLAGS.background) np_result_image = result_image.detach().cpu().numpy() util.save_image(FLAGS.out_dir + '/' + ('img_%s_%06d.png' % (FLAGS.mode, img_cnt)), np_result_image) util.save_image(FLAGS.out_dir + '/' + ('mask_%s_%06d.png' % (FLAGS.mode, img_cnt)), result_dict['mask'].detach().cpu().numpy()) img_cnt = img_cnt+1 if save_video: with torch.no_grad(): params = get_camera_params( resolution=512, fov=45, elev_angle=-20, azim_angle =rot_ang, ) rot_ang += 1 if FLAGS.mode =='geometry_modeling': buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='normal', if_use_bump = FLAGS.if_use_bump) video_image = (buffers['shaded'][0, ..., 0:3]+1)/2 else: buffers = geometry.render(glctx, params, lgt, opt_material, bsdf='pbr', if_use_bump = FLAGS.if_use_bump) video_image = util.rgb_to_srgb(buffers['shaded'][0, ..., 0:3]) video_image = video.ready_image(video_image) iter_start_time = time.time() if FLAGS.mode =='geometry_modeling': if it<=400: if_pretrain = True else: if_pretrain = False if_normal =True else: if_pretrain = False if_normal = False with torch.cuda.amp.autocast(enabled= True): if if_pretrain== True: reg_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices = scene_and_vertices) img_loss = 0 sds_loss = 0 attention_loss = 0 if if_pretrain == False: sds_loss, img_loss, reg_loss, attention_loss = model(target, it, if_normal, if_pretrain= if_pretrain, scene_and_vertices =None) if FLAGS.mode =='geometry_modeling': if(it<1000): attention_loss = 0 else: if(it<500): attention_loss = 0 # ============================================================================================== # Final loss # ============================================================================================== total_loss = img_loss + reg_loss + sds_loss + attention_loss if if_pretrain == True: scaler.scale(total_loss).backward() if if_pretrain == False: scaler.scale(total_loss).backward() img_loss_vec.append(img_loss.item()) reg_loss_vec.append(reg_loss.item()) # ============================================================================================== # Backpropagate # ============================================================================================== if if_normal == False and if_pretrain == False: scaler.step(optimizer) optimizer.zero_grad() if if_normal == True or if_pretrain == True: if optimize_geometry: scaler.step(optimizer_mesh) optimizer_mesh.zero_grad() for param in guidance.parameters(): if param.grad is not None and torch.isnan(param.grad).any(): param.grad = torch.nan_to_num(param.grad, nan=0.0) max_norm = 5.0 torch.nn.utils.clip_grad_norm_(guidance.parameters(), max_norm) if if_pretrain == False: optimizer_lora.step() optimizer_lora.zero_grad() for param in guidance.parameters(): param.data = torch.nan_to_num(param.data, nan=0.0, posinf=None, neginf=None) scaler.update() # ============================================================================================== # Clamp trainables to reasonable range # ============================================================================================== with torch.no_grad(): if 'kd' in opt_material: opt_material['kd'].clamp_() if 'ks' in opt_material: opt_material['ks'].clamp_() if 'normal' in opt_material: opt_material['normal'].clamp_() opt_material['normal'].normalize_() if lgt is not None: lgt.clamp_(min=0.0) torch.cuda.current_stream().synchronize() iter_dur_vec.append(time.time() - 
iter_start_time) return geometry, opt_material def seed_everything(seed, local_rank): random.seed(seed + local_rank) os.environ['PYTHONHASHSEED'] = str(seed) np.random.seed(seed + local_rank) torch.manual_seed(seed) torch.cuda.manual_seed(seed) if __name__ == "__main__": parser = argparse.ArgumentParser(description='nvdiffrec') parser.add_argument('--config', type=str, default='configs_clean3/icecream_geometry_debug.json', help='Config file') parser.add_argument('-i', '--iter', type=int, default=5000) parser.add_argument('-b', '--batch', type=int, default=1) parser.add_argument('-s', '--spp', type=int, default=1) parser.add_argument('-l', '--layers', type=int, default=1) parser.add_argument('-r', '--train-res', nargs=2, type=int, default=[512, 512]) parser.add_argument('-dr', '--display-res', type=int, default=None) parser.add_argument('-tr', '--texture-res', nargs=2, type=int, default=[1024, 1024]) parser.add_argument('-si', '--save-interval', type=int, default=1000, help="The interval of saving an image") parser.add_argument('-vi', '--video_interval', type=int, default=10, help="The interval of saving a frame of the video") parser.add_argument('-mr', '--min-roughness', type=float, default=0.08) parser.add_argument('-mip', '--custom-mip', action='store_true', default=False) parser.add_argument('-rt', '--random-textures', action='store_true', default=False) parser.add_argument('-bg', '--train_background', default='black', choices=['black', 'white', 'checker', 'reference']) parser.add_argument('-o', '--out-dir', type=str, default='results/result_debug/icecream_geometry') parser.add_argument('-rm', '--ref_mesh', type=str) parser.add_argument('-bm', '--base-mesh', type=str, default=None) parser.add_argument('--validate', type=bool, default=True) parser.add_argument("--local_rank", type=int, default=0, help="For distributed training: local_rank") parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.") parser.add_argument("--add_directional_text", action='store_true', default=False) parser.add_argument('--mode', default='geometry_modeling', choices=['geometry_modeling', 'appearance_modeling']) parser.add_argument('--text', default=None, help="text prompt") parser.add_argument('--sdf_init_shape', default='ellipsoid', choices=['ellipsoid', 'cylinder', 'custom_mesh']) parser.add_argument('--camera_random_jitter', type= float, default=0.4, help="A large value is advantageous for the extension of objects such as ears or sharp corners to grow.") parser.add_argument('--fovy_range', nargs=2, type=float, default=[25.71, 45.00]) parser.add_argument('--elevation_range', nargs=2, type=int, default=[-10, 45], help="The elevatioin range must in [-90, 90].") parser.add_argument("--guidance_weight", type=int, default=100, help="The weight of classifier-free guidance") parser.add_argument("--sds_weight_strategy", type=int, nargs=1, default=0, choices=[0, 1, 2], help="The strategy of the sds loss's weight") parser.add_argument("--translation_y", type= float, nargs=1, default= 0 , help="translation of the initial shape on the y-axis") parser.add_argument("--coarse_iter", type= int, nargs=1, default= 1000 , help="The iteration number of the coarse stage.") parser.add_argument('--early_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in early phase") parser.add_argument('--late_time_step_range', nargs=2, type=float, default=[0.02, 0.5], help="The time step range in late phase") parser.add_argument("--sdf_init_shape_rotate_x", type= int, 
nargs=1, default= 0 , help="rotation of the initial shape on the x-axis") parser.add_argument("--if_flip_the_normal", action='store_true', default=False , help="Flip the x-axis positive half-axis of Normal. We find this process helps to alleviate the Janus problem.") parser.add_argument("--front_threshold", type= int, nargs=1, default= 45 , help="the range of front view would be [-front_threshold, front_threshold") parser.add_argument("--if_use_bump", type=bool, default= True , help="whether to use perturbed normals during appearing modeling") parser.add_argument("--uv_padding_block", type= int, default= 4 , help="The block of uv padding.") FLAGS = parser.parse_args() FLAGS.mtl_override = None # Override material of model FLAGS.dmtet_grid = 64 # Resolution of initial tet grid. We provide 64, 128 and 256 resolution grids. Other resolutions can be generated with https://github.com/crawforddoran/quartet FLAGS.mesh_scale = 2.1 # Scale of tet grid box. Adjust to cover the model FLAGS.env_scale = 1.0 # Env map intensity multiplier FLAGS.envmap = None # HDR environment probe FLAGS.relight = None # HDR environment probe(relight) FLAGS.display = None # Conf validation window/display. E.g. [{"relight" : <path to envlight>}] FLAGS.camera_space_light = False # Fixed light in camera space. This is needed for setups like ethiopian head where the scanned object rotates on a stand. FLAGS.lock_light = False # Disable light optimization in the second pass FLAGS.lock_pos = False # Disable vertex position optimization in the second pass FLAGS.pre_load = True # Pre-load entire dataset into memory for faster training FLAGS.kd_min = [ 0.0, 0.0, 0.0, 0.0] # Limits for kd FLAGS.kd_max = [ 1.0, 1.0, 1.0, 1.0] FLAGS.ks_min = [ 0.0, 0.08, 0.0] # Limits for ks FLAGS.ks_max = [ 1.0, 1.0, 1.0] FLAGS.nrm_min = [-1.0, -1.0, 0.0] # Limits for normal map FLAGS.nrm_max = [ 1.0, 1.0, 1.0] FLAGS.cam_near_far = [1, 50] FLAGS.learn_light = False FLAGS.gpu_number = 1 FLAGS.sdf_init_shape_scale=[1.0, 1.0, 1.0] FLAGS.multi_gpu = "WORLD_SIZE" in os.environ and int(os.environ["WORLD_SIZE"]) > 1 if FLAGS.multi_gpu: FLAGS.gpu_number = int(os.environ["WORLD_SIZE"]) FLAGS.local_rank = int(os.environ["LOCAL_RANK"]) torch.distributed.init_process_group(backend="nccl", world_size = FLAGS.gpu_number, rank = FLAGS.local_rank) torch.cuda.set_device(FLAGS.local_rank) if FLAGS.config is not None: data = json.load(open(FLAGS.config, 'r')) for key in data: FLAGS.__dict__[key] = data[key] if FLAGS.display_res is None: FLAGS.display_res = FLAGS.train_res if FLAGS.local_rank == 0: print("Config / Flags:") print("---------") for key in FLAGS.__dict__.keys(): print(key, FLAGS.__dict__[key]) print("---------") seed_everything(FLAGS.seed, FLAGS.local_rank) os.makedirs(FLAGS.out_dir, exist_ok=True) glctx = dr.RasterizeCudaContext() # ============================================================================================== # Create data pipeline # ============================================================================================== dataset_train = DatasetMesh(glctx, FLAGS, validate=False) dataset_validate = DatasetMesh(glctx, FLAGS, validate=True) dataset_gif = DatasetMesh(glctx, FLAGS, gif=True) # ============================================================================================== # Create env light with trainable parameters # ============================================================================================== if FLAGS.mode == 'appearance_modeling' and FLAGS.base_mesh is not None: if FLAGS.learn_light: lgt = 
light.create_trainable_env_rnd(512, scale=0.0, bias=1) else: lgt = light.load_env(FLAGS.envmap, scale=FLAGS.env_scale) else: lgt = None if FLAGS.sdf_init_shape in ['ellipsoid', 'cylinder', 'custom_mesh'] and FLAGS.mode == 'geometry_modeling': if FLAGS.sdf_init_shape == 'ellipsoid': init_shape = o3d.geometry.TriangleMesh.create_sphere(1) elif FLAGS.sdf_init_shape == 'cylinder': init_shape = o3d.geometry.TriangleMesh.create_cylinder(radius=0.75, height=1.2, resolution=20, split=4, create_uv_map=False) elif FLAGS.sdf_init_shape == 'custom_mesh': if FLAGS.base_mesh: init_shape = get_normalize_mesh(FLAGS.base_mesh) else: assert False, "[Error] The path of custom mesh is invalid ! (geometry modeling)" else: assert False, "Invalid init type" vertices = np.asarray(init_shape.vertices) vertices[...,0]=vertices[...,0] * FLAGS.sdf_init_shape_scale[0] vertices[...,1]=vertices[...,1] * FLAGS.sdf_init_shape_scale[1] vertices[...,2]=vertices[...,2] * FLAGS.sdf_init_shape_scale[2] vertices = vertices @ util.rotate_x_2(np.deg2rad(FLAGS.sdf_init_shape_rotate_x)) vertices[...,1]=vertices[...,1] + FLAGS.translation_y init_shape.vertices = o3d.cuda.pybind.utility.Vector3dVector(vertices) points_surface = np.asarray(init_shape.sample_points_poisson_disk(5000).points) init_shape = o3d.t.geometry.TriangleMesh.from_legacy(init_shape) scene = o3d.t.geometry.RaycastingScene() scene.add_triangles(init_shape) scene_and_vertices = [scene, points_surface] guidance = StableDiffusion(device = 'cuda', mode = FLAGS.mode, text = FLAGS.text, add_directional_text = FLAGS.add_directional_text, batch = FLAGS.batch, guidance_weight = FLAGS.guidance_weight, sds_weight_strategy = FLAGS.sds_weight_strategy, early_time_step_range = FLAGS.early_time_step_range, late_time_step_range= FLAGS.late_time_step_range) if FLAGS.mode == 'geometry_modeling' : geometry = DMTetGeometry(FLAGS.dmtet_grid, FLAGS.mesh_scale, FLAGS) mat = initial_guness_material(geometry, True, FLAGS) # Run optimization geometry, mat = optimize_mesh(glctx, geometry, mat, lgt, dataset_train, dataset_validate, FLAGS, optimize_light=FLAGS.learn_light,optimize_geometry= not FLAGS.lock_pos, guidance= guidance, scene_and_vertices= scene_and_vertices) if FLAGS.local_rank == 0 and FLAGS.validate: validate(glctx, geometry, mat, lgt, dataset_gif, os.path.join(FLAGS.out_dir, "validate"), FLAGS) # Create textured mesh from result if FLAGS.local_rank == 0: base_mesh = xatlas_uvmap(glctx, geometry, mat, FLAGS) # # Free temporaries / cached memory torch.cuda.empty_cache() mat['kd_ks_normal'].cleanup() del mat['kd_ks_normal'] if FLAGS.local_rank == 0: # Dump mesh for debugging. os.makedirs(os.path.join(FLAGS.out_dir, "dmtet_mesh"), exist_ok=True) obj.write_obj(os.path.join(FLAGS.out_dir, "dmtet_mesh/"), base_mesh) elif FLAGS.mode == 'appearance_modeling': # ============================================================================================== # Train with fixed topology (mesh) # ============================================================================================== if FLAGS.base_mesh is None: assert False, "[Error] The path of custom mesh is invalid ! (appearance modeling)" base_mesh = mesh.load_mesh(FLAGS.base_mesh)
geometry = DLMesh(base_mesh, FLAGS)
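The optimize_mesh loop in the record above combines torch.cuda.amp mixed precision with a NaN guard on gradients and gradient clipping before stepping the optimizers. Below is a minimal, self-contained sketch of that pattern; the linear model, the data, and the unscale_ call before clipping are illustrative additions and not part of the repository's code.

```python
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
use_amp = device == 'cuda'

model = torch.nn.Linear(16, 1).to(device)   # placeholder for the geometry/material/guidance parameters
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-2, betas=(0.9, 0.99), eps=1e-15)
scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

for _ in range(10):
    x = torch.randn(8, 16, device=device)
    target = torch.zeros(8, 1, device=device)
    with torch.cuda.amp.autocast(enabled=use_amp):
        loss = torch.nn.functional.mse_loss(model(x), target)
    scaler.scale(loss).backward()
    # Replace NaN gradients before clipping, mirroring the guard around guidance.parameters().
    for p in model.parameters():
        if p.grad is not None and torch.isnan(p.grad).any():
            p.grad = torch.nan_to_num(p.grad, nan=0.0)
    scaler.unscale_(optimizer)              # added here so the clip threshold applies to unscaled gradients
    torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=5.0)
    scaler.step(optimizer)
    scaler.update()
    optimizer.zero_grad()
```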
3
2023-11-27 13:44:01+00:00
24k
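For reference, get_normalize_mesh at the top of the record centers a mesh at its vertex mean and rescales it so the farthest vertex lands on the unit sphere. The same arithmetic, sketched on a plain numpy array so it runs without open3d (normalize_vertices is a hypothetical helper name):

```python
import numpy as np

def normalize_vertices(vertices: np.ndarray) -> np.ndarray:
    """Center vertices at their mean and scale the farthest one onto the unit sphere."""
    shift = vertices.mean(axis=0)
    scale = np.linalg.norm(vertices - shift, ord=2, axis=1).max()
    return (vertices - shift) / scale

# Example: an arbitrary point cloud has maximum radius 1.0 after normalization.
pts = np.random.rand(100, 3) * 5.0
out = normalize_vertices(pts)
assert np.isclose(np.linalg.norm(out, axis=1).max(), 1.0)
```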
zhenzhiwang/intercontrol
eval/eval_controlmdm.py
[ { "identifier": "ControlGaussianDiffusion", "path": "diffusion/control_diffusion.py", "snippet": "class ControlGaussianDiffusion(SpacedDiffusion):\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = th.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = th.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = th.add(th.mul(data, std), mean)\n return output\n \n def q_sample(self, x_start, t, noise=None, model_kwargs=None):\n \"\"\"\n overrides q_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n if noise is None:\n noise = th.randn_like(x_start)\n assert noise.shape == x_start.shape\n\n bs, feat, _, frames = noise.shape\n noise *= 1. #- model_kwargs['y']['inpainting_mask']\n\n return (\n _extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start\n + _extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape)\n * noise\n )\n \n def global_joint_bfgs_optimize(self, x, model_kwargs=None):\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.humanml_to_global_joint(x)\n cond_joint = model_kwargs['y']['global_joint']\n mask = model_kwargs['y']['global_joint_mask']\n pred_joint = th.masked_select(pred_joint, mask.bool())\n cond_joint = th.masked_select(cond_joint, mask.bool())\n assert pred_joint.shape == cond_joint.shape, f\"pred_joint: {pred_joint.shape}, cond_joint: {cond_joint.shape}\"\n loss = self.mse_loss(pred_joint, cond_joint)\n return loss\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert pred_joint.shape[1] == 1\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n return pred_joint\n \n def global_joint_position_conditioning(self, x, model_kwargs=None):\n n_joints = 22 if x.shape[1] == 263 else 21\n assert self.model_mean_type == ModelMeanType.START_X, 'This feature supports only X_start pred for mow!'\n pred_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n pred_joint = recover_from_ric(pred_joint, n_joints)\n pred_joint = pred_joint.view(-1, *pred_joint.shape[2:]).permute(0, 2, 3, 1)\n #pred_joint.requires_grad = True\n assert pred_joint.shape == model_kwargs['y']['global_joint'].shape == model_kwargs['y']['global_joint_mask'].shape, f\"pred_joint: {pred_joint.shape}, global_joint: {model_kwargs['y']['global_joint'].shape}, global_joint_mask: {model_kwargs['y']['global_joint_mask'].shape}\"\n loss = self.global_joint_condition_loss(pred_joint, model_kwargs['y']['global_joint'], model_kwargs['y']['global_joint_mask'])\n diff_scale = ((pred_joint.clamp(min=1e-4) - model_kwargs['y']['global_joint'].clamp(min=1e-4)).abs() / model_kwargs['y']['global_joint'].clamp(min=1e-4).abs()).mean().item()\n #loss.requires_grad = True\n gradient = th.autograd.grad(loss, x, \n grad_outputs=th.ones_like(loss),\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n return gradient.clone().detach(), loss.item(), diff_scale\n\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n const_noise=False,\n use_posterior=False,\n ):\n \"\"\"\n overrides p_sample to use the inpainting mask\n \n same usage as in GaussianDiffusion\n \"\"\"\n #assert 
use_posterior == False\n p_mean_variance_func = self.p_mean_variance_bfgs_posterior if use_posterior else self.p_mean_variance_bfgs_x0\n out = p_mean_variance_func(\n model,\n x,\n t,\n clip_denoised=clip_denoised,\n denoised_fn=denoised_fn,\n model_kwargs=model_kwargs,\n k_first = self.bfgs_times_first,\n k_last = self.bfgs_times_last,\n )\n \n noise = th.randn_like(x)\n if const_noise:\n noise = noise[[0]].repeat(x.shape[0], 1, 1, 1)\n\n nonzero_mask = (\n (t != 0).float().view(-1, *([1] * (len(x.shape) - 1)))\n ) # no noise when t == 0\n\n sample = out[\"mean\"] + nonzero_mask * th.exp(0.5 * out[\"log_variance\"]) * noise\n \n return {\"sample\": sample, \"pred_xstart\": out[\"pred_xstart\"]}\n \n def condition_mean_with_grad(self, cond_fn, x_mean, x_var, t, strength, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x. In particular, cond_fn computes grad(log(p(y|x))), and we want to\n condition on y.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n with th.enable_grad():\n x_mean = x_mean.clone().detach().requires_grad_(True)\n gradient, loss_value, diff_scale = cond_fn(x_mean, model_kwargs) # p_mean_var[\"mean\"]\n gradient_guidance = - strength * gradient.float() # x_var.clamp(min = 0.01) \n new_mean = (x_mean + gradient_guidance).clone().detach()\n return new_mean, loss_value, gradient_guidance.clone().detach().abs().cpu(), x_mean.clone().detach().abs().cpu(), diff_scale\n\n\n def condition_mean_bfgs(self, x_mean, num_condition, model_kwargs=None):\n \"\"\"\n Compute the mean for the previous step, given a function cond_fn that\n computes the gradient of a conditional log probability with respect to\n x.\n\n This uses the conditioning strategy from Sohl-Dickstein et al. (2015).\n \"\"\"\n \n with th.enable_grad():\n x_mean = x_mean.clone().detach().contiguous().requires_grad_(True)\n def closure():\n lbfgs.zero_grad()\n objective = self.global_joint_bfgs_optimize(x_mean, model_kwargs)\n objective.backward()\n return objective\n lbfgs = optim.LBFGS([x_mean],\n history_size=10, \n max_iter=4, \n line_search_fn=\"strong_wolfe\")\n for _ in range(num_condition):\n lbfgs.step(closure)\n #loss_value = self.global_joint_bfgs_optimize(x_mean, model_kwargs).item()\n return x_mean #, loss_value\n\n def p_mean_variance_bfgs_x0(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. 
This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_output = self.condition_mean_bfgs(model_output, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n \n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def p_mean_variance_bfgs_posterior(\n self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None, \n k_first = 1,\n k_last = 10,\n t_threshold = 10,\n ):\n \"\"\"\n Apply the model to get p(x_{t-1} | x_t), as well as a prediction of\n the initial x, x_0.\n\n :param model: the model, which takes a signal and a batch of timesteps\n as input.\n :param x: the [N x C x ...] 
tensor at time t.\n :param t: a 1-D Tensor of timesteps.\n :param clip_denoised: if True, clip the denoised signal into [-1, 1].\n :param denoised_fn: if not None, a function which applies to the\n x_start prediction before it is used to sample. Applies before\n clip_denoised.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :return: a dict with the following keys:\n - 'mean': the model mean output.\n - 'variance': the model variance output.\n - 'log_variance': the log of 'variance'.\n - 'pred_xstart': the prediction for x_0.\n \"\"\"\n if model_kwargs is None:\n model_kwargs = {}\n\n B, C = x.shape[:2]\n assert t.shape == (B,)\n original_model_output = model(x, self._scale_timesteps(t), **model_kwargs)\n model_output = original_model_output.clone().detach()\n\n if self.model_var_type in [ModelVarType.LEARNED, ModelVarType.LEARNED_RANGE]:\n assert model_output.shape == (B, C * 2, *x.shape[2:])\n model_output, model_var_values = th.split(model_output, C, dim=1)\n if self.model_var_type == ModelVarType.LEARNED:\n model_log_variance = model_var_values\n model_variance = th.exp(model_log_variance)\n else:\n min_log = _extract_into_tensor(\n self.posterior_log_variance_clipped, t, x.shape\n )\n max_log = _extract_into_tensor(np.log(self.betas), t, x.shape)\n # The model_var_values is [-1, 1] for [min_var, max_var].\n frac = (model_var_values + 1) / 2\n model_log_variance = frac * max_log + (1 - frac) * min_log\n model_variance = th.exp(model_log_variance)\n else:\n model_variance, model_log_variance = {\n # for fixedlarge, we set the initial (log-)variance like so\n # to get a better decoder log likelihood.\n ModelVarType.FIXED_LARGE: (\n np.append(self.posterior_variance[1], self.betas[1:]),\n np.log(np.append(self.posterior_variance[1], self.betas[1:])),\n ),\n ModelVarType.FIXED_SMALL: (\n self.posterior_variance,\n self.posterior_log_variance_clipped,\n ),\n }[self.model_var_type]\n\n model_variance = _extract_into_tensor(model_variance, t, x.shape)\n model_log_variance = _extract_into_tensor(model_log_variance, t, x.shape)\n\n\n def process_xstart(x):\n if denoised_fn is not None:\n x = denoised_fn(x)\n if clip_denoised:\n # print('clip_denoised', clip_denoised)\n return x.clamp(-1, 1)\n return x\n\n if self.model_mean_type == ModelMeanType.PREVIOUS_X:\n pred_xstart = process_xstart(\n self._predict_xstart_from_xprev(x_t=x, t=t, xprev=model_output)\n )\n model_mean = model_output\n elif self.model_mean_type in [ModelMeanType.START_X, ModelMeanType.EPSILON]: # THIS IS US!\n if self.model_mean_type == ModelMeanType.START_X:\n pred_xstart = process_xstart(model_output)\n else:\n pred_xstart = process_xstart(\n self._predict_xstart_from_eps(x_t=x, t=t, eps=model_output)\n )\n model_mean, _, _ = self.q_posterior_mean_variance(\n x_start=pred_xstart, x_t=x, t=t\n )\n else:\n raise NotImplementedError(self.model_mean_type)\n\n assert (\n model_mean.shape == model_log_variance.shape == pred_xstart.shape == x.shape\n )\n\n # loss-guided condition\n #assert k_first ==1, \"k_first must be 1, {}\".format(k_first)\n num_condition = k_first if t[0] >= t_threshold else k_last # t[0] count from 1000 to 1, assume all t are equal\n model_mean = self.condition_mean_bfgs(model_mean, num_condition, model_kwargs=model_kwargs) # , loss_value\n\n return {\n \"mean\": model_mean,\n \"variance\": model_variance,\n \"log_variance\": model_log_variance,\n \"pred_xstart\": pred_xstart,\n }\n \n def training_losses(self, model, x_start, 
t, model_kwargs=None, noise=None, dataset=None,\n use_posterior = True,\n k_first = 1,\n k_last = 10,\n t_threshold = 10,):\n \"\"\"\n Compute training losses for a single timestep.\n\n :param model: the model to evaluate loss on.\n :param x_start: the [N x C x ...] tensor of inputs.\n :param t: a batch of timestep indices.\n :param model_kwargs: if not None, a dict of extra keyword arguments to\n pass to the model. This can be used for conditioning.\n :param noise: if specified, the specific Gaussian noise to try to remove.\n :return: a dict with the key \"loss\" containing a tensor of shape [N].\n Some mean or variance settings may also have other keys.\n \"\"\"\n\n # enc = model.model._modules['module']\n model = self._wrap_model(model)\n \n enc = model.model\n mask = model_kwargs['y']['mask']\n get_xyz = lambda sample: enc.rot2xyz(sample, mask=None, pose_rep=enc.pose_rep, translation=enc.translation,\n glob=enc.glob,\n # jointstype='vertices', # 3.4 iter/sec # USED ALSO IN MotionCLIP\n jointstype='smpl', # 3.4 iter/sec\n vertstrans=False)\n\n if model_kwargs is None:\n model_kwargs = {}\n if noise is None:\n noise = th.randn_like(x_start)\n x_t = self.q_sample(x_start, t, noise=noise, model_kwargs=model_kwargs)\n \n #assert k_first == 1, \"k_first must be 1, {}\".format(k_first)\n #assert k_last == 10, \"k_last must be 10, {}\".format(k_last)\n assert use_posterior == True, \"use_posterior must be True, {}\".format(use_posterior)\n if use_posterior:\n '''\n # loss-guided condition in training time\n if t[0] >= t_threshold:\n assert (t >= t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n num_condition = k_first # else k_last\n else:\n num_condition = k_last\n assert (t < t_threshold).all(), f\"all t should be >=10 or <10 : t={t}\"\n '''\n num_condition = k_first\n x_t = self.condition_mean_bfgs(x_t, num_condition, model_kwargs=model_kwargs)\n\n terms = {}\n if self.loss_type == LossType.MSE or self.loss_type == LossType.RESCALED_MSE:\n model_output = model(x_t, self._scale_timesteps(t), **model_kwargs)\n\n target = {\n ModelMeanType.PREVIOUS_X: self.q_posterior_mean_variance(\n x_start=x_start, x_t=x_t, t=t\n )[0],\n ModelMeanType.START_X: x_start,\n ModelMeanType.EPSILON: noise,\n }[self.model_mean_type]\n\n assert model_output.shape == target.shape == x_start.shape, \"model_output {}, target {}, x_start {}\".format(model_output.shape ,target.shape ,x_start.shape) # [bs, njoints, nfeats, nframes]\n\n terms[\"rot_mse\"] = self.masked_l2(target, model_output, mask) # mean_flat(rot_mse)\n\n terms[\"loss\"] = terms[\"rot_mse\"] + terms.get('vb', 0.) 
+\\\n (self.lambda_vel * terms.get('vel_mse', 0.)) +\\\n (self.lambda_rcxyz * terms.get('rcxyz_mse', 0.)) + \\\n (self.lambda_fc * terms.get('fc', 0.))\n else:\n raise NotImplementedError(self.loss_type)\n\n return terms" }, { "identifier": "SpacedDiffusion", "path": "diffusion/respace.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(\n self, model, *args, **kwargs\n ): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model):\n if isinstance(model, _WrappedModel):\n return model\n return _WrappedModel(\n model, self.timestep_map, self.rescale_timesteps, self.original_num_steps\n )\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "evaluation_inpainting_parser", "path": "utils/parser_util.py", "snippet": "def evaluation_inpainting_parser():\n parser = ArgumentParser()\n # args specified by the user: (all other will be loaded from the model)\n add_base_options(parser)\n add_evaluation_options(parser)\n add_inpainting_options(parser)\n add_interactive_options(parser)\n return parse_and_load_from_model(parser)" }, { "identifier": "fixseed", "path": "utils/fixseed.py", "snippet": "def fixseed(seed):\n torch.backends.cudnn.benchmark = False\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)" }, { "identifier": "get_mdm_loader", "path": "data_loaders/humanml/motion_loaders/model_motion_loaders.py", "snippet": "def get_mdm_loader(args, model, diffusion, batch_size, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale, num_unfoldings=0):\n opt = {\n 'name': 'test', # FIXME\n }\n print('Generating %s ...' 
% opt['name'])\n # dataset = CompMDMGeneratedDataset(opt, ground_truth_dataset, ground_truth_dataset.w_vectorizer, mm_num_samples, mm_num_repeats)\n if hasattr(args, \"inpainting_mask\") and args.inpainting_mask == 'global_joint':\n dataset = CompMDMControlGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale)\n elif hasattr(args, \"inpainting_mask\") and args.inpainting_mask != '':\n dataset = CompMDMInpaintingGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale)\n elif num_unfoldings > 1:\n dataset = CompMDMUnfoldingGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale, num_unfoldings)\n else:\n dataset = CompMDMGeneratedDataset(args, model, diffusion, ground_truth_loader, mm_num_samples, mm_num_repeats, max_motion_length, num_samples_limit, scale)\n\n mm_dataset = MMGeneratedDataset(opt, dataset, ground_truth_loader.dataset.w_vectorizer)\n\n # NOTE: bs must not be changed! this will cause a bug in R precision calc!\n motion_loader = DataLoader(dataset, batch_size=batch_size, collate_fn=collate_fn, drop_last=True, num_workers=4)\n mm_motion_loader = DataLoader(mm_dataset, batch_size=1, num_workers=4)\n\n print('Generated Dataset Loading Completed!!!')\n\n return motion_loader, mm_motion_loader" }, { "identifier": "EvaluatorMDMWrapper", "path": "data_loaders/humanml/networks/evaluator_wrapper.py", "snippet": "class EvaluatorMDMWrapper(object):\n\n def __init__(self, dataset_name, device):\n opt = {\n 'dataset_name': dataset_name,\n 'device': device,\n 'dim_word': 300,\n 'max_motion_length': 196,\n 'dim_pos_ohot': len(POS_enumerator),\n 'dim_motion_hidden': 1024,\n 'max_text_len': 20,\n 'dim_text_hidden': 512,\n 'dim_coemb_hidden': 512,\n 'dim_pose': 263 if dataset_name == 'humanml' else 251,\n 'dim_movement_enc_hidden': 512,\n 'dim_movement_latent': 512,\n 'checkpoints_dir': '.',\n 'unit_length': 4,\n 'foot_contact_entries': 4,\n }\n\n if opt['dataset_name'] == 'babel':\n opt['dim_pose'] = 135\n opt['foot_contact_entries'] = 0\n\n self.text_encoder, self.motion_encoder, self.movement_encoder = build_evaluators(opt)\n self.opt = opt\n self.device = opt['device']\n\n self.text_encoder.to(opt['device'])\n self.motion_encoder.to(opt['device'])\n self.movement_encoder.to(opt['device'])\n\n self.text_encoder.eval()\n self.motion_encoder.eval()\n self.movement_encoder.eval()\n\n # Please note that the results does not following the order of inputs\n def get_co_embeddings(self, word_embs, pos_ohot, cap_lens, motions, m_lens):\n with torch.no_grad():\n word_embs = word_embs.detach().to(self.device).float()\n pos_ohot = pos_ohot.detach().to(self.device).float()\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motion_wo_foot_contact(motions, self.opt['foot_contact_entries'])).detach()\n m_lens = m_lens // self.opt['unit_length']\n motion_embedding = self.motion_encoder(movements, m_lens)\n\n '''Text Encoding'''\n text_embedding = self.text_encoder(word_embs, pos_ohot, cap_lens)\n text_embedding = text_embedding[align_idx]\n return text_embedding, motion_embedding\n\n # Please note that the results does not following the order of inputs\n def 
get_motion_embeddings(self, motions, m_lens):\n with torch.no_grad():\n motions = motions.detach().to(self.device).float()\n\n align_idx = np.argsort(m_lens.data.tolist())[::-1].copy()\n motions = motions[align_idx]\n m_lens = m_lens[align_idx]\n\n '''Movement Encoding'''\n movements = self.movement_encoder(motion_wo_foot_contact(motions, self.opt['foot_contact_entries'])).detach()\n m_lens = m_lens // self.opt['unit_length']\n motion_embedding = self.motion_encoder(movements, m_lens)\n return motion_embedding" }, { "identifier": "load_controlmdm_and_diffusion", "path": "utils/model_util.py", "snippet": "def load_controlmdm_and_diffusion(args, data, device, ModelClass=ControlMDM, DiffusionClass=ControlGaussianDiffusion): \n model, diffusion = create_model_and_diffusion(args, data, ModelClass=ControlMDM, DiffusionClass=DiffusionClass)\n model_path = args.model_path\n print(f\"Loading checkpoints from [{model_path}]...\")\n state_dict = torch.load(model_path, map_location='cpu')\n load_model_wo_clip(model, state_dict)\n model.mean = data.dataset.t2m_dataset.mean\n model.std = data.dataset.t2m_dataset.std\n\n model.to(device)\n model.eval() # disable random masking\n model = wrap_model(model, args)\n return model, diffusion" }, { "identifier": "ControlMDM", "path": "model/ControlMDM.py", "snippet": "class ControlMDM(MDM):\n\n def __init__(self, modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim=256, ff_size=1024, num_layers=8, num_heads=4, dropout=0.1,\n ablation=None, activation=\"gelu\", legacy=False, data_rep='rot6d', dataset='amass', clip_dim=512,\n arch='trans_enc', emb_trans_dec=False, clip_version=None, args=None, **kargs):\n\n super(ControlMDM, self).__init__(modeltype, njoints, nfeats, num_actions, translation, pose_rep, glob, glob_rot,\n latent_dim, ff_size, num_layers, num_heads, dropout,\n ablation, activation, legacy, data_rep, dataset, clip_dim,\n arch, emb_trans_dec, clip_version, **kargs)\n self.args = args\n self.num_layers = num_layers\n self.multi_person = args.multi_person\n self.upper_orientation_index = [0, 16, 17] # root, l_shoulder, r_shoulder\n self.lower_orientation_index = [0, 1, 2] # root, l_hip, r_hip\n\n # linear layers init with zeros\n if self.dataset == 'kit':\n self.first_zero_linear = nn.Linear(21*3*2 + 2*3, self.latent_dim)\n elif self.dataset == 'humanml':\n self.first_zero_linear = nn.Linear(22*3*2 + 2*3, self.latent_dim)\n else:\n raise NotImplementedError('Supporting only kit and humanml dataset, got {}'.format(self.dataset))\n \n nn.init.zeros_(self.first_zero_linear.weight)\n nn.init.zeros_(self.first_zero_linear.bias)\n self.mid_zero_linear = nn.ModuleList(\n [nn.Linear(self.latent_dim, self.latent_dim) for _ in range(self.num_layers)])\n for m in self.mid_zero_linear:\n nn.init.zeros_(m.weight)\n nn.init.zeros_(m.bias)\n\n if self.arch == 'trans_enc':\n seqTransEncoderLayer = nn.TransformerEncoderLayer(d_model=self.latent_dim,\n nhead=self.num_heads,\n dim_feedforward=self.ff_size,\n dropout=self.dropout,\n activation=self.activation)\n del self.seqTransEncoder\n self.seqTransEncoder_mdm = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n self.seqTransEncoder_control = TransformerEncoder(seqTransEncoderLayer,\n num_layers=self.num_layers)\n else:\n raise ValueError('Supporting only trans_enc arch.')\n\n self.freeze_block(self.input_process)\n self.freeze_block(self.sequence_pos_encoder)\n self.freeze_block(self.seqTransEncoder_mdm)\n self.freeze_block(self.embed_timestep)\n if 'text' 
in self.cond_mode:\n self.freeze_block(self.embed_text)\n self.freeze_block(self.output_process)\n\n def inv_transform(self, data):\n assert self.std is not None and self.mean is not None\n #assert data.requires_grad == True\n std = torch.tensor(self.std, dtype=data.dtype, device=data.device, requires_grad=False)\n mean = torch.tensor(self.mean, dtype=data.dtype, device=data.device, requires_grad=False)\n output = torch.add(torch.mul(data, std), mean)\n return output\n \n def compute_triangle_normals(self, triangles):\n # Compute the vectors from the first point to the other two points\n v1 = triangles[:,:, 1] - triangles[:, :,0]\n v2 = triangles[:,:, 2] - triangles[:,:,0]\n\n # Compute the cross product of v1 and v2 to get the normal vectors\n normals = torch.cross(v2, v1, dim=-1)\n\n # Normalize the normal vectors to unit length\n normals = nn.functional.normalize(normals, dim=-1)\n return normals\n \n def humanml_to_global_joint(self, x):\n n_joints = 22 if x.shape[1] == 263 else 21\n curr_joint = self.inv_transform(x.permute(0, 2, 3, 1)).float()\n assert curr_joint.shape[1] == 1\n curr_joint = recover_from_ric(curr_joint, n_joints)\n curr_joint = curr_joint.view(-1, *curr_joint.shape[2:]).permute(0, 2, 3, 1)\n # change root positions for multi-person purpose\n if self.multi_person:\n curr_joint[1::2, :,2,:] *= -1\n curr_joint[1::2, :,0,:] *= -1\n curr_joint[1::2, :,2,:] += 2\n\n # more than 3 people\n #curr_joint[1, :,2,:] *= -1\n #curr_joint[1, :,0,:] *= -1\n #curr_joint[1, :,2,:] += 2\n #curr_joint[2, :,0,:] += 1\n return curr_joint\n\n def forward(self, x, timesteps, y=None):\n bs, njoints, nfeats, seqlen = x.shape\n control_bs, n_global_joints, xyz_dim, control_frames = y['global_joint'].shape\n assert bs == control_bs and seqlen == control_frames, \"bs {} != {} or seqlen {} != {}\".format(bs, control_bs, seqlen, control_frames)\n assert xyz_dim ==3, \"xyz_dim {} != 3\".format(xyz_dim)\n # prepare global joints for controlmdm\n curr_joint = self.humanml_to_global_joint(x).clone().detach() # [bs, njoints, 3, seqlen]\n curr_joint.requires_grad = False\n\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n\n # controlmdm\n # orientation\n upper_triangles = curr_joint[:,self.upper_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n lower_triangles = curr_joint[:,self.lower_orientation_index,:,:].permute(3,0,1,2) # [seqlen, bs, 3, 3]\n upper_orientation = self.compute_triangle_normals(upper_triangles) # [seqlen, bs, 3]\n lower_orientation = self.compute_triangle_normals(lower_triangles) # [seqlen, bs, 3]\n\n # relative position to joint\n '''\n relative_position = torch.zeros_like(curr_joint, device = xseq.device, dtype=torch.float32) # [bs, njoints, 3, seqlen]\n relative_position[1::2,:,:,:] = ((y['global_joint'][::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,1::2,:,:].unsqueeze(2))*y['global_joint_mask'][::2,:,:,:].bool().float()).float().sum(1)\n relative_position[::2,:,:,:] = 
((y['global_joint'][1::2,:,:,:].unsqueeze(1).float() - \\\n curr_joint[:,::2,:,:].unsqueeze(2))*y['global_joint_mask'][1::2,:,:,:].bool().float()).float().sum(1)\n '''\n relative_position = ((y['global_joint'].float() - curr_joint)*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_position = relative_position.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n\n # relative position to root\n relative_root = ((y['global_joint'].float() - curr_joint[:,[0],:,:])*y['global_joint_mask'].bool().float()).float() # [bs, njoints, 3, seqlen]\n relative_root = relative_root.permute(3, 0, 1, 2).reshape(control_frames, control_bs, -1) # [seqlen, bs, 22*3]\n global_joint_feat = torch.cat((relative_position, relative_root, upper_orientation, lower_orientation), axis=-1) # [seqlen, bs, 22*3 *2 +3 +3]\n \n global_joint_feat = self.first_zero_linear(global_joint_feat) # [seqlen, bs, d]\n control_input = xseq + torch.cat((torch.zeros_like(emb, device = xseq.device, dtype=torch.float32), global_joint_feat), axis=0) # [seqlen+1, bs, d]\n control_output_list = self.seqTransEncoder_control.return_all_layers(control_input) # [seqlen+1, bs, d]\n for i in range(self.num_layers):\n control_output_list[i] = self.mid_zero_linear[i](control_output_list[i])\n \n output = self.seqTransEncoder_mdm.forward_with_condition(xseq, control_output_list)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output\n\n def trainable_parameters(self):\n return [p for name, p in self.named_parameters() if p.requires_grad]\n # return [p for name, p in self.named_parameters() if not name.startswith('clip_model.')]\n \n def trainable_parameter_names(self):\n return [name for name, p in self.named_parameters() if p.requires_grad]\n\n def freeze_block(self, block):\n block.eval()\n for p in block.parameters():\n p.requires_grad = False\n\n def unfreeze_block(self, block):\n block.train()\n for p in block.parameters():\n p.requires_grad = True\n \n def forward_without_control(self, x, timesteps, y=None): #\n # Build embedding vector\n emb = self.embed_timestep(timesteps) # [1, bs, d]\n\n force_mask = y.get('uncond', False)\n if 'text' in self.cond_mode:\n enc_text = self.encode_text(y['text'])\n emb += self.embed_text(self.mask_cond(enc_text, force_mask=force_mask))\n if 'action' in self.cond_mode:\n action_emb = self.embed_action(y['action'])\n emb += self.mask_cond(action_emb, force_mask=force_mask)\n\n # Embed motion to latent space (frame by frame)\n x = self.input_process(x) #[seqlen, bs, d]\n # adding the timestep embed\n xseq = torch.cat((emb, x), axis=0) # [seqlen+1, bs, d]\n xseq = self.sequence_pos_encoder(xseq) # [seqlen+1, bs, d]\n output = self.seqTransEncoder_mdm(xseq)[1:] # [seqlen, bs, d]\n output = self.output_process(output) # [bs, njoints, nfeats, seqlen]\n return output" }, { "identifier": "logger", "path": "diffusion/logger.py", "snippet": "DEBUG = 10\nINFO = 20\nWARN = 30\nERROR = 40\nDISABLED = 50\n DEFAULT = None # A logger with no output files. 
(See right below class definition)\n CURRENT = None # Current logger being used by the free functions above\nclass KVWriter(object):\nclass SeqWriter(object):\nclass HumanOutputFormat(KVWriter, SeqWriter):\nclass JSONOutputFormat(KVWriter):\nclass CSVOutputFormat(KVWriter):\nclass TensorBoardOutputFormat(KVWriter):\nclass Logger(object):\n def writekvs(self, kvs):\n def writeseq(self, seq):\n def __init__(self, filename_or_file):\n def writekvs(self, kvs):\n def _truncate(self, s):\n def writeseq(self, seq):\n def close(self):\n def __init__(self, filename):\n def writekvs(self, kvs):\n def close(self):\n def __init__(self, filename):\n def writekvs(self, kvs):\n def close(self):\n def __init__(self, dir):\n def writekvs(self, kvs):\n def summary_val(k, v):\n def close(self):\ndef make_output_format(format, ev_dir, log_suffix=\"\"):\ndef logkv(key, val):\ndef logkv_mean(key, val):\ndef logkvs(d):\ndef dumpkvs():\ndef getkvs():\ndef log(*args, level=INFO):\ndef debug(*args):\ndef info(*args):\ndef warn(*args):\ndef error(*args):\ndef set_level(level):\ndef set_comm(comm):\ndef get_dir():\ndef profile_kv(scopename):\ndef profile(n):\n def decorator_with_name(func):\n def func_wrapper(*args, **kwargs):\ndef get_current():\n def __init__(self, dir, output_formats, comm=None):\n def logkv(self, key, val):\n def logkv_mean(self, key, val):\n def dumpkvs(self):\n def log(self, *args, level=INFO):\n def set_level(self, level):\n def set_comm(self, comm):\n def get_dir(self):\n def close(self):\n def _do_log(self, args):\ndef get_rank_without_mpi_import():\ndef mpi_weighted_mean(comm, local_name2valcount):\ndef configure(dir=None, format_strs=None, comm=None, log_suffix=\"\"):\ndef _configure_default_logger():\ndef reset():\ndef scoped_configure(dir=None, format_strs=None, comm=None):" }, { "identifier": "dist_util", "path": "utils/dist_util.py", "snippet": "GPUS_PER_NODE = 8\nSETUP_RETRY_COUNT = 3\ndef setup_dist(device=0):\ndef dev():\ndef load_state_dict(path, **kwargs):\ndef sync_params(params):\ndef _find_free_port():" }, { "identifier": "get_dataset_loader", "path": "data_loaders/get_data.py", "snippet": "def get_dataset_loader(name, batch_size, num_frames, split='train', load_mode='train', opt=None, short_db=False, cropping_sampler=False, size=None):\n if load_mode == 'text_only':\n load_mode = 'train'\n dataset = get_dataset(name, num_frames, split, load_mode, batch_size, opt, short_db, cropping_sampler, size)\n collate = get_collate_fn(name, load_mode)\n\n n_workers = 1 if load_mode in ['movement_train', 'evaluator_train'] else 8\n loader = DataLoader(\n dataset, batch_size=batch_size, shuffle=True,\n num_workers=n_workers, drop_last=True, collate_fn=collate\n )\n\n return loader" }, { "identifier": "wrap_model", "path": "model/cfg_sampler.py", "snippet": "def wrap_model(model, args):\n if args.guidance_param not in [0., 1.]:\n return ClassifierFreeSampleModel(model) # wrapping model with the classifier-free sampler\n elif args.guidance_param == 0:\n return UnconditionedModel(model)\n else:\n return model" } ]
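The ControlGaussianDiffusion snippet in the context above refines the predicted mean by running a few torch.optim.LBFGS steps against the joint-control loss (condition_mean_bfgs). A toy version of that closure-based optimizer usage, with a simple quadratic standing in for global_joint_bfgs_optimize:

```python
import torch
from torch import optim

target = torch.tensor([1.0, -2.0, 0.5])
x = torch.zeros(3, requires_grad=True)  # stands in for the predicted mean being refined

lbfgs = optim.LBFGS([x], history_size=10, max_iter=4, line_search_fn="strong_wolfe")

def closure():
    lbfgs.zero_grad()
    objective = ((x - target) ** 2).sum()  # placeholder for the joint-control loss
    objective.backward()
    return objective

for _ in range(2):  # outer loop plays the role of num_condition in condition_mean_bfgs
    lbfgs.step(closure)

print(x.detach())  # close to target after a handful of inner iterations
```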
from diffusion.control_diffusion import ControlGaussianDiffusion from diffusion.respace import SpacedDiffusion from utils.parser_util import evaluation_inpainting_parser from utils.fixseed import fixseed from datetime import datetime from data_loaders.humanml.motion_loaders.model_motion_loaders import get_mdm_loader # get_motion_loader from data_loaders.humanml.utils.metrics import * from data_loaders.humanml.networks.evaluator_wrapper import EvaluatorMDMWrapper from collections import OrderedDict from data_loaders.humanml.scripts.motion_process import * from data_loaders.humanml.utils.utils import * from utils.model_util import load_controlmdm_and_diffusion from model.ControlMDM import ControlMDM from diffusion import logger from utils import dist_util from data_loaders.get_data import get_dataset_loader from model.cfg_sampler import wrap_model
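ControlMDM, shown in the same context field, zero-initializes its control projection layers so the control branch contributes nothing at the first training step and the frozen MDM backbone's behaviour is preserved, in the spirit of ControlNet's zero-initialized layers. A minimal sketch of that initialization; the latent width and layer count are hard-coded here purely for illustration:

```python
import torch.nn as nn

latent_dim, num_layers = 512, 8

# Input projection for the per-frame joint features (22 joints * 3 coords * 2 + 2 orientations * 3).
first_zero_linear = nn.Linear(22 * 3 * 2 + 2 * 3, latent_dim)
nn.init.zeros_(first_zero_linear.weight)
nn.init.zeros_(first_zero_linear.bias)

# One zero-initialized projection per transformer layer of the control branch.
mid_zero_linear = nn.ModuleList(
    [nn.Linear(latent_dim, latent_dim) for _ in range(num_layers)])
for m in mid_zero_linear:
    nn.init.zeros_(m.weight)
    nn.init.zeros_(m.bias)

# With all-zero weights and biases, the control features are exactly zero at step 0,
# so training starts from the frozen backbone and only learns the residual control signal.
```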
14,409
                    all_metrics['Diversity'][key] += [item]
            if run_mm:
                for key, item in mm_score_dict.items():
                    if key not in all_metrics['MultiModality']:
                        all_metrics['MultiModality'][key] = [item]
                    else:
                        all_metrics['MultiModality'][key] += [item]

        # print(all_metrics['Diversity'])
        mean_dict = {}
        for metric_name, metric_dict in all_metrics.items():
            print('========== %s Summary ==========' % metric_name)
            print('========== %s Summary ==========' % metric_name, file=f, flush=True)
            for model_name, values in metric_dict.items():
                # print(metric_name, model_name)
                mean, conf_interval = get_metric_statistics(np.array(values), replication_times)
                mean_dict[metric_name + '_' + model_name] = mean
                # print(mean, mean.dtype)
                if isinstance(mean, np.float64) or isinstance(mean, np.float32):
                    print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}')
                    print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True)
                elif metric_name == 'Trajectory Error':
                    traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"]
                    line = f'---> [{model_name}]'
                    print(line)
                    print(line, file=f, flush=True)
                    line = ''
                    for i in range(len(mean)):  # zip(traj_err_key, mean):
                        line += ' (%s): Mean: %.4f CInt: %.4f; \n' % (traj_err_key[i], mean[i], conf_interval[i])
                    print(line)
                    print(line, file=f, flush=True)
                elif isinstance(mean, np.ndarray):
                    line = f'---> [{model_name}]'
                    for i in range(len(mean)):
                        line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i])
                    print(line)
                    print(line, file=f, flush=True)
        return mean_dict


if __name__ == '__main__':
    args = evaluation_inpainting_parser()
    assert args.multi_person == False, 'multi-person is not supported for this script'
    assert args.guidance_param == 2.5
    fixseed(args.seed)
    args.batch_size = 32  # This must be 32! Don't change it! otherwise it will cause a bug in R precision calc!
    model_name = os.path.basename(os.path.dirname(args.model_path))
    niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
    dataset_name = args.dataset
    #log_file = os.path.join(os.path.dirname(args.model_path), 'eval_{}_{}_{}'.format(dataset_name, model_name, niter))
    log_file = os.path.join(os.path.dirname(args.model_path), 'eval_niter_' + str(int(niter)) +'_'+ args.control_joint)
    assert args.inpainting_mask == 'global_joint', "This script only supports global_joint inpainting!"
    log_file += f'_mask{args.mask_ratio}'
    log_file += f'_bfgs_first{args.bfgs_times_first}_last{args.bfgs_times_last}_skip{args.bfgs_interval}'
    if args.use_posterior:
        log_file += '_posterior'
    else:
        log_file += '_x0'
    log_file += f'_{args.eval_mode}'
    log_file += '.log'
    print(f'Will save to log file [{log_file}]')
    assert args.overwrite or not os.path.exists(log_file), "Log file already exists!"
    print(f'Eval mode [{args.eval_mode}]')
    if args.eval_mode == 'debug':
        num_samples_limit = 1000  # None means no limit (eval over all dataset)
        run_mm = False
        mm_num_samples = 0
        mm_num_repeats = 0
        mm_num_times = 0
        diversity_times = 300
        replication_times = 5  # about 3 Hrs
    elif args.eval_mode == 'wo_mm':
        num_samples_limit = 1000
        run_mm = False
        mm_num_samples = 0
        mm_num_repeats = 0
        mm_num_times = 0
        diversity_times = 300
        replication_times = 20  # about 12 Hrs
    elif args.eval_mode == 'mm_short':
        num_samples_limit = 1000
        run_mm = True
        mm_num_samples = 100
        mm_num_repeats = 30
        mm_num_times = 10
        diversity_times = 300
        replication_times = 5  # about 15 Hrs
    else:
        raise ValueError()
    replication_times = replication_times if args.replication_times is None else args.replication_times
    dist_util.setup_dist(args.device)
    logger.configure()
    logger.log("creating data loader...")
    split = 'test'
    gt_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='gt')
    gen_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='eval')
    num_actions = gen_loader.dataset.num_actions
    logger.log("Creating model and diffusion...")
    DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion
    model, diffusion = load_controlmdm_and_diffusion(args, gen_loader, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass)
    diffusion.mean = gen_loader.dataset.t2m_dataset.mean
    diffusion.std = gen_loader.dataset.t2m_dataset.std

    eval_motion_loaders = {
        ################
        ## HumanML3D Dataset##
        ################
        'vald': lambda: get_mdm_loader(
            args, model, diffusion, args.batch_size,
            gen_loader, mm_num_samples, mm_num_repeats, gt_loader.dataset.opt.max_motion_length,
            num_samples_limit, args.guidance_param
        )
    }
torch.multiprocessing.set_sharing_strategy('file_system')


def evaluate_matching_score(eval_wrapper, motion_loaders, file):
    match_score_dict = OrderedDict({})
    R_precision_dict = OrderedDict({})
    activation_dict = OrderedDict({})
    trajectory_score_dict = OrderedDict({})
    skating_ratio_dict = OrderedDict({})
    print('========== Evaluating Matching Score ==========')
    for motion_loader_name, motion_loader in motion_loaders.items():
        all_motion_embeddings = []
        score_list = []
        all_size = 0
        matching_score_sum = 0
        top_k_count = 0
        skate_ratio_sum = 0.0
        traj_err = []
        traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"]
        # print(motion_loader_name)
        with torch.no_grad():
            for idx, batch in enumerate(motion_loader):
                if motion_loader_name == 'ground truth':
                    word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _, _ = batch
                else:
                    assert motion_loader_name == 'vald'  # tested method named vald as default
                    word_embeddings, pos_one_hots, _, sent_lens, motions, m_lens, _, skate_ratio, err_np = batch
                text_embeddings, motion_embeddings = eval_wrapper.get_co_embeddings(
                    word_embs=word_embeddings,
                    pos_ohot=pos_one_hots,
                    cap_lens=sent_lens,
                    motions=motions,
                    m_lens=m_lens)
                dist_mat = euclidean_distance_matrix(text_embeddings.cpu().numpy(),motion_embeddings.cpu().numpy())
                matching_score_sum += dist_mat.trace()
                argsmax = np.argsort(dist_mat, axis=1)
                top_k_mat = calculate_top_k(argsmax, top_k=3)
                top_k_count += top_k_mat.sum(axis=0)
                all_size += text_embeddings.shape[0]
                all_motion_embeddings.append(motion_embeddings.cpu().numpy())
                if motion_loader_name != 'ground truth':
                    traj_err.append(err_np)
                    skate_ratio_sum += skate_ratio.sum()

            all_motion_embeddings = np.concatenate(all_motion_embeddings, axis=0)
            matching_score = matching_score_sum / all_size
            R_precision = top_k_count / all_size
            match_score_dict[motion_loader_name] = matching_score
            R_precision_dict[motion_loader_name] = R_precision
            activation_dict[motion_loader_name] = all_motion_embeddings

            if motion_loader_name != 'ground truth':
                ### For trajecotry evaluation ###
                traj_err = np.concatenate(traj_err).mean(0)
                trajectory_score_dict[motion_loader_name] = traj_err
                line = f'---> [{motion_loader_name}] Traj Error: '
                print(line)
                print(line, file=file, flush=True)
                line = ''
                for (k, v) in zip(traj_err_key, traj_err):
                    line += ' (%s): %.4f \n' % (k, np.mean(v))
                print(line)
                print(line, file=file, flush=True)

                # For skating evaluation
                skating_score = skate_ratio_sum / all_size
                skating_ratio_dict[motion_loader_name] = skating_score
                print(f'---> [{motion_loader_name}] Skating Ratio: {skating_score:.4f}')
                print(f'---> [{motion_loader_name}] Skating Ratio: {skating_score:.4f}', file=file, flush=True)

        print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}')
        print(f'---> [{motion_loader_name}] Matching Score: {matching_score:.4f}',file=file,flush=True)

        line = f'---> [{motion_loader_name}] R_precision: '
        for i in range(len(R_precision)):
            line += '(top %d): %.4f ' % (i + 1, R_precision[i])
        print(line)
        print(line, file=file, flush=True)

    return match_score_dict, R_precision_dict, activation_dict, trajectory_score_dict, skating_ratio_dict


def evaluate_fid(eval_wrapper, groundtruth_loader, activation_dict, file):
    eval_dict = OrderedDict({})
    gt_motion_embeddings = []
    print('========== Evaluating FID ==========')
    with torch.no_grad():
        for idx, batch in enumerate(groundtruth_loader):
            _, _, _, sent_lens, motions, m_lens, _, _ = batch
            motion_embeddings = eval_wrapper.get_motion_embeddings(
                motions=motions,
                m_lens=m_lens
            )
            gt_motion_embeddings.append(motion_embeddings.cpu().numpy())
    gt_motion_embeddings = np.concatenate(gt_motion_embeddings, axis=0)
    gt_mu, gt_cov = calculate_activation_statistics(gt_motion_embeddings)

    # print(gt_mu)
    for model_name, motion_embeddings in activation_dict.items():
        mu, cov = calculate_activation_statistics(motion_embeddings)
        # print(mu)
        fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov)
        print(f'---> [{model_name}] FID: {fid:.4f}')
        print(f'---> [{model_name}] FID: {fid:.4f}', file=file, flush=True)
        eval_dict[model_name] = fid
    return eval_dict


def evaluate_diversity(activation_dict, file, diversity_times):
    eval_dict = OrderedDict({})
    print('========== Evaluating Diversity ==========')
    for model_name, motion_embeddings in activation_dict.items():
        diversity = calculate_diversity(motion_embeddings, diversity_times)
        eval_dict[model_name] = diversity
        print(f'---> [{model_name}] Diversity: {diversity:.4f}')
        print(f'---> [{model_name}] Diversity: {diversity:.4f}', file=file, flush=True)
    return eval_dict


def evaluate_multimodality(eval_wrapper, mm_motion_loaders, file, mm_num_times):
    eval_dict = OrderedDict({})
    print('========== Evaluating MultiModality ==========')
    for model_name, mm_motion_loader in mm_motion_loaders.items():
        mm_motion_embeddings = []
        with torch.no_grad():
            for idx, batch in enumerate(mm_motion_loader):
                # (1, mm_replications, dim_pos)
                motions, m_lens = batch
                motion_embedings = eval_wrapper.get_motion_embeddings(motions[0], m_lens[0])
                mm_motion_embeddings.append(motion_embedings.unsqueeze(0))
        if len(mm_motion_embeddings) == 0:
            multimodality = 0
        else:
            mm_motion_embeddings = torch.cat(mm_motion_embeddings, dim=0).cpu().numpy()
            multimodality = calculate_multimodality(mm_motion_embeddings, mm_num_times)
        print(f'---> [{model_name}] Multimodality: {multimodality:.4f}')
        print(f'---> [{model_name}] Multimodality: {multimodality:.4f}', file=file, flush=True)
        eval_dict[model_name] = multimodality
    return eval_dict


def get_metric_statistics(values, replication_times):
    mean = np.mean(values, axis=0)
    std = np.std(values, axis=0)
    conf_interval = 1.96 * std / np.sqrt(replication_times)
    return mean, conf_interval


def evaluation(eval_wrapper, gt_loader, eval_motion_loaders, log_file, replication_times, diversity_times, mm_num_times, run_mm=False):
    with open(log_file, 'w') as f:
        all_metrics = OrderedDict({'Matching Score': OrderedDict({}),
                                   'R_precision': OrderedDict({}),
                                   'FID': OrderedDict({}),
                                   'Diversity': OrderedDict({}),
                                   'MultiModality': OrderedDict({}),
                                   'Trajectory Error': OrderedDict({}),
                                   'Skating Ratio': OrderedDict({}),
                                   })
        for replication in range(replication_times):
            motion_loaders = {}
            mm_motion_loaders = {}
            for motion_loader_name, motion_loader_getter in eval_motion_loaders.items():
                motion_loader, mm_motion_loader = motion_loader_getter()
                motion_loaders[motion_loader_name] = motion_loader
                mm_motion_loaders[motion_loader_name] = mm_motion_loader
            motion_loaders['ground truth'] = gt_loader

            print(f'==================== Replication {replication} ====================')
            print(f'==================== Replication {replication} ====================', file=f, flush=True)
            print(f'Time: {datetime.now()}')
            print(f'Time: {datetime.now()}', file=f, flush=True)
            mat_score_dict, R_precision_dict, acti_dict, trajectory_score_dict, skating_ratio_dict = evaluate_matching_score(eval_wrapper, motion_loaders, f)

            print(f'Time: {datetime.now()}')
            print(f'Time: {datetime.now()}', file=f, flush=True)
            fid_score_dict = evaluate_fid(eval_wrapper, gt_loader, acti_dict, f)

            print(f'Time: {datetime.now()}')
            print(f'Time: {datetime.now()}', file=f, flush=True)
            div_score_dict = evaluate_diversity(acti_dict, f, diversity_times)

            if run_mm:
                print(f'Time: {datetime.now()}')
                print(f'Time: {datetime.now()}', file=f, flush=True)
                mm_score_dict = evaluate_multimodality(eval_wrapper, mm_motion_loaders, f, mm_num_times)

            print(f'!!! DONE !!!')
            print(f'!!! DONE !!!', file=f, flush=True)

            for key, item in trajectory_score_dict.items():
                if key not in all_metrics['Trajectory Error']:
                    all_metrics['Trajectory Error'][key] = [item]
                else:
                    all_metrics['Trajectory Error'][key] += [item]

            for key, item in skating_ratio_dict.items():
                if key not in all_metrics['Skating Ratio']:
                    all_metrics['Skating Ratio'][key] = [item]
                else:
                    all_metrics['Skating Ratio'][key] += [item]

            for key, item in mat_score_dict.items():
                if key not in all_metrics['Matching Score']:
                    all_metrics['Matching Score'][key] = [item]
                else:
                    all_metrics['Matching Score'][key] += [item]

            for key, item in R_precision_dict.items():
                if key not in all_metrics['R_precision']:
                    all_metrics['R_precision'][key] = [item]
                else:
                    all_metrics['R_precision'][key] += [item]

            for key, item in fid_score_dict.items():
                if key not in all_metrics['FID']:
                    all_metrics['FID'][key] = [item]
                else:
                    all_metrics['FID'][key] += [item]

            for key, item in div_score_dict.items():
                if key not in all_metrics['Diversity']:
                    all_metrics['Diversity'][key] = [item]
                else:
                    all_metrics['Diversity'][key] += [item]

            if run_mm:
                for key, item in mm_score_dict.items():
                    if key not in all_metrics['MultiModality']:
                        all_metrics['MultiModality'][key] = [item]
                    else:
                        all_metrics['MultiModality'][key] += [item]

        # print(all_metrics['Diversity'])
        mean_dict = {}
        for metric_name, metric_dict in all_metrics.items():
            print('========== %s Summary ==========' % metric_name)
            print('========== %s Summary ==========' % metric_name, file=f, flush=True)
            for model_name, values in metric_dict.items():
                # print(metric_name, model_name)
                mean, conf_interval = get_metric_statistics(np.array(values), replication_times)
                mean_dict[metric_name + '_' + model_name] = mean
                # print(mean, mean.dtype)
                if isinstance(mean, np.float64) or isinstance(mean, np.float32):
                    print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}')
                    print(f'---> [{model_name}] Mean: {mean:.4f} CInterval: {conf_interval:.4f}', file=f, flush=True)
                elif metric_name == 'Trajectory Error':
                    traj_err_key = ["traj_fail_20cm", "traj_fail_50cm", "loc_fail_20cm", "loc_fail_50cm", "avg_err(m)"]
                    line = f'---> [{model_name}]'
                    print(line)
                    print(line, file=f, flush=True)
                    line = ''
                    for i in range(len(mean)):  # zip(traj_err_key, mean):
                        line += ' (%s): Mean: %.4f CInt: %.4f; \n' % (traj_err_key[i], mean[i], conf_interval[i])
                    print(line)
                    print(line, file=f, flush=True)
                elif isinstance(mean, np.ndarray):
                    line = f'---> [{model_name}]'
                    for i in range(len(mean)):
                        line += '(top %d) Mean: %.4f CInt: %.4f;' % (i+1, mean[i], conf_interval[i])
                    print(line)
                    print(line, file=f, flush=True)
        return mean_dict


if __name__ == '__main__':
    args = evaluation_inpainting_parser()
    assert args.multi_person == False, 'multi-person is not supported for this script'
    assert args.guidance_param == 2.5
    fixseed(args.seed)
    args.batch_size = 32  # This must be 32! Don't change it! otherwise it will cause a bug in R precision calc!
    model_name = os.path.basename(os.path.dirname(args.model_path))
    niter = os.path.basename(args.model_path).replace('model', '').replace('.pt', '')
    dataset_name = args.dataset
    #log_file = os.path.join(os.path.dirname(args.model_path), 'eval_{}_{}_{}'.format(dataset_name, model_name, niter))
    log_file = os.path.join(os.path.dirname(args.model_path), 'eval_niter_' + str(int(niter)) +'_'+ args.control_joint)
    assert args.inpainting_mask == 'global_joint', "This script only supports global_joint inpainting!"
    log_file += f'_mask{args.mask_ratio}'
    log_file += f'_bfgs_first{args.bfgs_times_first}_last{args.bfgs_times_last}_skip{args.bfgs_interval}'
    if args.use_posterior:
        log_file += '_posterior'
    else:
        log_file += '_x0'
    log_file += f'_{args.eval_mode}'
    log_file += '.log'
    print(f'Will save to log file [{log_file}]')
    assert args.overwrite or not os.path.exists(log_file), "Log file already exists!"
    print(f'Eval mode [{args.eval_mode}]')
    if args.eval_mode == 'debug':
        num_samples_limit = 1000  # None means no limit (eval over all dataset)
        run_mm = False
        mm_num_samples = 0
        mm_num_repeats = 0
        mm_num_times = 0
        diversity_times = 300
        replication_times = 5  # about 3 Hrs
    elif args.eval_mode == 'wo_mm':
        num_samples_limit = 1000
        run_mm = False
        mm_num_samples = 0
        mm_num_repeats = 0
        mm_num_times = 0
        diversity_times = 300
        replication_times = 20  # about 12 Hrs
    elif args.eval_mode == 'mm_short':
        num_samples_limit = 1000
        run_mm = True
        mm_num_samples = 100
        mm_num_repeats = 30
        mm_num_times = 10
        diversity_times = 300
        replication_times = 5  # about 15 Hrs
    else:
        raise ValueError()
    replication_times = replication_times if args.replication_times is None else args.replication_times
    dist_util.setup_dist(args.device)
    logger.configure()
    logger.log("creating data loader...")
    split = 'test'
    gt_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='gt')
    gen_loader = get_dataset_loader(name=args.dataset, batch_size=args.batch_size, num_frames=None, split=split, load_mode='eval')
    num_actions = gen_loader.dataset.num_actions
    logger.log("Creating model and diffusion...")
    DiffusionClass = ControlGaussianDiffusion if args.filter_noise else SpacedDiffusion
    model, diffusion = load_controlmdm_and_diffusion(args, gen_loader, dist_util.dev(), ModelClass=ControlMDM, DiffusionClass=DiffusionClass)
    diffusion.mean = gen_loader.dataset.t2m_dataset.mean
    diffusion.std = gen_loader.dataset.t2m_dataset.std

    eval_motion_loaders = {
        ################
        ## HumanML3D Dataset##
        ################
        'vald': lambda: get_mdm_loader(
            args, model, diffusion, args.batch_size,
            gen_loader, mm_num_samples, mm_num_repeats, gt_loader.dataset.opt.max_motion_length,
            num_samples_limit, args.guidance_param
        )
    }
eval_wrapper = EvaluatorMDMWrapper(args.dataset, dist_util.dev())
5
2023-11-27 05:28:02+00:00
24k
camenduru/magicanimate-hf
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetModel", "path": "magicanimate/models/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.controlnet import ControlNetModel
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
19,383
verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = 
latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel, controlnet: ControlNetModel, scheduler: Union[ DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler, EulerDiscreteScheduler, EulerAncestralDiscreteScheduler, DPMSolverMultistepScheduler, ], ): super().__init__() if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`" f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure " "to update the config accordingly as leaving `steps_offset` might led to incorrect results" " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub," " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`" " file" ) deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["steps_offset"] = 1 scheduler._internal_dict = FrozenDict(new_config) if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True: deprecation_message = ( f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`." " `clip_sample` should be set to False in the configuration file. Please make sure to update the" " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in" " future versions. 
If you have downloaded this checkpoint from the Hugging Face Hub, it would be very" " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file" ) deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(scheduler.config) new_config["clip_sample"] = False scheduler._internal_dict = FrozenDict(new_config) is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse( version.parse(unet.config._diffusers_version).base_version ) < version.parse("0.9.0.dev0") is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64 if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64: deprecation_message = ( "The configuration file of the unet has set the default `sample_size` to smaller than" " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the" " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-" " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5" " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the" " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`" " in the config might lead to incorrect results in future versions. If you have downloaded this" " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for" " the `unet/config.json` file" ) deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False) new_config = dict(unet.config) new_config["sample_size"] = 64 unet._internal_dict = FrozenDict(new_config) self.register_modules( vae=vae, text_encoder=text_encoder, tokenizer=tokenizer, unet=unet, controlnet=controlnet, scheduler=scheduler, ) self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) def enable_vae_slicing(self): self.vae.enable_slicing() def disable_vae_slicing(self): self.vae.disable_slicing() def enable_sequential_cpu_offload(self, gpu_id=0): if is_accelerate_available(): else: raise ImportError("Please install accelerate via `pip install accelerate`") device = torch.device(f"cuda:{gpu_id}") for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae]: if cpu_offloaded_model is not None: cpu_offload(cpu_offloaded_model, device) @property def _execution_device(self): if self.device != torch.device("meta") or not hasattr(self.unet, "_hf_hook"): return self.device for module in self.unet.modules(): if ( hasattr(module, "_hf_hook") and hasattr(module._hf_hook, "execution_device") and module._hf_hook.execution_device is not None ): return torch.device(module._hf_hook.execution_device) return self.device def _encode_prompt(self, prompt, device, num_videos_per_prompt, do_classifier_free_guidance, negative_prompt): batch_size = len(prompt) if isinstance(prompt, list) else 1 text_inputs = self.tokenizer( prompt, padding="max_length", max_length=self.tokenizer.model_max_length, truncation=True, return_tensors="pt", ) text_input_ids = text_inputs.input_ids untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids): removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]) logger.warning( "The following part of your input was truncated because CLIP can only handle sequences up to" f" 
{self.tokenizer.model_max_length} tokens: {removed_text}" ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = text_inputs.attention_mask.to(device) else: attention_mask = None text_embeddings = self.text_encoder( text_input_ids.to(device), attention_mask=attention_mask, ) text_embeddings = text_embeddings[0] # duplicate text embeddings for each generation per prompt, using mps friendly method bs_embed, seq_len, _ = text_embeddings.shape text_embeddings = text_embeddings.repeat(1, num_videos_per_prompt, 1) text_embeddings = text_embeddings.view(bs_embed * num_videos_per_prompt, seq_len, -1) # get unconditional embeddings for classifier free guidance if do_classifier_free_guidance: uncond_tokens: List[str] if negative_prompt is None: uncond_tokens = [""] * batch_size elif type(prompt) is not type(negative_prompt): raise TypeError( f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !=" f" {type(prompt)}." ) elif isinstance(negative_prompt, str): uncond_tokens = [negative_prompt] elif batch_size != len(negative_prompt): raise ValueError( f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:" f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches" " the batch size of `prompt`." ) else: uncond_tokens = negative_prompt max_length = text_input_ids.shape[-1] uncond_input = self.tokenizer( uncond_tokens, padding="max_length", max_length=max_length, truncation=True, return_tensors="pt", ) if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask: attention_mask = uncond_input.attention_mask.to(device) else: attention_mask = None uncond_embeddings = self.text_encoder( uncond_input.input_ids.to(device), attention_mask=attention_mask, ) uncond_embeddings = uncond_embeddings[0] # duplicate unconditional embeddings for each generation per prompt, using mps friendly method seq_len = uncond_embeddings.shape[1] uncond_embeddings = uncond_embeddings.repeat(1, num_videos_per_prompt, 1) uncond_embeddings = uncond_embeddings.view(batch_size * num_videos_per_prompt, seq_len, -1) # For classifier free guidance, we need to do two forward passes. # Here we concatenate the unconditional and text embeddings into a single batch # to avoid doing two forward passes text_embeddings = torch.cat([uncond_embeddings, text_embeddings]) return text_embeddings def decode_latents(self, latents, rank, decoder_consistency=None): video_length = latents.shape[2] latents = 1 / 0.18215 * latents latents = rearrange(latents, "b c f h w -> (b f) c h w") # video = self.vae.decode(latents).sample video = [] for frame_idx in tqdm(range(latents.shape[0]), disable=(rank!=0)): if decoder_consistency is not None: video.append(decoder_consistency(latents[frame_idx:frame_idx+1])) else: video.append(self.vae.decode(latents[frame_idx:frame_idx+1]).sample) video = torch.cat(video) video = rearrange(video, "(b f) c h w -> b c f h w", f=video_length) video = (video / 2 + 0.5).clamp(0, 1) # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 video = video.cpu().float().numpy() return video def prepare_extra_step_kwargs(self, generator, eta): # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 # and should be between [0, 1] accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) extra_step_kwargs = {} if accepts_eta: extra_step_kwargs["eta"] = eta # check if the scheduler accepts generator accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) if accepts_generator: extra_step_kwargs["generator"] = generator return extra_step_kwargs def check_inputs(self, prompt, height, width, callback_steps): if not isinstance(prompt, str) and not isinstance(prompt, list): raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}") if height % 8 != 0 or width % 8 != 0: raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") if (callback_steps is None) or ( callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) ): raise ValueError( f"`callback_steps` has to be a positive integer but is {callback_steps} of type" f" {type(callback_steps)}." ) def prepare_latents(self, batch_size, num_channels_latents, video_length, height, width, dtype, device, generator, latents=None, clip_length=16): shape = (batch_size, num_channels_latents, clip_length, height // self.vae_scale_factor, width // self.vae_scale_factor) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." ) if latents is None: rand_device = "cpu" if device.type == "mps" else device if isinstance(generator, list): latents = [ torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype) for i in range(batch_size) ] latents = torch.cat(latents, dim=0).to(device) else: latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype).to(device) latents = latents.repeat(1, 1, video_length//clip_length, 1, 1) else: if latents.shape != shape: raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") latents = latents.to(device) # scale the initial noise by the standard deviation required by the scheduler latents = latents * self.scheduler.init_noise_sigma return latents def prepare_condition(self, condition, num_videos_per_prompt, device, dtype, do_classifier_free_guidance): # prepare conditions for controlnet condition = torch.from_numpy(condition.copy()).to(device=device, dtype=dtype) / 255.0 condition = torch.stack([condition for _ in range(num_videos_per_prompt)], dim=0) condition = rearrange(condition, 'b f h w c -> (b f) c h w').clone() if do_classifier_free_guidance: condition = torch.cat([condition] * 2) return condition def next_step( self, model_output: torch.FloatTensor, timestep: int, x: torch.FloatTensor, eta=0., verbose=False ): """ Inverse sampling for DDIM Inversion """ if verbose: print("timestep: ", timestep) next_step = timestep timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999) alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod alpha_prod_t_next = self.scheduler.alphas_cumprod[next_step] beta_prod_t = 1 - alpha_prod_t pred_x0 = (x - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 pred_dir = (1 - alpha_prod_t_next)**0.5 * model_output x_next = alpha_prod_t_next**0.5 * pred_x0 + pred_dir return 
x_next, pred_x0 @torch.no_grad() def images2latents(self, images, dtype): """ Convert RGB image to VAE latents """ device = self._execution_device images = torch.from_numpy(images).float().to(dtype) / 127.5 - 1 images = rearrange(images, "f h w c -> f c h w").to(device) latents = [] for frame_idx in range(images.shape[0]): latents.append(self.vae.encode(images[frame_idx:frame_idx+1])['latent_dist'].mean * 0.18215) latents = torch.cat(latents) return latents @torch.no_grad() def invert( self, image: torch.Tensor, prompt, num_inference_steps=20, num_actual_inference_steps=10, eta=0.0, return_intermediates=False, **kwargs): """ Adapted from: https://github.com/Yujun-Shi/DragDiffusion/blob/main/drag_pipeline.py#L440 invert a real image into noise map with determinisc DDIM inversion """ device = self._execution_device batch_size = image.shape[0] if isinstance(prompt, list): if batch_size == 1: image = image.expand(len(prompt), -1, -1, -1) elif isinstance(prompt, str): if batch_size > 1: prompt = [prompt] * batch_size # text embeddings text_input = self.tokenizer( prompt, padding="max_length", max_length=77, return_tensors="pt" ) text_embeddings = self.text_encoder(text_input.input_ids.to(device))[0] print("input text embeddings :", text_embeddings.shape) # define initial latents latents = self.images2latents(image) print("latents shape: ", latents.shape) # interative sampling self.scheduler.set_timesteps(num_inference_steps) print("Valid timesteps: ", reversed(self.scheduler.timesteps)) latents_list = [latents] pred_x0_list = [latents] for i, t in enumerate(tqdm(reversed(self.scheduler.timesteps), desc="DDIM Inversion")): if num_actual_inference_steps is not None and i >= num_actual_inference_steps: continue model_inputs = latents # predict the noise # NOTE: the u-net here is UNet3D, therefore the model_inputs need to be of shape (b c f h w) model_inputs = rearrange(model_inputs, "f c h w -> 1 c f h w") noise_pred = self.unet(model_inputs, t, encoder_hidden_states=text_embeddings).sample noise_pred = rearrange(noise_pred, "b c f h w -> (b f) c h w") # compute the previous noise sample x_t-1 -> x_t latents, pred_x0 = self.next_step(noise_pred, t, latents) latents_list.append(latents) pred_x0_list.append(pred_x0) if return_intermediates: # return the intermediate laters during inversion return latents, latents_list return latents def interpolate_latents(self, latents: torch.Tensor, interpolation_factor:int, device ): if interpolation_factor < 2: return latents new_latents = torch.zeros( (latents.shape[0],latents.shape[1],((latents.shape[2]-1) * interpolation_factor)+1, latents.shape[3],latents.shape[4]), device=latents.device, dtype=latents.dtype, ) org_video_length = latents.shape[2] rate = [i/interpolation_factor for i in range(interpolation_factor)][1:] new_index = 0 v0 = None v1 = None for i0,i1 in zip( range( org_video_length ),range( org_video_length )[1:] ): v0 = latents[:,:,i0,:,:] v1 = latents[:,:,i1,:,:] new_latents[:,:,new_index,:,:] = v0 new_index += 1 for f in rate:
v = get_tensor_interpolation_method()(v0.to(device=device),v1.to(device=device),f)
5
2023-12-04 20:47:34+00:00
24k
metatube-community/metatube-plex-plugins
MetaTube.bundle/Contents/Libraries/Shared/urllib3/poolmanager.py
[ { "identifier": "HTTPHeaderDict", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class HTTPHeaderDict(MutableMapping):\n \"\"\"\n :param headers:\n An iterable of field-value pairs. Must not contain multiple field names\n when compared case-insensitively.\n\n :param kwargs:\n Additional field-value pairs to pass in to ``dict.update``.\n\n A ``dict`` like container for storing HTTP Headers.\n\n Field names are stored and compared case-insensitively in compliance with\n RFC 7230. Iteration provides the first case-sensitive key seen for each\n case-insensitive pair.\n\n Using ``__setitem__`` syntax overwrites fields that compare equal\n case-insensitively in order to maintain ``dict``'s api. For fields that\n compare equal, instead create a new ``HTTPHeaderDict`` and use ``.add``\n in a loop.\n\n If multiple fields that are equal case-insensitively are passed to the\n constructor or ``.update``, the behavior is undefined and some will be\n lost.\n\n >>> headers = HTTPHeaderDict()\n >>> headers.add('Set-Cookie', 'foo=bar')\n >>> headers.add('set-cookie', 'baz=quxx')\n >>> headers['content-length'] = '7'\n >>> headers['SET-cookie']\n 'foo=bar, baz=quxx'\n >>> headers['Content-Length']\n '7'\n \"\"\"\n\n def __init__(self, headers=None, **kwargs):\n super(HTTPHeaderDict, self).__init__()\n self._container = OrderedDict()\n if headers is not None:\n if isinstance(headers, HTTPHeaderDict):\n self._copy_from(headers)\n else:\n self.extend(headers)\n if kwargs:\n self.extend(kwargs)\n\n def __setitem__(self, key, val):\n self._container[key.lower()] = [key, val]\n return self._container[key.lower()]\n\n def __getitem__(self, key):\n val = self._container[key.lower()]\n return \", \".join(val[1:])\n\n def __delitem__(self, key):\n del self._container[key.lower()]\n\n def __contains__(self, key):\n return key.lower() in self._container\n\n def __eq__(self, other):\n if not isinstance(other, Mapping) and not hasattr(other, \"keys\"):\n return False\n if not isinstance(other, type(self)):\n other = type(self)(other)\n return dict((k.lower(), v) for k, v in self.itermerged()) == dict(\n (k.lower(), v) for k, v in other.itermerged()\n )\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n if six.PY2: # Python 2\n iterkeys = MutableMapping.iterkeys\n itervalues = MutableMapping.itervalues\n\n __marker = object()\n\n def __len__(self):\n return len(self._container)\n\n def __iter__(self):\n # Only provide the originally cased names\n for vals in self._container.values():\n yield vals[0]\n\n def pop(self, key, default=__marker):\n \"\"\"D.pop(k[,d]) -> v, remove specified key and return the corresponding value.\n If key is not found, d is returned if given, otherwise KeyError is raised.\n \"\"\"\n # Using the MutableMapping function directly fails due to the private marker.\n # Using ordinary dict.pop would expose the internal structures.\n # So let's reinvent the wheel.\n try:\n value = self[key]\n except KeyError:\n if default is self.__marker:\n raise\n return default\n else:\n del self[key]\n return value\n\n def discard(self, key):\n try:\n del self[key]\n except KeyError:\n pass\n\n def add(self, key, val):\n \"\"\"Adds a (name, value) pair, doesn't overwrite the value if it already\n exists.\n\n >>> headers = HTTPHeaderDict(foo='bar')\n >>> headers.add('Foo', 'baz')\n >>> headers['foo']\n 'bar, baz'\n \"\"\"\n key_lower = key.lower()\n new_vals = [key, val]\n # Keep the common case aka no item present as fast as possible\n vals = 
self._container.setdefault(key_lower, new_vals)\n if new_vals is not vals:\n vals.append(val)\n\n def extend(self, *args, **kwargs):\n \"\"\"Generic import function for any type of header-like object.\n Adapted version of MutableMapping.update in order to insert items\n with self.add instead of self.__setitem__\n \"\"\"\n if len(args) > 1:\n raise TypeError(\n \"extend() takes at most 1 positional \"\n \"arguments ({0} given)\".format(len(args))\n )\n other = args[0] if len(args) >= 1 else ()\n\n if isinstance(other, HTTPHeaderDict):\n for key, val in other.iteritems():\n self.add(key, val)\n elif isinstance(other, Mapping):\n for key in other:\n self.add(key, other[key])\n elif hasattr(other, \"keys\"):\n for key in other.keys():\n self.add(key, other[key])\n else:\n for key, value in other:\n self.add(key, value)\n\n for key, value in kwargs.items():\n self.add(key, value)\n\n def getlist(self, key, default=__marker):\n \"\"\"Returns a list of all the values for the named field. Returns an\n empty list if the key doesn't exist.\"\"\"\n try:\n vals = self._container[key.lower()]\n except KeyError:\n if default is self.__marker:\n return []\n return default\n else:\n return vals[1:]\n\n def _prepare_for_method_change(self):\n \"\"\"\n Remove content-specific header fields before changing the request\n method to GET or HEAD according to RFC 9110, Section 15.4.\n \"\"\"\n content_specific_headers = [\n \"Content-Encoding\",\n \"Content-Language\",\n \"Content-Location\",\n \"Content-Type\",\n \"Content-Length\",\n \"Digest\",\n \"Last-Modified\",\n ]\n for header in content_specific_headers:\n self.discard(header)\n return self\n\n # Backwards compatibility for httplib\n getheaders = getlist\n getallmatchingheaders = getlist\n iget = getlist\n\n # Backwards compatibility for http.cookiejar\n get_all = getlist\n\n def __repr__(self):\n return \"%s(%s)\" % (type(self).__name__, dict(self.itermerged()))\n\n def _copy_from(self, other):\n for key in other:\n val = other.getlist(key)\n if isinstance(val, list):\n # Don't need to convert tuples\n val = list(val)\n self._container[key.lower()] = [key] + val\n\n def copy(self):\n clone = type(self)()\n clone._copy_from(self)\n return clone\n\n def iteritems(self):\n \"\"\"Iterate over all header lines, including duplicate ones.\"\"\"\n for key in self:\n vals = self._container[key.lower()]\n for val in vals[1:]:\n yield vals[0], val\n\n def itermerged(self):\n \"\"\"Iterate over all headers, merging duplicate ones together.\"\"\"\n for key in self:\n val = self._container[key.lower()]\n yield val[0], \", \".join(val[1:])\n\n def items(self):\n return list(self.iteritems())\n\n @classmethod\n def from_httplib(cls, message): # Python 2\n \"\"\"Read headers from a Python 2 httplib message object.\"\"\"\n # python2.7 does not expose a proper API for exporting multiheaders\n # efficiently. This function re-reads raw lines from the message\n # object and extracts the multiheaders properly.\n obs_fold_continued_leaders = (\" \", \"\\t\")\n headers = []\n\n for line in message.headers:\n if line.startswith(obs_fold_continued_leaders):\n if not headers:\n # We received a header line that starts with OWS as described\n # in RFC-7230 S3.2.4. 
This indicates a multiline header, but\n # there exists no previous header to which we can attach it.\n raise InvalidHeader(\n \"Header continuation with no previous header: %s\" % line\n )\n else:\n key, value = headers[-1]\n headers[-1] = (key, value + \" \" + line.strip())\n continue\n\n key, value = line.split(\":\", 1)\n headers.append((key, value.strip()))\n\n return cls(headers)" }, { "identifier": "RecentlyUsedContainer", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/_collections.py", "snippet": "class RecentlyUsedContainer(MutableMapping):\n \"\"\"\n Provides a thread-safe dict-like container which maintains up to\n ``maxsize`` keys while throwing away the least-recently-used keys beyond\n ``maxsize``.\n\n :param maxsize:\n Maximum number of recent elements to retain.\n\n :param dispose_func:\n Every time an item is evicted from the container,\n ``dispose_func(value)`` is called. Callback which will get called\n \"\"\"\n\n ContainerCls = OrderedDict\n\n def __init__(self, maxsize=10, dispose_func=None):\n self._maxsize = maxsize\n self.dispose_func = dispose_func\n\n self._container = self.ContainerCls()\n self.lock = RLock()\n\n def __getitem__(self, key):\n # Re-insert the item, moving it to the end of the eviction line.\n with self.lock:\n item = self._container.pop(key)\n self._container[key] = item\n return item\n\n def __setitem__(self, key, value):\n evicted_value = _Null\n with self.lock:\n # Possibly evict the existing value of 'key'\n evicted_value = self._container.get(key, _Null)\n self._container[key] = value\n\n # If we didn't evict an existing value, we might have to evict the\n # least recently used item from the beginning of the container.\n if len(self._container) > self._maxsize:\n _key, evicted_value = self._container.popitem(last=False)\n\n if self.dispose_func and evicted_value is not _Null:\n self.dispose_func(evicted_value)\n\n def __delitem__(self, key):\n with self.lock:\n value = self._container.pop(key)\n\n if self.dispose_func:\n self.dispose_func(value)\n\n def __len__(self):\n with self.lock:\n return len(self._container)\n\n def __iter__(self):\n raise NotImplementedError(\n \"Iteration over this class is unlikely to be threadsafe.\"\n )\n\n def clear(self):\n with self.lock:\n # Copy pointers to all values, then wipe the mapping\n values = list(itervalues(self._container))\n self._container.clear()\n\n if self.dispose_func:\n for value in values:\n self.dispose_func(value)\n\n def keys(self):\n with self.lock:\n return list(iterkeys(self._container))" }, { "identifier": "HTTPConnectionPool", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/connectionpool.py", "snippet": "class ConnectionPool(object):\nclass HTTPConnectionPool(ConnectionPool, RequestMethods):\nclass HTTPSConnectionPool(HTTPConnectionPool):\n def __init__(self, host, port=None):\n def __str__(self):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def close(self):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n _proxy_config=None,\n **conn_kw\n ):\n def _new_conn(self):\n def _get_conn(self, timeout=None):\n def _put_conn(self, conn):\n def _validate_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _get_timeout(self, timeout):\n def _raise_timeout(self, err, url, timeout_value):\n def _make_request(\n self, conn, method, url, timeout=_Default, chunked=False, **httplib_request_kw\n 
):\n def _absolute_url(self, path):\n def close(self):\n def is_same_host(self, url):\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n retries=None,\n redirect=True,\n assert_same_host=True,\n timeout=_Default,\n pool_timeout=None,\n release_conn=None,\n chunked=False,\n body_pos=None,\n **response_kw\n ):\n def _is_ssl_error_message_from_http_proxy(ssl_error):\n def __init__(\n self,\n host,\n port=None,\n strict=False,\n timeout=Timeout.DEFAULT_TIMEOUT,\n maxsize=1,\n block=False,\n headers=None,\n retries=None,\n _proxy=None,\n _proxy_headers=None,\n key_file=None,\n cert_file=None,\n cert_reqs=None,\n key_password=None,\n ca_certs=None,\n ssl_version=None,\n assert_hostname=None,\n assert_fingerprint=None,\n ca_cert_dir=None,\n **conn_kw\n ):\n def _prepare_conn(self, conn):\n def _prepare_proxy(self, conn):\n def _new_conn(self):\n def _validate_conn(self, conn):\ndef connection_from_url(url, **kw):\ndef _normalize_host(host, scheme):\ndef _close_pool_connections(pool):" }, { "identifier": "LocationValueError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class LocationValueError(ValueError, HTTPError):\n \"\"\"Raised when there is something wrong with a given URL input.\"\"\"\n\n pass" }, { "identifier": "MaxRetryError", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class MaxRetryError(RequestError):\n \"\"\"Raised when the maximum number of retries is exceeded.\n\n :param pool: The connection pool\n :type pool: :class:`~urllib3.connectionpool.HTTPConnectionPool`\n :param string url: The requested Url\n :param exceptions.Exception reason: The underlying error\n\n \"\"\"\n\n def __init__(self, pool, url, reason=None):\n self.reason = reason\n\n message = \"Max retries exceeded with url: %s (Caused by %r)\" % (url, reason)\n\n RequestError.__init__(self, pool, url, message)" }, { "identifier": "ProxySchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnknown(AssertionError, URLSchemeUnknown):\n \"\"\"ProxyManager does not support the supplied scheme\"\"\"\n\n # TODO(t-8ch): Stop inheriting from AssertionError in v2.0.\n\n def __init__(self, scheme):\n # 'localhost' is here because our URL parser parses\n # localhost:8080 -> scheme=localhost, remove if we fix this.\n if scheme == \"localhost\":\n scheme = None\n if scheme is None:\n message = \"Proxy URL had no scheme, should start with http:// or https://\"\n else:\n message = (\n \"Proxy URL had unsupported scheme %s, should use http:// or https://\"\n % scheme\n )\n super(ProxySchemeUnknown, self).__init__(message)" }, { "identifier": "ProxySchemeUnsupported", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class ProxySchemeUnsupported(ValueError):\n \"\"\"Fetching HTTPS resources through HTTPS proxies is unsupported\"\"\"\n\n pass" }, { "identifier": "URLSchemeUnknown", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/exceptions.py", "snippet": "class URLSchemeUnknown(LocationValueError):\n \"\"\"Raised when a URL input has an unsupported scheme.\"\"\"\n\n def __init__(self, scheme):\n message = \"Not supported URL scheme %s\" % scheme\n super(URLSchemeUnknown, self).__init__(message)\n\n self.scheme = scheme" }, { "identifier": "six", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/packages/six.py", "snippet": "PY2 = sys.version_info[0] == 2\nPY3 = sys.version_info[0] == 3\nPY34 = sys.version_info[0:2] 
>= (3, 4)\n MAXSIZE = sys.maxsize\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 31) - 1)\n MAXSIZE = int((1 << 63) - 1)\n class X(object):\nclass _LazyDescr(object):\nclass MovedModule(_LazyDescr):\nclass _LazyModule(types.ModuleType):\nclass MovedAttribute(_LazyDescr):\nclass _SixMetaPathImporter(object):\nclass _MovedItems(_LazyModule):\nclass Module_six_moves_urllib_parse(_LazyModule):\nclass Module_six_moves_urllib_error(_LazyModule):\nclass Module_six_moves_urllib_request(_LazyModule):\nclass Module_six_moves_urllib_response(_LazyModule):\nclass Module_six_moves_urllib_robotparser(_LazyModule):\nclass Module_six_moves_urllib(types.ModuleType):\n class Iterator(object):\n class metaclass(type):\n def __len__(self):\ndef _add_doc(func, doc):\ndef _import_module(name):\n def __init__(self, name):\n def __get__(self, obj, tp):\n def __init__(self, name, old, new=None):\n def _resolve(self):\n def __getattr__(self, attr):\n def __init__(self, name):\n def __dir__(self):\n def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):\n def _resolve(self):\n def __init__(self, six_module_name):\n def _add_module(self, mod, *fullnames):\n def _get_module(self, fullname):\n def find_module(self, fullname, path=None):\n def find_spec(self, fullname, path, target=None):\n def __get_module(self, fullname):\n def load_module(self, fullname):\n def is_package(self, fullname):\n def get_code(self, fullname):\n def create_module(self, spec):\n def exec_module(self, module):\n def __dir__(self):\ndef add_move(move):\ndef remove_move(name):\n def advance_iterator(it):\n def callable(obj):\n def get_unbound_function(unbound):\n def create_unbound_method(func, cls):\n def get_unbound_function(unbound):\n def create_bound_method(func, obj):\n def create_unbound_method(func, cls):\n def next(self):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def iterkeys(d, **kw):\n def itervalues(d, **kw):\n def iteritems(d, **kw):\n def iterlists(d, **kw):\n def b(s):\n def u(s):\n def b(s):\n def u(s):\n def byte2int(bs):\n def indexbytes(buf, i):\ndef assertCountEqual(self, *args, **kwargs):\ndef assertRaisesRegex(self, *args, **kwargs):\ndef assertRegex(self, *args, **kwargs):\ndef assertNotRegex(self, *args, **kwargs):\n def reraise(tp, value, tb=None):\n def exec_(_code_, _globs_=None, _locs_=None):\n def raise_from(value, from_value):\n def print_(*args, **kwargs):\n def write(data):\n def print_(*args, **kwargs):\n def _update_wrapper(\n wrapper,\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\n def wraps(\n wrapped,\n assigned=functools.WRAPPER_ASSIGNMENTS,\n updated=functools.WRAPPER_UPDATES,\n ):\ndef with_metaclass(meta, *bases):\n def __new__(cls, name, this_bases, d):\n def __prepare__(cls, name, this_bases):\ndef add_metaclass(metaclass):\n def wrapper(cls):\ndef ensure_binary(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_str(s, encoding=\"utf-8\", errors=\"strict\"):\ndef ensure_text(s, encoding=\"utf-8\", errors=\"strict\"):\ndef python_2_unicode_compatible(klass):" }, { "identifier": "RequestMethods", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/request.py", "snippet": "class RequestMethods(object):\n \"\"\"\n Convenience mixin for classes who implement a :meth:`urlopen` method, such\n as :class:`urllib3.HTTPConnectionPool` and\n :class:`urllib3.PoolManager`.\n\n Provides behavior for making common types of HTTP request methods and\n decides which type of 
request field encoding to use.\n\n Specifically,\n\n :meth:`.request_encode_url` is for sending requests whose fields are\n encoded in the URL (such as GET, HEAD, DELETE).\n\n :meth:`.request_encode_body` is for sending requests whose fields are\n encoded in the *body* of the request using multipart or www-form-urlencoded\n (such as for POST, PUT, PATCH).\n\n :meth:`.request` is for making any kind of request, it will look up the\n appropriate encoding format and use one of the above two methods to make\n the request.\n\n Initializer parameters:\n\n :param headers:\n Headers to include with all requests, unless other headers are given\n explicitly.\n \"\"\"\n\n _encode_url_methods = {\"DELETE\", \"GET\", \"HEAD\", \"OPTIONS\"}\n\n def __init__(self, headers=None):\n self.headers = headers or {}\n\n def urlopen(\n self,\n method,\n url,\n body=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **kw\n ): # Abstract\n raise NotImplementedError(\n \"Classes extending RequestMethods must implement \"\n \"their own ``urlopen`` method.\"\n )\n\n def request(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the appropriate encoding of\n ``fields`` based on the ``method`` used.\n\n This is a convenience method that requires the least amount of manual\n effort. It can be used in most situations, while still having the\n option to drop down to more specific methods when necessary, such as\n :meth:`request_encode_url`, :meth:`request_encode_body`,\n or even the lowest level :meth:`urlopen`.\n \"\"\"\n method = method.upper()\n\n urlopen_kw[\"request_url\"] = url\n\n if method in self._encode_url_methods:\n return self.request_encode_url(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n else:\n return self.request_encode_body(\n method, url, fields=fields, headers=headers, **urlopen_kw\n )\n\n def request_encode_url(self, method, url, fields=None, headers=None, **urlopen_kw):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the url. This is useful for request methods like GET, HEAD, DELETE, etc.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": headers}\n extra_kw.update(urlopen_kw)\n\n if fields:\n url += \"?\" + urlencode(fields)\n\n return self.urlopen(method, url, **extra_kw)\n\n def request_encode_body(\n self,\n method,\n url,\n fields=None,\n headers=None,\n encode_multipart=True,\n multipart_boundary=None,\n **urlopen_kw\n ):\n \"\"\"\n Make a request using :meth:`urlopen` with the ``fields`` encoded in\n the body. This is useful for request methods like POST, PUT, PATCH, etc.\n\n When ``encode_multipart=True`` (default), then\n :func:`urllib3.encode_multipart_formdata` is used to encode\n the payload with the appropriate content type. Otherwise\n :func:`urllib.parse.urlencode` is used with the\n 'application/x-www-form-urlencoded' content type.\n\n Multipart encoding must be used when posting files, and it's reasonably\n safe to use it in other times too. However, it may break request\n signing, such as with OAuth.\n\n Supports an optional ``fields`` parameter of key/value strings AND\n key/filetuple. A filetuple is a (filename, data, MIME type) tuple where\n the MIME type is optional. 
For example::\n\n fields = {\n 'foo': 'bar',\n 'fakefile': ('foofile.txt', 'contents of foofile'),\n 'realfile': ('barfile.txt', open('realfile').read()),\n 'typedfile': ('bazfile.bin', open('bazfile').read(),\n 'image/jpeg'),\n 'nonamefile': 'contents of nonamefile field',\n }\n\n When uploading a file, providing a filename (the first parameter of the\n tuple) is optional but recommended to best mimic behavior of browsers.\n\n Note that if ``headers`` are supplied, the 'Content-Type' header will\n be overwritten because it depends on the dynamic random boundary string\n which is used to compose the body of the request. The random boundary\n string can be explicitly set with the ``multipart_boundary`` parameter.\n \"\"\"\n if headers is None:\n headers = self.headers\n\n extra_kw = {\"headers\": {}}\n\n if fields:\n if \"body\" in urlopen_kw:\n raise TypeError(\n \"request got values for both 'fields' and 'body', can only specify one.\"\n )\n\n if encode_multipart:\n body, content_type = encode_multipart_formdata(\n fields, boundary=multipart_boundary\n )\n else:\n body, content_type = (\n urlencode(fields),\n \"application/x-www-form-urlencoded\",\n )\n\n extra_kw[\"body\"] = body\n extra_kw[\"headers\"] = {\"Content-Type\": content_type}\n\n extra_kw[\"headers\"].update(headers)\n extra_kw.update(urlopen_kw)\n\n return self.urlopen(method, url, **extra_kw)" }, { "identifier": "connection_requires_http_tunnel", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/proxy.py", "snippet": "def connection_requires_http_tunnel(\n proxy_url=None, proxy_config=None, destination_scheme=None\n):\n \"\"\"\n Returns True if the connection requires an HTTP CONNECT through the proxy.\n\n :param URL proxy_url:\n URL of the proxy.\n :param ProxyConfig proxy_config:\n Proxy configuration from poolmanager.py\n :param str destination_scheme:\n The scheme of the destination. (i.e https, http, etc)\n \"\"\"\n # If we're not using a proxy, no way to use a tunnel.\n if proxy_url is None:\n return False\n\n # HTTP destinations never require tunneling, we always forward.\n if destination_scheme == \"http\":\n return False\n\n # Support for forwarding with HTTPS proxies and HTTPS destinations.\n if (\n proxy_url.scheme == \"https\"\n and proxy_config\n and proxy_config.use_forwarding_for_https\n ):\n return False\n\n # Otherwise always use a tunnel.\n return True" }, { "identifier": "Retry", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/retry.py", "snippet": "class Retry(object):\n \"\"\"Retry configuration.\n\n Each retry attempt will create a new Retry object with updated values, so\n they can be safely reused.\n\n Retries can be defined as a default for a pool::\n\n retries = Retry(connect=5, read=2, redirect=5)\n http = PoolManager(retries=retries)\n response = http.request('GET', 'http://example.com/')\n\n Or per-request (which overrides the default for the pool)::\n\n response = http.request('GET', 'http://example.com/', retries=Retry(10))\n\n Retries can be disabled by passing ``False``::\n\n response = http.request('GET', 'http://example.com/', retries=False)\n\n Errors will be wrapped in :class:`~urllib3.exceptions.MaxRetryError` unless\n retries are disabled, in which case the causing exception will be raised.\n\n :param int total:\n Total number of retries to allow. 
Takes precedence over other counts.\n\n Set to ``None`` to remove this constraint and fall back on other\n counts.\n\n Set to ``0`` to fail on the first retry.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int connect:\n How many connection-related errors to retry on.\n\n These are errors raised before the request is sent to the remote server,\n which we assume has not triggered the server to process the request.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int read:\n How many times to retry on read errors.\n\n These errors are raised after the request was sent to the server, so the\n request may have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int redirect:\n How many redirects to perform. Limit this to avoid infinite redirect\n loops.\n\n A redirect is a HTTP response with a status code 301, 302, 303, 307 or\n 308.\n\n Set to ``0`` to fail on the first retry of this type.\n\n Set to ``False`` to disable and imply ``raise_on_redirect=False``.\n\n :param int status:\n How many times to retry on bad status codes.\n\n These are retries made on responses, where status code matches\n ``status_forcelist``.\n\n Set to ``0`` to fail on the first retry of this type.\n\n :param int other:\n How many times to retry on other errors.\n\n Other errors are errors that are not connect, read, redirect or status errors.\n These errors might be raised after the request was sent to the server, so the\n request might have side-effects.\n\n Set to ``0`` to fail on the first retry of this type.\n\n If ``total`` is not set, it's a good idea to set this to 0 to account\n for unexpected edge cases and avoid infinite retry loops.\n\n :param iterable allowed_methods:\n Set of uppercased HTTP method verbs that we should retry on.\n\n By default, we only retry on methods which are considered to be\n idempotent (multiple requests with the same parameters end with the\n same state). See :attr:`Retry.DEFAULT_ALLOWED_METHODS`.\n\n Set to a ``False`` value to retry on any verb.\n\n .. warning::\n\n Previously this parameter was named ``method_whitelist``, that\n usage is deprecated in v1.26.0 and will be removed in v2.0.\n\n :param iterable status_forcelist:\n A set of integer HTTP status codes that we should force a retry on.\n A retry is initiated if the request method is in ``allowed_methods``\n and the response status code is in ``status_forcelist``.\n\n By default, this is disabled with ``None``.\n\n :param float backoff_factor:\n A backoff factor to apply between attempts after the second try\n (most errors are resolved immediately by a second try without a\n delay). urllib3 will sleep for::\n\n {backoff factor} * (2 ** ({number of total retries} - 1))\n\n seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep\n for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer\n than :attr:`Retry.DEFAULT_BACKOFF_MAX`.\n\n By default, backoff is disabled (set to 0).\n\n :param bool raise_on_redirect: Whether, if the number of redirects is\n exhausted, to raise a MaxRetryError, or to return a response with a\n response code in the 3xx range.\n\n :param bool raise_on_status: Similar meaning to ``raise_on_redirect``:\n whether we should raise an exception, or return a response,\n if status falls in ``status_forcelist`` range and retries have\n been exhausted.\n\n :param tuple history: The history of the request encountered during\n each call to :meth:`~Retry.increment`. 
The list is in the order\n the requests occurred. Each list item is of class :class:`RequestHistory`.\n\n :param bool respect_retry_after_header:\n Whether to respect Retry-After header on status codes defined as\n :attr:`Retry.RETRY_AFTER_STATUS_CODES` or not.\n\n :param iterable remove_headers_on_redirect:\n Sequence of headers to remove from the request when a response\n indicating a redirect is returned before firing off the redirected\n request.\n \"\"\"\n\n #: Default methods to be used for ``allowed_methods``\n DEFAULT_ALLOWED_METHODS = frozenset(\n [\"HEAD\", \"GET\", \"PUT\", \"DELETE\", \"OPTIONS\", \"TRACE\"]\n )\n\n #: Default status codes to be used for ``status_forcelist``\n RETRY_AFTER_STATUS_CODES = frozenset([413, 429, 503])\n\n #: Default headers to be used for ``remove_headers_on_redirect``\n DEFAULT_REMOVE_HEADERS_ON_REDIRECT = frozenset([\"Cookie\", \"Authorization\"])\n\n #: Maximum backoff time.\n DEFAULT_BACKOFF_MAX = 120\n\n def __init__(\n self,\n total=10,\n connect=None,\n read=None,\n redirect=None,\n status=None,\n other=None,\n allowed_methods=_Default,\n status_forcelist=None,\n backoff_factor=0,\n raise_on_redirect=True,\n raise_on_status=True,\n history=None,\n respect_retry_after_header=True,\n remove_headers_on_redirect=_Default,\n # TODO: Deprecated, remove in v2.0\n method_whitelist=_Default,\n ):\n\n if method_whitelist is not _Default:\n if allowed_methods is not _Default:\n raise ValueError(\n \"Using both 'allowed_methods' and \"\n \"'method_whitelist' together is not allowed. \"\n \"Instead only use 'allowed_methods'\"\n )\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n stacklevel=2,\n )\n allowed_methods = method_whitelist\n if allowed_methods is _Default:\n allowed_methods = self.DEFAULT_ALLOWED_METHODS\n if remove_headers_on_redirect is _Default:\n remove_headers_on_redirect = self.DEFAULT_REMOVE_HEADERS_ON_REDIRECT\n\n self.total = total\n self.connect = connect\n self.read = read\n self.status = status\n self.other = other\n\n if redirect is False or total is False:\n redirect = 0\n raise_on_redirect = False\n\n self.redirect = redirect\n self.status_forcelist = status_forcelist or set()\n self.allowed_methods = allowed_methods\n self.backoff_factor = backoff_factor\n self.raise_on_redirect = raise_on_redirect\n self.raise_on_status = raise_on_status\n self.history = history or tuple()\n self.respect_retry_after_header = respect_retry_after_header\n self.remove_headers_on_redirect = frozenset(\n [h.lower() for h in remove_headers_on_redirect]\n )\n\n def new(self, **kw):\n params = dict(\n total=self.total,\n connect=self.connect,\n read=self.read,\n redirect=self.redirect,\n status=self.status,\n other=self.other,\n status_forcelist=self.status_forcelist,\n backoff_factor=self.backoff_factor,\n raise_on_redirect=self.raise_on_redirect,\n raise_on_status=self.raise_on_status,\n history=self.history,\n remove_headers_on_redirect=self.remove_headers_on_redirect,\n respect_retry_after_header=self.respect_retry_after_header,\n )\n\n # TODO: If already given in **kw we use what's given to us\n # If not given we need to figure out what to pass. We decide\n # based on whether our class has the 'method_whitelist' property\n # and if so we pass the deprecated 'method_whitelist' otherwise\n # we use 'allowed_methods'. 
Remove in v2.0\n if \"method_whitelist\" not in kw and \"allowed_methods\" not in kw:\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n params[\"method_whitelist\"] = self.allowed_methods\n else:\n params[\"allowed_methods\"] = self.allowed_methods\n\n params.update(kw)\n return type(self)(**params)\n\n @classmethod\n def from_int(cls, retries, redirect=True, default=None):\n \"\"\"Backwards-compatibility for the old retries format.\"\"\"\n if retries is None:\n retries = default if default is not None else cls.DEFAULT\n\n if isinstance(retries, Retry):\n return retries\n\n redirect = bool(redirect) and None\n new_retries = cls(retries, redirect=redirect)\n log.debug(\"Converted retries value: %r -> %r\", retries, new_retries)\n return new_retries\n\n def get_backoff_time(self):\n \"\"\"Formula for computing the current backoff\n\n :rtype: float\n \"\"\"\n # We want to consider only the last consecutive errors sequence (Ignore redirects).\n consecutive_errors_len = len(\n list(\n takewhile(lambda x: x.redirect_location is None, reversed(self.history))\n )\n )\n if consecutive_errors_len <= 1:\n return 0\n\n backoff_value = self.backoff_factor * (2 ** (consecutive_errors_len - 1))\n return min(self.DEFAULT_BACKOFF_MAX, backoff_value)\n\n def parse_retry_after(self, retry_after):\n # Whitespace: https://tools.ietf.org/html/rfc7230#section-3.2.4\n if re.match(r\"^\\s*[0-9]+\\s*$\", retry_after):\n seconds = int(retry_after)\n else:\n retry_date_tuple = email.utils.parsedate_tz(retry_after)\n if retry_date_tuple is None:\n raise InvalidHeader(\"Invalid Retry-After header: %s\" % retry_after)\n if retry_date_tuple[9] is None: # Python 2\n # Assume UTC if no timezone was specified\n # On Python2.7, parsedate_tz returns None for a timezone offset\n # instead of 0 if no timezone is given, where mktime_tz treats\n # a None timezone offset as local time.\n retry_date_tuple = retry_date_tuple[:9] + (0,) + retry_date_tuple[10:]\n\n retry_date = email.utils.mktime_tz(retry_date_tuple)\n seconds = retry_date - time.time()\n\n if seconds < 0:\n seconds = 0\n\n return seconds\n\n def get_retry_after(self, response):\n \"\"\"Get the value of Retry-After in seconds.\"\"\"\n\n retry_after = response.headers.get(\"Retry-After\")\n\n if retry_after is None:\n return None\n\n return self.parse_retry_after(retry_after)\n\n def sleep_for_retry(self, response=None):\n retry_after = self.get_retry_after(response)\n if retry_after:\n time.sleep(retry_after)\n return True\n\n return False\n\n def _sleep_backoff(self):\n backoff = self.get_backoff_time()\n if backoff <= 0:\n return\n time.sleep(backoff)\n\n def sleep(self, response=None):\n \"\"\"Sleep between retry attempts.\n\n This method will respect a server's ``Retry-After`` response header\n and sleep the duration of the time requested. If that is not present, it\n will use an exponential backoff. 
By default, the backoff factor is 0 and\n this method will return immediately.\n \"\"\"\n\n if self.respect_retry_after_header and response:\n slept = self.sleep_for_retry(response)\n if slept:\n return\n\n self._sleep_backoff()\n\n def _is_connection_error(self, err):\n \"\"\"Errors when we're fairly sure that the server did not receive the\n request, so it should be safe to retry.\n \"\"\"\n if isinstance(err, ProxyError):\n err = err.original_error\n return isinstance(err, ConnectTimeoutError)\n\n def _is_read_error(self, err):\n \"\"\"Errors that occur after the request has been started, so we should\n assume that the server began processing it.\n \"\"\"\n return isinstance(err, (ReadTimeoutError, ProtocolError))\n\n def _is_method_retryable(self, method):\n \"\"\"Checks if a given HTTP method should be retried upon, depending if\n it is included in the allowed_methods\n \"\"\"\n # TODO: For now favor if the Retry implementation sets its own method_whitelist\n # property outside of our constructor to avoid breaking custom implementations.\n if \"method_whitelist\" in self.__dict__:\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n allowed_methods = self.method_whitelist\n else:\n allowed_methods = self.allowed_methods\n\n if allowed_methods and method.upper() not in allowed_methods:\n return False\n return True\n\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\"Is this method/status code retryable? (Based on allowlists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if not self._is_method_retryable(method):\n return False\n\n if self.status_forcelist and status_code in self.status_forcelist:\n return True\n\n return (\n self.total\n and self.respect_retry_after_header\n and has_retry_after\n and (status_code in self.RETRY_AFTER_STATUS_CODES)\n )\n\n def is_exhausted(self):\n \"\"\"Are we out of retries?\"\"\"\n retry_counts = (\n self.total,\n self.connect,\n self.read,\n self.redirect,\n self.status,\n self.other,\n )\n retry_counts = list(filter(None, retry_counts))\n if not retry_counts:\n return False\n\n return min(retry_counts) < 0\n\n def increment(\n self,\n method=None,\n url=None,\n response=None,\n error=None,\n _pool=None,\n _stacktrace=None,\n ):\n \"\"\"Return a new Retry object with incremented retry counters.\n\n :param response: A response object, or None, if the server did not\n return a response.\n :type response: :class:`~urllib3.response.HTTPResponse`\n :param Exception error: An error encountered during the request, or\n None if the response was received successfully.\n\n :return: A new ``Retry`` object.\n \"\"\"\n if self.total is False and error:\n # Disabled, indicate to re-raise the error.\n raise six.reraise(type(error), error, _stacktrace)\n\n total = self.total\n if total is not None:\n total -= 1\n\n connect = self.connect\n read = self.read\n redirect = self.redirect\n status_count = self.status\n other = self.other\n cause = \"unknown\"\n status = None\n redirect_location = None\n\n if error and self._is_connection_error(error):\n # Connect retry?\n if connect is False:\n raise six.reraise(type(error), error, _stacktrace)\n elif connect is not None:\n connect -= 1\n\n 
elif error and self._is_read_error(error):\n # Read retry?\n if read is False or not self._is_method_retryable(method):\n raise six.reraise(type(error), error, _stacktrace)\n elif read is not None:\n read -= 1\n\n elif error:\n # Other retry?\n if other is not None:\n other -= 1\n\n elif response and response.get_redirect_location():\n # Redirect retry?\n if redirect is not None:\n redirect -= 1\n cause = \"too many redirects\"\n redirect_location = response.get_redirect_location()\n status = response.status\n\n else:\n # Incrementing because of a server error like a 500 in\n # status_forcelist and the given method is in the allowed_methods\n cause = ResponseError.GENERIC_ERROR\n if response and response.status:\n if status_count is not None:\n status_count -= 1\n cause = ResponseError.SPECIFIC_ERROR.format(status_code=response.status)\n status = response.status\n\n history = self.history + (\n RequestHistory(method, url, error, status, redirect_location),\n )\n\n new_retry = self.new(\n total=total,\n connect=connect,\n read=read,\n redirect=redirect,\n status=status_count,\n other=other,\n history=history,\n )\n\n if new_retry.is_exhausted():\n raise MaxRetryError(_pool, url, error or ResponseError(cause))\n\n log.debug(\"Incremented Retry for (url='%s'): %r\", url, new_retry)\n\n return new_retry\n\n def __repr__(self):\n return (\n \"{cls.__name__}(total={self.total}, connect={self.connect}, \"\n \"read={self.read}, redirect={self.redirect}, status={self.status})\"\n ).format(cls=type(self), self=self)\n\n def __getattr__(self, item):\n if item == \"method_whitelist\":\n # TODO: Remove this deprecated alias in v2.0\n warnings.warn(\n \"Using 'method_whitelist' with Retry is deprecated and \"\n \"will be removed in v2.0. Use 'allowed_methods' instead\",\n DeprecationWarning,\n )\n return self.allowed_methods\n try:\n return getattr(super(Retry, self), item)\n except AttributeError:\n return getattr(Retry, item)" }, { "identifier": "parse_url", "path": "MetaTube.bundle/Contents/Libraries/Shared/urllib3/util/url.py", "snippet": "def parse_url(url):\n \"\"\"\n Given a url, return a parsed :class:`.Url` namedtuple. Best-effort is\n performed to parse incomplete urls. 
Fields not provided will be None.\n This parser is RFC 3986 and RFC 6874 compliant.\n\n The parser logic and helper functions are based heavily on\n work done in the ``rfc3986`` module.\n\n :param str url: URL to parse into a :class:`.Url` namedtuple.\n\n Partly backwards-compatible with :mod:`urlparse`.\n\n Example::\n\n >>> parse_url('http://google.com/mail/')\n Url(scheme='http', host='google.com', port=None, path='/mail/', ...)\n >>> parse_url('google.com:80')\n Url(scheme=None, host='google.com', port=80, path=None, ...)\n >>> parse_url('/foo?bar')\n Url(scheme=None, host=None, port=None, path='/foo', query='bar', ...)\n \"\"\"\n if not url:\n # Empty\n return Url()\n\n source_url = url\n if not SCHEME_RE.search(url):\n url = \"//\" + url\n\n try:\n scheme, authority, path, query, fragment = URI_RE.match(url).groups()\n normalize_uri = scheme is None or scheme.lower() in NORMALIZABLE_SCHEMES\n\n if scheme:\n scheme = scheme.lower()\n\n if authority:\n auth, _, host_port = authority.rpartition(\"@\")\n auth = auth or None\n host, port = _HOST_PORT_RE.match(host_port).groups()\n if auth and normalize_uri:\n auth = _encode_invalid_chars(auth, USERINFO_CHARS)\n if port == \"\":\n port = None\n else:\n auth, host, port = None, None, None\n\n if port is not None:\n port = int(port)\n if not (0 <= port <= 65535):\n raise LocationParseError(url)\n\n host = _normalize_host(host, scheme)\n\n if normalize_uri and path:\n path = _remove_path_dot_segments(path)\n path = _encode_invalid_chars(path, PATH_CHARS)\n if normalize_uri and query:\n query = _encode_invalid_chars(query, QUERY_CHARS)\n if normalize_uri and fragment:\n fragment = _encode_invalid_chars(fragment, FRAGMENT_CHARS)\n\n except (ValueError, AttributeError):\n return six.raise_from(LocationParseError(source_url), None)\n\n # For the sake of backwards compatibility we put empty\n # string values for path if there are any defined values\n # beyond the path in the URL.\n # TODO: Remove this when we break backwards compatibility.\n if not path:\n if query is not None or fragment is not None:\n path = \"\"\n else:\n path = None\n\n # Ensure that each part of the URL is a `str` for\n # backwards compatibility.\n if isinstance(url, six.text_type):\n ensure_func = six.ensure_text\n else:\n ensure_func = six.ensure_str\n\n def ensure_type(x):\n return x if x is None else ensure_func(x)\n\n return Url(\n scheme=ensure_type(scheme),\n auth=ensure_type(auth),\n host=ensure_type(host),\n port=port,\n path=ensure_type(path),\n query=ensure_type(query),\n fragment=ensure_type(fragment),\n )" } ]
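Editor's note: the Retry snippet quoted in the context list above documents urllib3's exponential backoff formula and the [0.0s, 0.2s, 0.4s, ...] example schedule for backoff_factor=0.1. The short sketch below is an added illustration, not part of the dataset row; it only re-derives that schedule from the stated formula, and the constant names are local stand-ins mirroring the snippet (BACKOFF_MAX corresponds to Retry.DEFAULT_BACKOFF_MAX).

# Standalone sketch of the backoff rule described in the Retry docstring above:
# backoff_factor * (2 ** (consecutive_retries - 1)), capped at DEFAULT_BACKOFF_MAX.
BACKOFF_FACTOR = 0.1   # same example value as in the docstring
BACKOFF_MAX = 120      # mirrors Retry.DEFAULT_BACKOFF_MAX in the snippet

def backoff_time(consecutive_errors: int) -> float:
    """Seconds to sleep before the n-th consecutive retry (1-based)."""
    if consecutive_errors <= 1:
        return 0.0  # first retry is immediate, as in get_backoff_time()
    return min(BACKOFF_MAX, BACKOFF_FACTOR * (2 ** (consecutive_errors - 1)))

print([backoff_time(n) for n in range(1, 6)])
# [0.0, 0.2, 0.4, 0.8, 1.6]  -- matches the docstring's example schedule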
import collections import functools import logging from ._collections import HTTPHeaderDict, RecentlyUsedContainer from .connectionpool import HTTPConnectionPool, HTTPSConnectionPool, port_by_scheme from .exceptions import ( LocationValueError, MaxRetryError, ProxySchemeUnknown, ProxySchemeUnsupported, URLSchemeUnknown, ) from .packages import six from .packages.six.moves.urllib.parse import urljoin from .request import RequestMethods from .util.proxy import connection_requires_http_tunnel from .util.retry import Retry from .util.url import parse_url
14,705
Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. """ if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. 
if retries.remove_headers_on_redirect and not conn.is_same_host( redirect_location ): headers = list(six.iterkeys(kw["headers"])) for header in headers: if header.lower() in retries.remove_headers_on_redirect: kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn)
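Editor's note: the cropped code above stops inside PoolManager's redirect handling, but the row also quotes the _merge_pool_kwargs docstring, whose "a None value removes the key" rule is easy to miss. The following minimal re-implementation is an added illustration on plain dicts (the example values are made up), not part of the dataset row.

# Illustration of the override-merging rule described by _merge_pool_kwargs:
# None in the override removes the key; any other value overwrites it;
# the base dict itself is never mutated.
def merge_pool_kwargs(base, override):
    merged = base.copy()
    for key, value in (override or {}).items():
        if value is None:
            merged.pop(key, None)  # drop the key entirely
        else:
            merged[key] = value
    return merged

base = {"timeout": 3.0, "retries": 2, "block": True}
print(merge_pool_kwargs(base, {"retries": None, "maxsize": 10}))
# {'timeout': 3.0, 'block': True, 'maxsize': 10}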
from __future__ import absolute_import __all__ = ["PoolManager", "ProxyManager", "proxy_from_url"] log = logging.getLogger(__name__) SSL_KEYWORDS = ( "key_file", "cert_file", "cert_reqs", "ca_certs", "ssl_version", "ca_cert_dir", "ssl_context", "key_password", "server_hostname", ) # All known keyword arguments that could be provided to the pool manager, its # pools, or the underlying connections. This is used to construct a pool key. _key_fields = ( "key_scheme", # str "key_host", # str "key_port", # int "key_timeout", # int or float or Timeout "key_retries", # int or Retry "key_strict", # bool "key_block", # bool "key_source_address", # str "key_key_file", # str "key_key_password", # str "key_cert_file", # str "key_cert_reqs", # str "key_ca_certs", # str "key_ssl_version", # str "key_ca_cert_dir", # str "key_ssl_context", # instance of ssl.SSLContext or urllib3.util.ssl_.SSLContext "key_maxsize", # int "key_headers", # dict "key__proxy", # parsed proxy url "key__proxy_headers", # dict "key__proxy_config", # class "key_socket_options", # list of (level (int), optname (int), value (int or str)) tuples "key__socks_options", # dict "key_assert_hostname", # bool or string "key_assert_fingerprint", # str "key_server_hostname", # str ) #: The namedtuple class used to construct keys for the connection pool. #: All custom key schemes should include the fields in this key at a minimum. PoolKey = collections.namedtuple("PoolKey", _key_fields) _proxy_config_fields = ("ssl_context", "use_forwarding_for_https") ProxyConfig = collections.namedtuple("ProxyConfig", _proxy_config_fields) def _default_key_normalizer(key_class, request_context): """ Create a pool key out of a request context dictionary. According to RFC 3986, both the scheme and host are case-insensitive. Therefore, this function normalizes both before constructing the pool key for an HTTPS request. If you wish to change this behaviour, provide alternate callables to ``key_fn_by_scheme``. :param key_class: The class to use when constructing the key. This should be a namedtuple with the ``scheme`` and ``host`` keys at a minimum. :type key_class: namedtuple :param request_context: A dictionary-like object that contain the context for a request. :type request_context: dict :return: A namedtuple that can be used as a connection pool key. :rtype: PoolKey """ # Since we mutate the dictionary, make a copy first context = request_context.copy() context["scheme"] = context["scheme"].lower() context["host"] = context["host"].lower() # These are both dictionaries and need to be transformed into frozensets for key in ("headers", "_proxy_headers", "_socks_options"): if key in context and context[key] is not None: context[key] = frozenset(context[key].items()) # The socket_options key may be a list and needs to be transformed into a # tuple. socket_opts = context.get("socket_options") if socket_opts is not None: context["socket_options"] = tuple(socket_opts) # Map the kwargs to the names in the namedtuple - this is necessary since # namedtuples can't have fields starting with '_'. for key in list(context.keys()): context["key_" + key] = context.pop(key) # Default to ``None`` for keys missing from the context for field in key_class._fields: if field not in context: context[field] = None return key_class(**context) #: A dictionary that maps a scheme to a callable that creates a pool key. #: This can be used to alter the way pool keys are constructed, if desired. 
#: Each PoolManager makes a copy of this dictionary so they can be configured #: globally here, or individually on the instance. key_fn_by_scheme = { "http": functools.partial(_default_key_normalizer, PoolKey), "https": functools.partial(_default_key_normalizer, PoolKey), } pool_classes_by_scheme = {"http": HTTPConnectionPool, "https": HTTPSConnectionPool} class PoolManager(RequestMethods): """ Allows for arbitrary requests while transparently keeping track of necessary connection pools for you. :param num_pools: Number of connection pools to cache before discarding the least recently used pool. :param headers: Headers to include with all requests, unless other headers are given explicitly. :param \\**connection_pool_kw: Additional parameters are used to create fresh :class:`urllib3.connectionpool.ConnectionPool` instances. Example:: >>> manager = PoolManager(num_pools=2) >>> r = manager.request('GET', 'http://google.com/') >>> r = manager.request('GET', 'http://google.com/mail') >>> r = manager.request('GET', 'http://yahoo.com/') >>> len(manager.pools) 2 """ proxy = None proxy_config = None def __init__(self, num_pools=10, headers=None, **connection_pool_kw): RequestMethods.__init__(self, headers) self.connection_pool_kw = connection_pool_kw self.pools = RecentlyUsedContainer(num_pools) # Locally set the pool classes and keys so other PoolManagers can # override them. self.pool_classes_by_scheme = pool_classes_by_scheme self.key_fn_by_scheme = key_fn_by_scheme.copy() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.clear() # Return False to re-raise any potential exceptions return False def _new_pool(self, scheme, host, port, request_context=None): """ Create a new :class:`urllib3.connectionpool.ConnectionPool` based on host, port, scheme, and any additional pool keyword arguments. If ``request_context`` is provided, it is provided as keyword arguments to the pool class used. This method is used to actually create the connection pools handed out by :meth:`connection_from_url` and companion methods. It is intended to be overridden for customization. """ pool_cls = self.pool_classes_by_scheme[scheme] if request_context is None: request_context = self.connection_pool_kw.copy() # Although the context has everything necessary to create the pool, # this function has historically only used the scheme, host, and port # in the positional args. When an API change is acceptable these can # be removed. for key in ("scheme", "host", "port"): request_context.pop(key, None) if scheme == "http": for kw in SSL_KEYWORDS: request_context.pop(kw, None) return pool_cls(host, port, **request_context) def clear(self): """ Empty our store of pools and direct them all to close. This will not affect in-flight connections, but they will not be re-used after completion. """ self.pools.clear() def connection_from_host(self, host, port=None, scheme="http", pool_kwargs=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the host, port, and scheme. If ``port`` isn't given, it will be derived from the ``scheme`` using ``urllib3.connectionpool.port_by_scheme``. If ``pool_kwargs`` is provided, it is merged with the instance's ``connection_pool_kw`` variable and used to create the new connection pool, if one is needed. 
""" if not host: raise LocationValueError("No host specified.") request_context = self._merge_pool_kwargs(pool_kwargs) request_context["scheme"] = scheme or "http" if not port: port = port_by_scheme.get(request_context["scheme"].lower(), 80) request_context["port"] = port request_context["host"] = host return self.connection_from_context(request_context) def connection_from_context(self, request_context): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the request context. ``request_context`` must at least contain the ``scheme`` key and its value must be a key in ``key_fn_by_scheme`` instance variable. """ scheme = request_context["scheme"].lower() pool_key_constructor = self.key_fn_by_scheme.get(scheme) if not pool_key_constructor: raise URLSchemeUnknown(scheme) pool_key = pool_key_constructor(request_context) return self.connection_from_pool_key(pool_key, request_context=request_context) def connection_from_pool_key(self, pool_key, request_context=None): """ Get a :class:`urllib3.connectionpool.ConnectionPool` based on the provided pool key. ``pool_key`` should be a namedtuple that only contains immutable objects. At a minimum it must have the ``scheme``, ``host``, and ``port`` fields. """ with self.pools.lock: # If the scheme, host, or port doesn't match existing open # connections, open a new ConnectionPool. pool = self.pools.get(pool_key) if pool: return pool # Make a fresh ConnectionPool of the desired type scheme = request_context["scheme"] host = request_context["host"] port = request_context["port"] pool = self._new_pool(scheme, host, port, request_context=request_context) self.pools[pool_key] = pool return pool def connection_from_url(self, url, pool_kwargs=None): """ Similar to :func:`urllib3.connectionpool.connection_from_url`. If ``pool_kwargs`` is not provided and a new pool needs to be constructed, ``self.connection_pool_kw`` is used to initialize the :class:`urllib3.connectionpool.ConnectionPool`. If ``pool_kwargs`` is provided, it is used instead. Note that if a new pool does not need to be created for the request, the provided ``pool_kwargs`` are not used. """ u = parse_url(url) return self.connection_from_host( u.host, port=u.port, scheme=u.scheme, pool_kwargs=pool_kwargs ) def _merge_pool_kwargs(self, override): """ Merge a dictionary of override values for self.connection_pool_kw. This does not modify self.connection_pool_kw and returns a new dict. Any keys in the override dictionary with a value of ``None`` are removed from the merged dictionary. """ base_pool_kwargs = self.connection_pool_kw.copy() if override: for key, value in override.items(): if value is None: try: del base_pool_kwargs[key] except KeyError: pass else: base_pool_kwargs[key] = value return base_pool_kwargs def _proxy_requires_url_absolute_form(self, parsed_url): """ Indicates if the proxy requires the complete destination URL in the request. Normally this is only needed when not using an HTTP CONNECT tunnel. """ if self.proxy is None: return False return not connection_requires_http_tunnel( self.proxy, self.proxy_config, parsed_url.scheme ) def _validate_proxy_scheme_url_selection(self, url_scheme): """ Validates that were not attempting to do TLS in TLS connections on Python2 or with unsupported SSL implementations. 
""" if self.proxy is None or url_scheme != "https": return if self.proxy.scheme != "https": return if six.PY2 and not self.proxy_config.use_forwarding_for_https: raise ProxySchemeUnsupported( "Contacting HTTPS destinations through HTTPS proxies " "'via CONNECT tunnels' is not supported in Python 2" ) def urlopen(self, method, url, redirect=True, **kw): """ Same as :meth:`urllib3.HTTPConnectionPool.urlopen` with custom cross-host redirect logic and only sends the request-uri portion of the ``url``. The given ``url`` parameter must be absolute, such that an appropriate :class:`urllib3.connectionpool.ConnectionPool` can be chosen for it. """ u = parse_url(url) self._validate_proxy_scheme_url_selection(u.scheme) conn = self.connection_from_host(u.host, port=u.port, scheme=u.scheme) kw["assert_same_host"] = False kw["redirect"] = False if "headers" not in kw: kw["headers"] = self.headers.copy() if self._proxy_requires_url_absolute_form(u): response = conn.urlopen(method, url, **kw) else: response = conn.urlopen(method, u.request_uri, **kw) redirect_location = redirect and response.get_redirect_location() if not redirect_location: return response # Support relative URLs for redirecting. redirect_location = urljoin(url, redirect_location) if response.status == 303: # Change the method according to RFC 9110, Section 15.4.4. method = "GET" # And lose the body not to transfer anything sensitive. kw["body"] = None kw["headers"] = HTTPHeaderDict(kw["headers"])._prepare_for_method_change() retries = kw.get("retries") if not isinstance(retries, Retry): retries = Retry.from_int(retries, redirect=redirect) # Strip headers marked as unsafe to forward to the redirected location. # Check remove_headers_on_redirect to avoid a potential network call within # conn.is_same_host() which may use socket.gethostbyname() in the future. if retries.remove_headers_on_redirect and not conn.is_same_host( redirect_location ): headers = list(six.iterkeys(kw["headers"])) for header in headers: if header.lower() in retries.remove_headers_on_redirect: kw["headers"].pop(header, None) try: retries = retries.increment(method, url, response=response, _pool=conn)
except MaxRetryError:
4
2023-11-27 07:01:39+00:00
24k
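Editor's note: taken together, this row's snippets already contain end-to-end usage examples (the PoolManager and Retry docstrings). The sketch below only combines those documented examples into one runnable script; it is an added illustration, the argument values and URL are arbitrary, and running it performs a real network request.

# Usage sketch assembled from the docstring examples quoted in this row.
import urllib3
from urllib3.util.retry import Retry

retries = Retry(total=3, redirect=5, backoff_factor=0.1)
http = urllib3.PoolManager(num_pools=2, retries=retries)

# request() chooses URL- or body-encoding of `fields` from the HTTP method,
# as described in the RequestMethods snippet of this row.
resp = http.request("GET", "http://example.com/", fields={"q": "urllib3"})
print(resp.status, len(http.pools))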
NobiDeveloper/Nobita-Filter-Bot
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, { "identifier": "SUPPORT_CHAT", "path": "info.py", "snippet": "SUPPORT_CHAT = environ.get('SUPPORT_CHAT', 'NobiDeveloperSupport')" }, { "identifier": "MELCOW_NEW_USERS", "path": "info.py", "snippet": "MELCOW_NEW_USERS = is_enabled((environ.get('MELCOW_NEW_USERS', \"True\")), True)" }, { "identifier": "MELCOW_VID", "path": "info.py", "snippet": "MELCOW_VID = environ.get(\"MELCOW_VID\", \"https://telegra.ph/file/61ef9818986cef9554017.jpg\")" }, { "identifier": "CHNL_LNK", "path": "info.py", "snippet": "CHNL_LNK = environ.get('CHNL_LNK', 'https://telegram.me/NobiDeveloper')" }, { "identifier": "GRP_LNK", "path": "info.py", "snippet": "GRP_LNK = environ.get('GRP_LNK', 'https://telegram.me/NobiDeveloperSupport')" }, { "identifier": "db", "path": "database/users_chats_db.py", "snippet": "class Database:\n def __init__(self, uri, database_name):\n def new_user(self, id, name):\n def new_group(self, id, title):\n async def add_user(self, id, name):\n async def is_user_exist(self, id):\n async def total_users_count(self):\n async def remove_ban(self, id):\n async def ban_user(self, user_id, ban_reason=\"No Reason\"):\n async def get_ban_status(self, id):\n async def get_all_users(self):\n async def delete_user(self, user_id):\n async def get_banned(self):\n async def add_chat(self, chat, title):\n async def get_chat(self, chat):\n async def re_enable_chat(self, id):\n async def update_settings(self, id, settings):\n async def get_settings(self, id):\n async def disable_chat(self, chat, reason=\"No Reason\"):\n async def total_chat_count(self):\n async def get_all_chats(self):\n async def get_db_size(self):" }, { "identifier": "Media", "path": "database/ia_filterdb.py", "snippet": "class Media(Document):\n file_id = fields.StrField(attribute='_id')\n file_ref = fields.StrField(allow_none=True)\n file_name = fields.StrField(required=True)\n file_size = fields.IntField(required=True)\n file_type = fields.StrField(allow_none=True)\n mime_type = fields.StrField(allow_none=True)\n caption = fields.StrField(allow_none=True)\n\n class Meta:\n indexes = ('$file_name', )\n collection_name = COLLECTION_NAME" }, { "identifier": "get_size", "path": "utils.py", "snippet": "def get_size(size):\n \"\"\"Get size in readable format\"\"\"\n\n units = [\"Bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n size = float(size)\n i = 0\n while size >= 1024.0 and i < len(units):\n i += 1\n size /= 1024.0\n return \"%.2f %s\" % (size, units[i])" }, { "identifier": "temp", "path": "utils.py", "snippet": "class temp(object):\n BANNED_USERS = []\n BANNED_CHATS = []\n ME = None\n CURRENT=int(os.environ.get(\"SKIP\", 2))\n CANCEL = False\n MELCOW = {}\n U_NAME = None\n B_NAME = None\n GETALL = {}\n SHORT = {}\n SETTINGS = {}" }, { "identifier": "get_settings", "path": "utils.py", "snippet": "async def get_settings(group_id):\n settings = temp.SETTINGS.get(group_id)\n if not settings:\n settings = await db.get_settings(group_id)\n temp.SETTINGS[group_id] = settings\n return settings" }, { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\n START_TXT = \"\"\"\n<b>{},\n\nɪ ᴄᴀɴ ᴘʀᴏᴠɪᴅᴇ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs,\nᴊᴜsᴛ ᴀᴅᴅ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴀɴᴅ ᴇɴᴊᴏʏ 😍\n\n💞 ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href='https://telegram.me/MovieVillaYT'>ᴍᴏᴠɪᴇ 
ᴠɪʟʟᴀ</a></b>\n\"\"\"\n\n HELP_TXT = \"\"\"\n<b>{},\n\n/g_info - ᴛᴏ ᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴠᴀʟᴜᴇꜱ\n/set_tutorial - ᴛᴏ ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ᴛᴜᴛᴏʀɪᴀʟ\n/set_shortlink - ᴛᴏ ꜱᴇᴛ ᴄᴜꜱᴛᴏᴍ ꜱʜᴏʀᴛᴇɴᴇʀ\n/rem_tutorial - ᴛᴏ ʀᴇᴍᴏᴠᴇ ᴛᴜᴛᴏʀɪᴀʟ ʟɪɴᴋ\n</b>\"\"\"\n\n ABOUT_TXT = \"\"\"<b>➣ ᴍʏ ɴᴀᴍᴇ ⋟</b> {}\n<b>➢ ᴄʀᴇᴀᴛᴏʀ ⋟</b> <a href=https://youtube.com/@NobiDeveloper>𝘔𝘖𝘝𝘐𝘌 𝘝𝘐𝘓𝘓𝘈</a>\n<b>➣ ʟɪʙʀᴀʀʏ ⋟</b> 𝘱𝘺𝘳𝘰𝘨𝘳𝘢𝘮\n<b>➢ ʟᴀɴɢᴜᴀɢᴇ ⋟</b> 𝘱𝘺𝘵𝘩𝘰𝘯 3\n<b>➣ ᴅᴀᴛᴀʙᴀsᴇ ⋟</b> 𝘮𝘰𝘯𝘨𝘰 𝘥𝘣\n<b>➢ ʙᴏᴛ sᴇʀᴠᴇʀ ⋟</b> 𝘩𝘦𝘳𝘰𝘬𝘶\n<b>➣ ʙᴜɪʟᴅ sᴛᴀᴛs ⋟</b> 𝘷2.0.1 ﹝ʙᴇᴛᴀ﹞\"\"\"\n\n SOURCE_TXT = \"\"\"\n<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.</b>\n\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪʙʙᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ᴏʀ ʙʏ ᴀɴʏ ᴍᴇᴀɴꜱ, ꜱʜᴀʀᴇ, ᴏʀ ᴄᴏɴꜱᴜᴍᴇ, ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ.\n\n<b><a href=https://telegram.me/NobiDeveloper>~ ᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ @MovieVillaYT</a></b>\n\"\"\"\n\n MANUELFILTER_TXT = \"\"\"\n<b>{},\n\n~ ʏᴏᴜ ᴄᴀɴ ᴇᴀsɪʟʏ ᴄᴜsᴛᴏᴍɪᴢᴇ ᴛʜɪs ʙᴏᴛ ꜰᴏʀ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\n~ ᴏɴʟʏ ɢʀᴏᴜᴘ ᴀᴅᴍɪɴ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ᴄᴏᴍᴍᴀɴᴅ ᴀɴᴅ ᴄʜᴀɴɢᴇs sᴇᴛᴛɪɴɢs.\n\n~ ɪᴛ ᴡᴏʀᴋs ᴏɴʟʏ ᴡʜᴇɴ ʏᴏᴜ ᴀʟʀᴇᴀᴅʏ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n\nᴄᴏᴍᴍᴀɴᴅs ᴀɴᴅ ᴜsᴀɢᴇ -\n\n• /settings - ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs ᴀs ʏᴏᴜʀ ᴡɪsʜ.</b>\n\"\"\"\n\n GROUP_TXT = \"\"\"\n<b>⍟ ᴄʜᴀɴɴᴇʟs ᴀɴᴅ ɢʀᴏᴜᴘs ᴍᴏᴅᴜʟᴇ ⍟</b>\n\n<b>🍿 ᴍᴏᴠɪᴇꜱ ᴄʜᴀɴɴᴇʟ.\n🗣️ ʙᴏᴛ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ.\n🚦 ʙᴏᴛ ᴜᴘᴅᴀᴛᴇs ᴄʜᴀɴɴᴇʟ.\n🎬 ᴍᴏᴠɪᴇ ʀᴇǫᴜᴇsᴛɪɴɢ ɢʀᴏᴜᴘ.</b>\"\"\"\n\n BUTTON_TXT = \"\"\"\n<b>💵 ɪ ʀᴇǫᴜᴇsᴛᴇᴅ ᴛᴏ ʏᴏᴜ 💸\n\nᴘʟᴇᴀsᴇ ᴅᴏɴᴀᴛᴇ ᴛʜᴇ ᴅᴇᴠᴇʟᴏᴘᴇʀ ꜰᴏʀ ᴋᴇᴇᴘɪɴɢ ᴛʜᴇ sᴇʀᴠɪᴄᴇ ᴀʟɪᴠᴇ & ᴋᴇᴇᴘ ʙʀɪɴɢɪɴɢ ᴍᴏʀᴇ ɴᴇᴡ ꜰᴇᴀᴛᴜʀᴇs ꜰᴏʀ ʏᴏᴜ....</b>\n\n𝐘𝐨𝐮 𝐂𝐚𝐧 𝐃𝐨𝐧𝐚𝐭𝐞 𝐀𝐧𝐲 𝐀𝐦𝐨𝐮𝐧𝐭 𝐘𝐨𝐮 𝐇𝐚𝐯𝐞 💷\n\n<b>᚜ ᴘᴀʏᴍᴇɴᴛ ᴍᴇᴛʜᴏᴅs ᚛</b>\n\n💵 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗚𝗼𝗼𝗴𝗹𝗲 𝗣𝗮𝘆</a>\n💸 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗣𝗮𝘆𝘁𝗺</a>\n💶 <a href='https://telegra.ph/SUPPORT-12-22-2'>𝗣𝗵𝗼𝗻𝗲𝗣𝗲</a>\n\n𝐂𝐨𝐧𝐭𝐚𝐜𝐭 𝐌𝐞 𝐅𝐨𝐫 𝐊𝐧𝐨𝐰 𝐀𝐛𝐨𝐮𝐭 𝐓𝐡𝐞 𝐏𝐚𝐲𝐦𝐞𝐧𝐭 𝐈𝐧𝐟𝐨\n\n<b>ᴄʟɪᴄᴋ ʜᴇʀᴇ - <a href='https://telegram.me/NobiDeveloperr'>ʙᴏss</a>\nᴄʟɪᴄᴋ ʜᴇʀᴇ - <a href='https://telegram.me/NobiDeveloperr'>ʙᴏss</a></b>\"\"\"\n\n AUTOFILTER_TXT = \"\"\"ʜᴇʟᴘ: <b>ᴀᴜᴛᴏ ꜰɪʟᴛᴇʀ</b>\n<b>ɴᴏᴛᴇ: Fɪʟᴇ Iɴᴅᴇx</b>\n1. ᴍᴀᴋᴇ ᴍᴇ ᴛʜᴇ ᴀᴅᴍɪɴ ᴏꜰ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪꜰ ɪᴛ'ꜱ ᴘʀɪᴠᴀᴛᴇ.\n2. ᴍᴀᴋᴇ ꜱᴜʀᴇ ᴛʜᴀᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ᴅᴏᴇꜱ ɴᴏᴛ ᴄᴏɴᴛᴀɪɴꜱ ᴄᴀᴍʀɪᴘꜱ, ᴘᴏʀɴ ᴀɴᴅ ꜰᴀᴋᴇ ꜰɪʟᴇꜱ.\n3. ꜰᴏʀᴡᴀʀᴅ ᴛʜᴇ ʟᴀꜱᴛ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴍᴇ ᴡɪᴛʜ Qᴜᴏᴛᴇꜱ. ɪ'ʟʟ ᴀᴅᴅ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜᴀᴛ ᴄʜᴀɴɴᴇʟ ᴛᴏ ᴍʏ ᴅʙ.\n\n<b>Nᴏᴛᴇ: AᴜᴛᴏFɪʟᴛᴇʀ</b>\n1. Aᴅᴅ ᴛʜᴇ ʙᴏᴛ ᴀs ᴀᴅᴍɪɴ ᴏɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\n2. Usᴇ /connect ᴀɴᴅ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ᴛʜᴇ ʙᴏᴛ.\n3. Usᴇ /settings ᴏɴ ʙᴏᴛ's PM ᴀɴᴅ ᴛᴜʀɴ ᴏɴ AᴜᴛᴏFɪʟᴛᴇʀ ᴏɴ ᴛʜᴇ sᴇᴛᴛɪɴɢs ᴍᴇɴᴜ.\"\"\"\n\n CONNECTION_TXT = \"\"\"ʜᴇʟᴘ: <b>ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ</b>\n- ᴜꜱᴇᴅ ᴛᴏ ᴄᴏɴɴᴇᴄᴛ ʙᴏᴛ ᴛᴏ ᴘᴍ ꜰᴏʀ ᴍᴀɴᴀɢɪɴɢ ꜰɪʟᴛᴇʀꜱ \n- ɪᴛ ʜᴇʟᴘꜱ ᴛᴏ ᴀᴠᴏɪᴅ ꜱᴘᴀᴍᴍɪɴɢ ɪɴ ɢʀᴏᴜᴘꜱ.\n<b>ɴᴏᴛᴇ:</b>\n1. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ᴀ ᴄᴏɴɴᴇᴄᴛɪᴏɴ.\n2. 
ꜱᴇɴᴅ <code>/ᴄᴏɴɴᴇᴄᴛ</code> ꜰᴏʀ ᴄᴏɴɴᴇᴄᴛɪɴɢ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /connect - <code>ᴄᴏɴɴᴇᴄᴛ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴄʜᴀᴛ ᴛᴏ ʏᴏᴜʀ ᴘᴍ</code>\n• /disconnect - <code>ᴅɪꜱᴄᴏɴɴᴇᴄᴛ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ</code>\n• /connections - <code>ʟɪꜱᴛ ᴀʟʟ ʏᴏᴜʀ ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ</code>\"\"\"\n\n EXTRAMOD_TXT = \"\"\"ʜᴇʟᴘ: Exᴛʀᴀ Mᴏᴅᴜʟᴇs\n<b>ɴᴏᴛᴇ:</b>\nᴛʜᴇꜱᴇ ᴀʀᴇ ᴛʜᴇ ᴇxᴛʀᴀ ꜰᴇᴀᴛᴜʀᴇꜱ ᴏꜰ ᴛʜɪꜱ ʙᴏᴛ\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /id - <code>ɢᴇᴛ ɪᴅ ᴏꜰ ᴀ ꜱᴘᴇᴄɪꜰɪᴇᴅ ᴜꜱᴇʀ.</code>\n• /info - <code>ɢᴇᴛ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ᴀʙᴏᴜᴛ ᴀ ᴜꜱᴇʀ.</code>\n• /imdb - <code>ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ɪᴍᴅʙ ꜱᴏᴜʀᴄᴇ.</code>\n• /search - <code>ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ᴠᴀʀɪᴏᴜꜱ ꜱᴏᴜʀᴄᴇꜱ.</code>\"\"\"\n\n ADMIN_TXT = \"\"\"ʜᴇʟᴘ: Aᴅᴍɪɴ Mᴏᴅs\n<b>ɴᴏᴛᴇ:</b>\nTʜɪs Mᴏᴅᴜʟᴇ Oɴʟʏ Wᴏʀᴋs Fᴏʀ Mʏ Aᴅᴍɪɴs\nCᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ:\n• /logs - <code>ᴛᴏ ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ</code>\n• /stats - <code>ᴛᴏ ɢᴇᴛ ꜱᴛᴀᴛᴜꜱ ᴏꜰ ꜰɪʟᴇꜱ ɪɴ ᴅʙ. [Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</code>\n• /delete - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.</code>\n• /users - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.</code>\n• /chats - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ</code>\n• /leave - <code>ᴛᴏ ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.</code>\n• /disable - <code>ᴛᴏ ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.</code>\n• /ban - <code>ᴛᴏ ʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /unban - <code>ᴛᴏ ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.</code>\n• /channel - <code>ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ</code>\n• /broadcast - <code>ᴛᴏ ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ</code>\n• /grp_broadcast - <code>Tᴏ ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.</code>\n• /gfilter - <code>ᴛᴏ ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs</code>\n• /gfilters - <code>ᴛᴏ ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs</code>\n• /delg - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ</code>\n• /request - <code>Tᴏ sᴇɴᴅ ᴀ Mᴏᴠɪᴇ/Sᴇʀɪᴇs ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ ʙᴏᴛ ᴀᴅᴍɪɴs. Oɴʟʏ ᴡᴏʀᴋs ᴏɴ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ. [Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</code>\n• /delallg - <code>Tᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ Gғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\n• /deletefiles - <code>Tᴏ ᴅᴇʟᴇᴛᴇ CᴀᴍRɪᴘ ᴀɴᴅ PʀᴇDVD Fɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.</code>\"\"\"\n\n STATUS_TXT = \"\"\"<b>📂 ᴛᴏᴛᴀʟ ꜰɪʟᴇs: <code>{}</code>\n👤 ᴛᴏᴛᴀʟ ᴜsᴇʀs: <code>{}</code>\n♻️ ᴛᴏᴛᴀʟ ᴄʜᴀᴛs: <code>{}</code>\n🗃️ ᴜsᴇᴅ sᴛᴏʀᴀɢᴇ: <code>{}</code>\n🆓 ꜰʀᴇᴇ sᴛᴏʀᴀɢᴇ: <code>{}</code></b>\"\"\"\n\n LOG_TEXT_G = \"\"\"#𝐍𝐞𝐰𝐆𝐫𝐨𝐮𝐩\n\n<b>᚛› 𝐆𝐫𝐨𝐮𝐩 ⪼ {}(<code>{}</code>)</b>\n<b>᚛› 𝐓𝐨𝐭𝐚𝐥 𝐌𝐞𝐦𝐛𝐞𝐫𝐬 ⪼ <code>{}</code></b>\n<b>᚛› 𝐀𝐝𝐝𝐞𝐝 𝐁𝐲 ⪼ {}</b>\n\"\"\"\n\n LOG_TEXT_P = \"\"\"#𝐍𝐞𝐰𝐔𝐬𝐞𝐫\n\n<b>᚛› 𝐈𝐃 - <code>{}</code></b>\n<b>᚛› 𝐍𝐚𝐦𝐞 - {}</b>\n\"\"\"\n\n ALRT_TXT = \"\"\"{},\nᴄʜᴇᴄᴋ ʏᴏᴜʀ ᴏᴡɴ ʀᴇǫᴜᴇ𝗌ᴛ 😤\n\"\"\"\n\n OLD_ALRT_TXT =\"\"\"{},\n\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇ,\n\nꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇ𝗌ᴛ ᴀɢᴀɪɴ 😊\n\"\"\"\n\n CUDNT_FND = \"\"\"<b>{},</b>\n\n𝗜 𝗰𝗼𝘂𝗹𝗱𝗻'𝘁 𝗳𝗶𝗻𝗱 𝗮𝗻𝘆𝘁𝗵𝗶𝗻𝗴 𝗿𝗲𝗹𝗮𝘁𝗲𝗱 𝘁𝗼 𝘁𝗵𝗮𝘁 𝗱𝗶𝗱 𝘆𝗼𝘂 𝗺𝗲𝗮𝗻 𝗮𝗻𝘆 𝗼𝗻𝗲 𝗼𝗳 𝘁𝗵𝗲𝘀𝗲 ?? 👇\"\"\"\n\n I_CUDNT = \"\"\"<b>{},</b>\n\n𝗜 𝗰𝗼𝘂𝗹𝗱𝗻'𝘁 𝗳𝗶𝗻𝗱 𝗮𝗻𝘆 𝗺𝗼𝘃𝗶𝗲 𝗼𝗿 𝘀𝗲𝗿𝗶𝗲𝘀 𝗶𝗻 𝘁𝗵𝗮𝘁 𝗻𝗮𝗺𝗲.. 
😐\"\"\"\n\n I_CUD_NT = \"\"\"ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏ ᴍᴏᴠɪᴇ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}.\nᴘʟᴇᴀꜱᴇ ᴄʜᴇᴄᴋ ᴛʜᴇ ꜱᴘᴇʟʟɪɴɢ ᴏɴ ɢᴏᴏɢʟᴇ ᴏʀ ɪᴍᴅʙ...\"\"\"\n\n MVE_NT_FND = \"\"\"<b>ᴍᴏᴠɪᴇ ɴᴏᴛ ꜰᴏᴜɴᴅ...\n\n<u>ʀᴇᴀꜱᴏɴꜱ:</u></b>\n\n𝟷) ꜱᴘᴇʟʟɪɴɢ ᴍɪꜱᴛᴀᴋᴇ\n\n𝟸) ᴏᴛᴛ ᴏʀ ᴅᴠᴅ ɴᴏᴛ ʀᴇʟᴇᴀꜱᴇᴅ\n\n𝟹) ɴᴏᴛ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ\n\n<b><a href=https://telegram.me/NobiDeveloperr>~ ʀᴇǫᴜᴇ𝗌ᴛ ᴛᴏ ᴏᴡɴᴇʀ</a></b>\n\"\"\"\n\n TOP_ALRT_MSG = \"\"\"ꜱᴇᴀʀᴄʜɪɴɢ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ...\"\"\"\n\n MELCOW_ENG = \"\"\"<b>{},\n\n📿 ᴡᴇʟᴄᴏᴍᴇ ᴛᴏ ᴏᴜʀ ɢʀᴏᴜᴘ {}\n\n🚬 ᴛʜɪs ɪs ᴀ ᴍᴏᴠɪᴇ ɢʀᴏᴜᴘ\n\n⏳ ᴀʟʟ ᴄᴀᴛᴇɢᴏʀɪᴇs ᴏꜰ ᴍᴏᴠɪᴇs ᴀᴠᴀɪʟᴀʙʟᴇ ʜᴇʀᴇ\n\n🧨 ᴊᴜsᴛ ᴛʏᴘᴇ ᴛʜᴇ ᴍᴏᴠɪᴇ ɴᴀᴍᴇ\n\n🤖 ʙᴏᴛ ᴡɪʟʟ sᴇɴᴅ ʏᴏᴜʀ ᴍᴏᴠɪᴇ\n\n☎️ ʀᴇᴀᴅ ɢʀᴏᴜᴘ ʀᴜʟᴇs ᴛᴏ ᴋɴᴏᴡ ᴍᴏʀᴇ...</b>\"\"\"\n\n SHORTLINK_INFO = \"\"\"\n<b>──────「 <a href='https://telegram.me/NobiDeveloper'>ᴇᴀʀɴ ᴍᴏɴᴇʏ</a> 」──────\n\n➥ ɴᴏᴡ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴇᴀʀɴ ʟᴏᴛs ᴏꜰ ᴍᴏɴᴇʏ ꜰʀᴏᴍ ᴛʜɪꜱ ʙᴏᴛ.\n\n›› sᴛᴇᴘ 𝟷 : ʏᴏᴜ ᴍᴜsᴛ ʜᴀᴠᴇ ᴀᴛʟᴇᴀsᴛ ᴏɴᴇ ɢʀᴏᴜᴘ ᴡɪᴛʜ ᴍɪɴɪᴍᴜᴍ 𝟹𝟶𝟶 ᴍᴇᴍʙᴇʀs.\n\n›› sᴛᴇᴘ 𝟸 : ᴍᴀᴋᴇ ᴀᴄᴄᴏᴜɴᴛ ᴏɴ <a href='https://tnshort.net/ref/devilofficial'>ᴛɴʟɪɴᴋ</a> ᴏʀ <a href='https://onepagelink.in/ref/Nobita'>ᴏɴᴇᴘᴀɢᴇʟɪɴᴋ</a>. [ ʏᴏᴜ ᴄᴀɴ ᴀʟsᴏ ᴜsᴇ ᴏᴛʜᴇʀ sʜᴏʀᴛɴᴇʀ ᴡᴇʙsɪᴛᴇ ]\n\n›› sᴛᴇᴘ 𝟹 : ꜰᴏʟʟᴏᴡ ᴛʜᴇsᴇ <a href='https://telegram.me/NobiDeveloper/1063'>ɪɴꜱᴛʀᴜᴄᴛɪᴏɴꜱ</a>.\n\n➥ ᴛʜɪꜱ ʙᴏᴛ ꜰʀᴇᴇ ꜰᴏʀ ᴀʟʟ ʏᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ʙᴏᴛ ɪɴ ʏᴏᴜʀ ɢʀᴏᴜᴘs ꜰʀᴇᴇ ᴏꜰ ᴄᴏꜱᴛ.</b>\"\"\"\n\n REQINFO = \"\"\"\n⚠ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ⚠\n\nᴀꜰᴛᴇʀ 5 ᴍɪɴᴜᴛᴇꜱ ᴛʜɪꜱ ᴍᴇꜱꜱᴀɢᴇ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴅᴇʟᴇᴛᴇᴅ\n\nɪꜰ ʏᴏᴜ ᴅᴏ ɴᴏᴛ ꜱᴇᴇ ᴛʜᴇ ʀᴇǫᴜᴇsᴛᴇᴅ ᴍᴏᴠɪᴇ / sᴇʀɪᴇs ꜰɪʟᴇ, ʟᴏᴏᴋ ᴀᴛ ᴛʜᴇ ɴᴇxᴛ ᴘᴀɢᴇ\"\"\"\n\n SELECT = \"\"\"\nMOVIES ➢ Sᴇʟᴇᴄᴛ \"Lᴀɴɢᴜᴀɢᴇs\"\n\nSERIES ➢ Sᴇʟᴇᴄᴛ \"Sᴇᴀsᴏɴs\"\n\nTɪᴘ: Sᴇʟᴇᴄᴛ \"Lᴀɴɢᴜᴀɢᴇs\" ᴏʀ \"Sᴇᴀsᴏɴs\" Bᴜᴛᴛᴏɴ ᴀɴᴅ Cʟɪᴄᴋ \"Sᴇɴᴅ Aʟʟ\" Tᴏ ɢᴇᴛ Aʟʟ Fɪʟᴇ Lɪɴᴋs ɪɴ ᴀ Sɪɴɢʟᴇ ᴄʟɪᴄᴋ\"\"\"\n\n SINFO = \"\"\"\n▣ ᴛɪᴘs ▣\n\n☆ ᴛʏᴘᴇ ᴄᴏʀʀᴇᴄᴛ sᴘᴇʟʟɪɴɢ (ɢᴏᴏɢʟᴇ)\n\n☆ ɪꜰ ʏᴏᴜ ɴᴏᴛ ɢᴇᴛ ʏᴏᴜʀ ꜰɪʟᴇ ɪɴ ᴛʜɪꜱ ᴘᴀɢᴇ ᴛʜᴇɴ ᴄʟɪᴄᴋ ᴏɴ ɴᴇxᴛ ʙᴜᴛᴛᴏɴ\n\n☆ ᴄᴏɴᴛɪɴᴜᴇ ᴛʜɪs ᴍᴇᴛʜᴏᴅ ᴛᴏ ɢᴇᴛᴛɪɴɢ ʏᴏᴜ ꜰɪʟᴇ\n\n❤️‍🔥 ᴘᴏᴡᴇʀᴇᴅ ʙʏ @NobiDeveloper\n\"\"\"\n\n NORSLTS = \"\"\"\n★ #𝗡𝗼𝗥𝗲𝘀𝘂𝗹𝘁𝘀 ★\n\n𝗜𝗗 <b>: {}</b>\n𝗡𝗮𝗺𝗲 <b>: {}</b>\n𝗠𝗲𝘀𝘀𝗮𝗴𝗲 <b>: {}</b>\"\"\"\n\n CAPTION = \"\"\"\n[{file_name}](https://telegram.me/NobiDeveloper)\n\n<b>•────•────────•────•\n📌 ʀᴇǫᴜᴇsᴛ ɢʀᴏᴜᴘ​ : [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://telegram.me/AllRequestGroups)\n🎬 ᴍᴏᴠɪᴇs ᴄʜᴀɴɴᴇʟ​ : [ᴄʟɪᴄᴋ ʜᴇʀᴇ](https://telegram.me/MovieVillaYT)\n•────•────────•────•\n\n©️ ᴘᴏᴡᴇʀᴇᴅ ʙʏ : [ᴍᴏᴠɪᴇ ᴠɪʟʟᴀ](https://youtube.com/@NobiDeveloper)</b>\"\"\"\n\n IMDB_TEMPLATE_TXT = \"\"\"\n<b>{title}</b>\n\n⭐️<b>{rating}</b> | ⏰ <b>{runtime}</b> | 📆 <b>{release_date}</b>\n\n● <b>{genres}</b>\n● <b>{languages}</b>\n\n📖 sᴛᴏʀʏ : <b>{plot}</b> \n\n© {message.chat.title}\n\"\"\"\n \n ALL_FILTERS = \"\"\"\n<b>Hᴇʏ {}, Tʜᴇsᴇ ᴀʀᴇ ᴍʏ ᴛʜʀᴇᴇ ᴛʏᴘᴇs ᴏғ ғɪʟᴛᴇʀs.</b>\"\"\"\n \n GFILTER_TXT = \"\"\"\n<b>Wᴇʟᴄᴏᴍᴇ ᴛᴏ Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs. 
Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs ᴀʀᴇ ᴛʜᴇ ғɪʟᴛᴇʀs sᴇᴛ ʙʏ ʙᴏᴛ ᴀᴅᴍɪɴs ᴡʜɪᴄʜ ᴡɪʟʟ ᴡᴏʀᴋ ᴏɴ ᴀʟʟ ɢʀᴏᴜᴘs.</b>\n \nAᴠᴀɪʟᴀʙʟᴇ ᴄᴏᴍᴍᴀɴᴅs:\n• /gfilter - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /gfilters - <code>Tᴏ ᴠɪᴇᴡ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.</code>\n• /delg - <code>Tᴏ ᴅᴇʟᴇᴛᴇ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.</code>\n• /delallg - <code>ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢʟᴏʙᴀʟ ꜰɪʟᴛᴇʀꜱ.</code>\"\"\"\n \n FILE_STORE_TXT = \"\"\"\n<b>Fɪʟᴇ sᴛᴏʀᴇ ɪs ᴛʜᴇ ғᴇᴀᴛᴜʀᴇ ᴡʜɪᴄʜ ᴡɪʟʟ ᴄʀᴇᴀᴛᴇ ᴀ sʜᴀʀᴇᴀʙʟᴇ ʟɪɴᴋ ᴏғ ᴀ sɪɴɢʟᴇ ᴏʀ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.</b>\n\nAᴠᴀɪʟᴀʙʟᴇ ᴄᴏᴍᴍᴀɴᴅs:\n• /batch - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ʙᴀᴛᴄʜ ʟɪɴᴋ ᴏғ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.</code>\n• /link - <code>Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ sɪɴɢʟᴇ ғɪʟᴇ sᴛᴏʀᴇ ʟɪɴᴋ.</code>\n• /pbatch - <code>Jᴜsᴛ ʟɪᴋᴇ /batch, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇs ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴs.</code>\n• /plink - <code>Jᴜsᴛ ʟɪᴋᴇ /link, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇ ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴ.</code>\"\"\"\n\n RESTART_TXT = \"\"\"\n<b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !\n\n📅 Dᴀᴛᴇ : <code>{}</code>\n⏰ Tɪᴍᴇ : <code>{}</code>\n🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code>\n🛠️ Bᴜɪʟᴅ Sᴛᴀᴛᴜs: <code>v2.7.1 [ Sᴛᴀʙʟᴇ ]</code></b>\n\"\"\"\n\n LOGO = \"\"\"\n𝑺𝒕𝒂𝒓𝒕𝒊𝒏𝒈.......🥵\"\"\"" } ]
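Editor's note: most of this row's context is bot copy, but it also quotes the small get_size helper from utils.py. The self-contained sketch below is an added illustration (the byte counts are arbitrary); it copies that helper verbatim so the strings it produces can be checked directly.

# Copy of the get_size helper shown in the context above, plus example calls.
def get_size(size):
    """Get size in readable format"""
    units = ["Bytes", "KB", "MB", "GB", "TB", "PB", "EB"]
    size = float(size)
    i = 0
    while size >= 1024.0 and i < len(units):
        i += 1
        size /= 1024.0
    return "%.2f %s" % (size, units[i])

print(get_size(512))          # 512.00 Bytes
print(get_size(15_728_640))   # 15.00 MB
print(get_size(3 * 1024**3))  # 3.00 GB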
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
15,315
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url='https://telegram.me/NobiDeveloperSupport') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🥷 ʜᴇʟᴘ 🥷', url='https://telegram.me/NobiDeveloperSupport'), InlineKeyboardButton('♻️ ᴜᴘᴅᴀᴛᴇꜱ ♻️', url='https://telegram.me/NobiDeveloper') ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>☤ ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title}\n\n🤖 ᴅᴏɴ’ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ 🤖\n\n🕵️ ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ᴅᴏᴜʙᴛ ʏᴏᴜ ᴄʟᴇᴀʀ ɪᴛ ᴜsɪɴɢ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴs</b>", reply_markup=reply_markup) else: settings = await get_settings(message.chat.id) if settings["welcome"]: for u in message.new_chat_members: if (temp.MELCOW).get('welcome') is not None: try: await (temp.MELCOW['welcome']).delete() except: pass temp.MELCOW['welcome'] = await message.reply_photo(
"""----------------------------------------- https://github.com/NobiDeveloper/Nobita-Filter-Bot --------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url='https://telegram.me/NobiDeveloperSupport') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🥷 ʜᴇʟᴘ 🥷', url='https://telegram.me/NobiDeveloperSupport'), InlineKeyboardButton('♻️ ᴜᴘᴅᴀᴛᴇꜱ ♻️', url='https://telegram.me/NobiDeveloper') ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>☤ ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title}\n\n🤖 ᴅᴏɴ’ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ 🤖\n\n🕵️ ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ᴅᴏᴜʙᴛ ʏᴏᴜ ᴄʟᴇᴀʀ ɪᴛ ᴜsɪɴɢ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴs</b>", reply_markup=reply_markup) else: settings = await get_settings(message.chat.id) if settings["welcome"]: for u in message.new_chat_members: if (temp.MELCOW).get('welcome') is not None: try: await (temp.MELCOW['welcome']).delete() except: pass temp.MELCOW['welcome'] = await message.reply_photo(
photo=(MELCOW_VID),
4
2023-11-28 13:36:56+00:00
24k
chenxx89/BFRffusion
models/models.py
[ { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "UNetModel", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. 
Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. 
\"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\")\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n if isinstance(self.num_classes, int):\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n elif self.num_classes == \"continuous\":\n print(\"setting up linear c_adm embedding layer\")\n self.label_emb = nn.Linear(1, time_embed_dim)\n else:\n raise ValueError()\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n 
dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n if level and i == self.num_res_blocks[level]:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] 
Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape[0] == x.shape[0]\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, **kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. 
This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. / z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = 
encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting 
= weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', \"txt\"]:\n xc = batch[cond_key]\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. 
/ self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n # if self.cond_stage_trainable:\n # c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n\n loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3])\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n 
return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n 
total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... 
-> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope if use_ema_scope else nullcontext\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = 
{\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. 
Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n # print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n # print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n # iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(time_range):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. 
- mask) * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise 
NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, 
device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" }, { "identifier": "instantiate_from_config", "path": "data/dataset_instantiate.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(config.get(\"params\", dict()))" }, { "identifier": "calculate_psnr_ssim", "path": "metrics/metrics_all.py", "snippet": "def calculate_psnr_ssim(gt_path, restored_path, test_y_channel = False, crop_border = 0, suffix = '', correct_mean_var = False, show_details =False):\n \"\"\"\n Calculate PSNR and SSIM for images.\n gt_path: Path to gt (Ground-Truth)\n restored_path: Path to restored images\n test_y_channel: If True, test Y channel (In MatLab YCbCr format). If False, test RGB channels.\n crop_border: Crop border for each side\n suffix: Suffix for restored images\n \"\"\"\n print(\"Calculate PSNR and SSIM for images\")\n psnr_all = []\n ssim_all = []\n img_list_gt = sorted(list(scandir(gt_path, recursive=True, full_path=True)))\n img_list_restored = sorted(list(scandir(restored_path, recursive=True, full_path=True)))\n\n if test_y_channel:\n print('Testing Y channel.')\n else:\n print('Testing RGB channels.')\n\n for i, img_path in tqdm(enumerate(img_list_gt)):\n basename, ext = osp.splitext(osp.basename(img_path))\n img_gt = cv2.imread(img_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n if suffix == '':\n img_path_restored = img_list_restored[i]\n else:\n img_path_restored = osp.join(restored_path, basename + suffix + ext)\n img_restored = cv2.imread(img_path_restored, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n # img_restored = cv2.imread(img_path_restored, cv2.IMREAD_COLOR).astype(np.float32) / 255.\n img_restored\n if correct_mean_var:\n mean_l = []\n std_l = []\n for j in range(3):\n mean_l.append(np.mean(img_gt[:, :, j]))\n std_l.append(np.std(img_gt[:, :, j]))\n for j in range(3):\n # correct twice\n mean = np.mean(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] - mean + mean_l[j]\n std = np.std(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] / std * std_l[j]\n\n mean = np.mean(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] - mean + mean_l[j]\n std = np.std(img_restored[:, :, j])\n img_restored[:, :, j] = img_restored[:, :, j] / std * std_l[j]\n\n if test_y_channel and img_gt.ndim == 3 and img_gt.shape[2] == 3:\n img_gt = bgr2ycbcr(img_gt, y_only=True)\n img_restored = bgr2ycbcr(img_restored, y_only=True)\n\n # calculate PSNR and SSIM\n psnr = calculate_psnr(img_gt * 255, img_restored * 255, crop_border=crop_border, input_order='HWC')\n ssim = calculate_ssim(img_gt * 255, img_restored * 255, crop_border=crop_border, input_order='HWC')\n if show_details:\n print(f'{basename + suffix + ext:25}. 
\\tPSNR: {psnr:.6f} dB, \\tSSIM: {ssim:.6f}')\n psnr_all.append(psnr)\n ssim_all.append(ssim)\n Average_psnr = sum(psnr_all) / len(psnr_all)\n Average_ssim = sum(ssim_all) / len(ssim_all)\n print(f'PSNR: {Average_psnr:.6f} dB, SSIM: {Average_ssim:.6f}')\n return Average_psnr, Average_ssim" }, { "identifier": "calculate_lpips", "path": "metrics/metrics_all.py", "snippet": "def calculate_lpips(gt_path, restored_path, suffix = '', show_details =False):\n \"\"\"\n Calculate LPIPS for images.\n gt_path: Path to gt (Ground-Truth)\n restored_path: Path to restored images\n suffix: Suffix for restored images\n \"\"\"\n print(\"Calculate LPIPS for images\")\n loss_fn_vgg = lpips.LPIPS(net='vgg').cuda() # RGB, normalized to [-1,1]\n lpips_all = []\n img_list = sorted(glob.glob(osp.join(gt_path, '*')))\n img_list_restored = sorted(list(scandir(restored_path, recursive=True, full_path=True)))\n\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n for i, img_path in tqdm(enumerate(img_list)):\n basename, ext = osp.splitext(osp.basename(img_path))\n img_gt = cv2.imread(img_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255.\n\n if suffix == '':\n img_path_restored = img_list_restored[i]\n else:\n img_path_restored = osp.join(restored_path, basename + suffix + ext)\n img_restored = cv2.imread(img_path_restored, cv2.IMREAD_UNCHANGED).astype(np.float32) / 255. \n # img_restored = cv2.imread(img_path_restored, cv2.IMREAD_COLOR).astype(np.float32) / 255. \n\n img_gt, img_restored = img2tensor([img_gt, img_restored], bgr2rgb=True, float32=True)\n # norm to [-1, 1]\n normalize(img_gt, mean, std, inplace=True)\n normalize(img_restored, mean, std, inplace=True)\n\n # calculate lpips\n lpips_val = loss_fn_vgg(img_restored.unsqueeze(0).cuda(), img_gt.unsqueeze(0).cuda())\n lpips_val = lpips_val.cpu().item()\n if show_details:\n print(f'{i+1:3d}: {basename:25}. \\tLPIPS: {lpips_val:.6f}.')\n lpips_all.append(lpips_val)\n Average_lpips = sum(lpips_all) / len(lpips_all)\n print(f'LPIPS: {Average_lpips:.6f}')\n return Average_lpips" }, { "identifier": "calculate_NIQE", "path": "metrics/metrics_all.py", "snippet": "def calculate_NIQE(restored_path, crop_border = 0, show_details =False):\n \"\"\"\n Calculate NIQE for images.\n restored_path: Path to restored images\n crop_border: Crop border for each side\n \"\"\"\n print(\"Calculate NIQE for images\")\n niqe_all = []\n img_list = sorted(scandir(restored_path, recursive=True, full_path=True))\n\n for i, img_path in tqdm(enumerate(img_list)):\n basename, _ = os.path.splitext(os.path.basename(img_path))\n img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', category=RuntimeWarning)\n niqe_score = calculate_niqe(img, crop_border, input_order='HWC', convert_to='y')\n if show_details:\n print(f'{i+1:3d}: {basename:25}. 
\\tNIQE: {niqe_score:.6f}')\n niqe_all.append(niqe_score)\n Average_niqe = sum(niqe_all) / len(niqe_all)\n print(f'NIQE: {Average_niqe:.6f}')\n return Average_niqe " }, { "identifier": "calculate_fid_folder", "path": "metrics/metrics_all.py", "snippet": "def calculate_fid_folder(restored_path):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n fid_stats = ''\n batch_size = 64\n num_sample = 50000\n num_workers = 4\n backend = 'disk'\n\n # inception model\n inception = load_patched_inception_v3(device)\n\n # create dataset\n opt = {}\n opt['name'] = 'SingleImageDataset'\n opt['type'] = 'SingleImageDataset'\n opt['dataroot_lq'] = restored_path\n opt['io_backend'] = dict(type=backend)\n opt['mean'] = [0.5, 0.5, 0.5]\n opt['std'] = [0.5, 0.5, 0.5]\n dataset = build_dataset(opt)\n\n # create dataloader\n data_loader = DataLoader(\n dataset=dataset,\n batch_size=batch_size,\n shuffle=False,\n num_workers=num_workers,\n sampler=None,\n drop_last=False)\n num_sample = min(num_sample, len(dataset))\n total_batch = math.ceil(num_sample / batch_size)\n\n def data_generator(data_loader, total_batch):\n for idx, data in enumerate(data_loader):\n if idx >= total_batch:\n break\n else:\n yield data['lq']\n\n features = extract_inception_features(data_generator(data_loader, total_batch), inception, total_batch, device)\n features = features.numpy()\n total_len = features.shape[0]\n features = features[:num_sample]\n print(f'Extracted {total_len} features, use the first {features.shape[0]} features to calculate stats.')\n\n sample_mean = np.mean(features, 0)\n sample_cov = np.cov(features, rowvar=False)\n\n # load the dataset stats\n stats = torch.load(fid_stats)\n real_mean = stats['mean']\n real_cov = stats['cov']\n\n # calculate FID metric\n fid = calculate_fid(sample_mean, sample_cov, real_mean, real_cov)\n print('fid:', fid)\n return fid" } ]
import torch import os import numpy as np import math import shutil import safetensors.torch from ldm.modules.diffusionmodules.util import timestep_embedding from einops import rearrange, repeat from torchvision.utils import make_grid from ldm.modules.diffusionmodules.openaimodel import UNetModel from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.util import log_txt_as_img, instantiate_from_config from ldm.models.diffusion.ddim import DDIMSampler from data.dataset_instantiate import instantiate_from_config as instantiate_dataset_from_config from torch.utils.tensorboard import SummaryWriter from tqdm import tqdm from metrics.metrics_all import calculate_psnr_ssim, calculate_lpips, calculate_NIQE, calculate_fid_folder from torch.utils.data import DataLoader from PIL import Image from torch.optim.lr_scheduler import LambdaLR from omegaconf import OmegaConf
20,351
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path)
def get_state_dict(d): return d.get('state_dict', d) def load_state_dict(ckpt_path, location='cpu'): _, extension = os.path.splitext(ckpt_path) if extension.lower() == ".safetensors": state_dict = safetensors.torch.load_file(ckpt_path, device=location) else: state_dict = get_state_dict(torch.load(ckpt_path, map_location=torch.device(location))) state_dict = get_state_dict(state_dict) print(f'Loaded state_dict from [{ckpt_path}]') return state_dict def create_model(config_path): config = OmegaConf.load(config_path)
model = instantiate_from_config(config.model).cpu()
6
2023-11-30 13:50:58+00:00
24k
IanYeung/MGLD-VSR
ldm/models/autoencoder.py
[ { "identifier": "from_5d_to_4d", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def from_5d_to_4d(inp, b, c, t, h, w):\n out = rearrange(inp, 'b c t h w -> (b t) c h w')\n return out" }, { "identifier": "from_4d_to_5d", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def from_4d_to_5d(inp, b, c, t, h, w):\n out = rearrange(inp, '(b t) c h w -> b c t h w', b=b, c=c, t=t, h=h, w=w)\n return out" }, { "identifier": "Encoder", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class Encoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, double_z=True, use_linear_attn=False, attn_type=\"vanilla\",\n **ignore_kwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n\n # downsampling\n self.conv_in = torch.nn.Conv2d(in_channels,\n self.ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n curr_res = resolution\n in_ch_mult = (1,)+tuple(ch_mult)\n self.in_ch_mult = in_ch_mult\n self.down = nn.ModuleList()\n for i_level in range(self.num_resolutions):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_in = ch*in_ch_mult[i_level]\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n down = nn.Module()\n down.block = block\n down.attn = attn\n if i_level != self.num_resolutions-1:\n down.downsample = Downsample(block_in, resamp_with_conv)\n curr_res = curr_res // 2\n self.down.append(down)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n 2*z_channels if double_z else z_channels,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, x, return_fea=False):\n # timestep embedding\n temb = None\n\n # downsampling\n hs = [self.conv_in(x)]\n fea_list = []\n for i_level in range(self.num_resolutions):\n for i_block in range(self.num_res_blocks):\n h = self.down[i_level].block[i_block](hs[-1], temb)\n if len(self.down[i_level].attn) > 0:\n h = self.down[i_level].attn[i_block](h)\n hs.append(h)\n if return_fea:\n if i_level==1 or i_level==2:\n fea_list.append(h)\n if i_level != self.num_resolutions-1:\n hs.append(self.down[i_level].downsample(hs[-1]))\n\n # middle\n h = hs[-1]\n h = self.mid.block_1(h, temb)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # end\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n\n if return_fea:\n return h, fea_list\n\n return h" }, { "identifier": "Decoder", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class Decoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, 
give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n up = nn.Module()\n up.block = block\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n # print(h.shape)\n\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "Decoder_Mix", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class Decoder_Mix(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", num_fuse_block=2, fusion_w=1.0, **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n 
self.fusion_w = fusion_w\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n block_out = ch*ch_mult[i_level]\n\n if i_level != self.num_resolutions-1:\n if i_level != 0:\n fuse_layer = Fuse_sft_block_RRDB(in_ch=block_out, out_ch=block_out, num_block=num_fuse_block)\n setattr(self, 'fusion_layer_{}'.format(i_level), fuse_layer)\n\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n\n up = nn.Module()\n up.block = block\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z, enc_fea):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n\n if i_level != self.num_resolutions-1 and i_level != 0:\n cur_fuse_layer = getattr(self, 'fusion_layer_{}'.format(i_level))\n h = cur_fuse_layer(enc_fea[i_level-1], h, self.fusion_w)\n\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "VideoDecoder", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class VideoDecoder(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, num_frames=1, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.num_frames = num_frames\n self.give_pre_end = 
give_pre_end\n self.tanh_out = tanh_out\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n # spatial temporal mixing here\n self.temporal_mixing = SpatialTemporalConv(num_feat=block_in,\n num_frames=num_frames)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n # spatial temporal mixing here\n temporal_mixing = nn.ModuleList()\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n temporal_mixing.append(SpatialTemporalConv(num_feat=block_in,\n num_frames=num_frames))\n up = nn.Module()\n up.block = block\n up.attn = attn\n up.temporal_mixing = temporal_mixing\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.temporal_mixing(h)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n h = self.up[i_level].temporal_mixing[i_block](h)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "VideoDecoder_Mix", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class VideoDecoder_Mix(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n attn_resolutions, num_frames=1, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", num_fuse_block=2, fusion_w=1.0, **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.num_frames = 
num_frames\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n self.fusion_w = fusion_w\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n # spatial temporal mixing here\n self.temporal_mixing = SpatialTemporalConv(num_feat=block_in,\n num_frames=num_frames)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n # spatial temporal mixing here\n temporal_mixing = nn.ModuleList()\n\n block_out = ch*ch_mult[i_level]\n\n if i_level != self.num_resolutions-1:\n if i_level != 0:\n fuse_layer = Fuse_sft_block_ResidualDenseBlock(in_ch=block_out, out_ch=block_out, num_block=num_fuse_block)\n setattr(self, 'fusion_layer_{}'.format(i_level), fuse_layer)\n\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n temporal_mixing.append(SpatialTemporalConv(num_feat=block_in,\n num_frames=num_frames))\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n\n up = nn.Module()\n up.block = block\n up.temporal_mixing = temporal_mixing\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z, enc_fea):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.temporal_mixing(h)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n h = self.up[i_level].temporal_mixing[i_block](h)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n\n if i_level != self.num_resolutions-1 and i_level != 0:\n cur_fuse_layer = getattr(self, 'fusion_layer_{}'.format(i_level))\n h = cur_fuse_layer(enc_fea[i_level-1], h, self.fusion_w)\n\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "VideoDecoderV2", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class VideoDecoderV2(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks,\n 
attn_resolutions, num_frames=1, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.num_frames = num_frames\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n # spatial temporal mixing here\n self.temporal_mixing = MultiDimTemporalConv(num_feat=block_in,\n num_frames=num_frames)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n # spatial temporal mixing here\n temporal_mixing = nn.ModuleList()\n block_out = ch*ch_mult[i_level]\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n temporal_mixing.append(MultiDimTemporalConv(num_feat=block_in,\n num_frames=num_frames))\n up = nn.Module()\n up.block = block\n up.attn = attn\n up.temporal_mixing = temporal_mixing\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.temporal_mixing(h)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n h = self.up[i_level].temporal_mixing[i_block](h)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "VideoDecoder_MixV2", "path": "ldm/modules/diffusionmodules/model.py", "snippet": "class VideoDecoder_MixV2(nn.Module):\n def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), 
num_res_blocks,\n attn_resolutions, num_frames=1, dropout=0.0, resamp_with_conv=True, in_channels,\n resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False,\n attn_type=\"vanilla\", num_fuse_block=2, fusion_w=1.0, **ignorekwargs):\n super().__init__()\n if use_linear_attn: attn_type = \"linear\"\n self.ch = ch\n self.temb_ch = 0\n self.num_resolutions = len(ch_mult)\n self.num_res_blocks = num_res_blocks\n self.resolution = resolution\n self.in_channels = in_channels\n self.num_frames = num_frames\n self.give_pre_end = give_pre_end\n self.tanh_out = tanh_out\n self.fusion_w = fusion_w\n\n # compute in_ch_mult, block_in and curr_res at lowest res\n in_ch_mult = (1,)+tuple(ch_mult)\n block_in = ch*ch_mult[self.num_resolutions-1]\n curr_res = resolution // 2**(self.num_resolutions-1)\n self.z_shape = (1,z_channels,curr_res,curr_res)\n print(\"Working with z of shape {} = {} dimensions.\".format(\n self.z_shape, np.prod(self.z_shape)))\n\n # z to block_in\n self.conv_in = torch.nn.Conv2d(z_channels,\n block_in,\n kernel_size=3,\n stride=1,\n padding=1)\n\n # middle\n self.mid = nn.Module()\n self.mid.block_1 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n # spatial temporal mixing here\n self.temporal_mixing = MultiDimTemporalConv(num_feat=block_in,\n num_frames=num_frames)\n self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)\n self.mid.block_2 = ResnetBlock(in_channels=block_in,\n out_channels=block_in,\n temb_channels=self.temb_ch,\n dropout=dropout)\n\n # upsampling\n self.up = nn.ModuleList()\n for i_level in reversed(range(self.num_resolutions)):\n block = nn.ModuleList()\n attn = nn.ModuleList()\n # spatial temporal mixing here\n temporal_mixing = nn.ModuleList()\n\n block_out = ch*ch_mult[i_level]\n\n if i_level != self.num_resolutions-1:\n if i_level != 0:\n fuse_layer = Fuse_sft_block_ResidualDenseBlock(in_ch=block_out, out_ch=block_out, num_block=num_fuse_block)\n setattr(self, 'fusion_layer_{}'.format(i_level), fuse_layer)\n\n for i_block in range(self.num_res_blocks+1):\n block.append(ResnetBlock(in_channels=block_in,\n out_channels=block_out,\n temb_channels=self.temb_ch,\n dropout=dropout))\n block_in = block_out\n temporal_mixing.append(MultiDimTemporalConv(num_feat=block_in,\n num_frames=num_frames))\n if curr_res in attn_resolutions:\n attn.append(make_attn(block_in, attn_type=attn_type))\n\n up = nn.Module()\n up.block = block\n up.temporal_mixing = temporal_mixing\n up.attn = attn\n if i_level != 0:\n up.upsample = Upsample(block_in, resamp_with_conv)\n curr_res = curr_res * 2\n self.up.insert(0, up) # prepend to get consistent order\n\n # end\n self.norm_out = Normalize(block_in)\n self.conv_out = torch.nn.Conv2d(block_in,\n out_ch,\n kernel_size=3,\n stride=1,\n padding=1)\n\n def forward(self, z, enc_fea):\n #assert z.shape[1:] == self.z_shape[1:]\n self.last_z_shape = z.shape\n\n # timestep embedding\n temb = None\n\n # z to block_in\n h = self.conv_in(z)\n\n # middle\n h = self.mid.block_1(h, temb)\n h = self.temporal_mixing(h)\n h = self.mid.attn_1(h)\n h = self.mid.block_2(h, temb)\n\n # upsampling\n for i_level in reversed(range(self.num_resolutions)):\n for i_block in range(self.num_res_blocks+1):\n h = self.up[i_level].block[i_block](h, temb)\n h = self.up[i_level].temporal_mixing[i_block](h)\n if len(self.up[i_level].attn) > 0:\n h = self.up[i_level].attn[i_block](h)\n\n if i_level != self.num_resolutions-1 and i_level != 0:\n cur_fuse_layer = getattr(self, 
'fusion_layer_{}'.format(i_level))\n h = cur_fuse_layer(enc_fea[i_level-1], h, self.fusion_w)\n\n if i_level != 0:\n h = self.up[i_level].upsample(h)\n # end\n if self.give_pre_end:\n return h\n\n h = self.norm_out(h)\n h = nonlinearity(h)\n h = self.conv_out(h)\n if self.tanh_out:\n h = torch.tanh(h)\n return h" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DiffJPEG", "path": "basicsr/utils/diffjpeg.py", "snippet": "class DiffJPEG(nn.Module):\n \"\"\"This JPEG algorithm result is slightly different from cv2.\n DiffJPEG supports batch processing.\n\n Args:\n differentiable(bool): If True, uses custom differentiable rounding function, if False, uses standard torch.round\n \"\"\"\n\n def __init__(self, differentiable=True):\n super(DiffJPEG, self).__init__()\n if differentiable:\n rounding = diff_round\n else:\n rounding = torch.round\n\n self.compress = CompressJpeg(rounding=rounding)\n self.decompress = DeCompressJpeg(rounding=rounding)\n\n def forward(self, x, quality):\n \"\"\"\n Args:\n x (Tensor): Input image, bchw, rgb, [0, 1]\n quality(float): Quality factor for jpeg compression scheme.\n \"\"\"\n factor = quality\n if isinstance(factor, (int, float)):\n factor = quality_to_factor(factor)\n else:\n for i in range(factor.size(0)):\n factor[i] = quality_to_factor(factor[i])\n h, w = x.size()[-2:]\n h_pad, w_pad = 0, 0\n # why should use 16\n if h % 16 != 0:\n h_pad = 16 - h % 16\n if w % 16 != 0:\n w_pad = 16 - w % 16\n x = F.pad(x, (0, w_pad, 0, h_pad), mode='constant', value=0)\n\n y, cb, cr = self.compress(x, factor=factor)\n recovered = self.decompress(y, cb, cr, (h + h_pad), (w + w_pad), factor=factor)\n recovered = recovered[:, :, 0:h, 0:w]\n return recovered" }, { "identifier": "USMSharp", "path": "basicsr/utils/img_process_util.py", "snippet": "class 
USMSharp(torch.nn.Module):\n\n def __init__(self, radius=50, sigma=0):\n super(USMSharp, self).__init__()\n if radius % 2 == 0:\n radius += 1\n self.radius = radius\n kernel = cv2.getGaussianKernel(radius, sigma)\n kernel = torch.FloatTensor(np.dot(kernel, kernel.transpose())).unsqueeze_(0)\n self.register_buffer('kernel', kernel)\n\n def forward(self, img, weight=0.5, threshold=10):\n blur = filter2D(img, self.kernel)\n residual = img - blur\n\n mask = torch.abs(residual) * 255 > threshold\n mask = mask.float()\n soft_mask = filter2D(mask, self.kernel)\n sharp = img + weight * residual\n sharp = torch.clip(sharp, 0, 1)\n return soft_mask * sharp + (1 - soft_mask) * img" }, { "identifier": "filter2D", "path": "basicsr/utils/img_process_util.py", "snippet": "def filter2D(img, kernel):\n \"\"\"PyTorch version of cv2.filter2D\n\n Args:\n img (Tensor): (b, c, h, w)\n kernel (Tensor): (b, k, k)\n \"\"\"\n k = kernel.size(-1)\n b, c, h, w = img.size()\n if k % 2 == 1:\n img = F.pad(img, (k // 2, k // 2, k // 2, k // 2), mode='reflect')\n else:\n raise ValueError('Wrong kernel size')\n\n ph, pw = img.size()[-2:]\n\n if kernel.size(0) == 1:\n # apply the same kernel to all batch images\n img = img.view(b * c, 1, ph, pw)\n kernel = kernel.view(1, 1, k, k)\n return F.conv2d(img, kernel, padding=0).view(b, c, h, w)\n else:\n img = img.view(1, b * c, ph, pw)\n kernel = kernel.view(b, 1, k, k).repeat(1, c, 1, 1).view(b * c, 1, k, k)\n return F.conv2d(img, kernel, groups=b * c).view(b, c, h, w)" }, { "identifier": "paired_random_crop", "path": "basicsr/data/transforms.py", "snippet": "def paired_random_crop(img_gts, img_lqs, gt_patch_size, scale, gt_path=None):\n \"\"\"Paired random crop. Support Numpy array and Tensor inputs.\n\n It crops lists of lq and gt images with corresponding locations.\n\n Args:\n img_gts (list[ndarray] | ndarray | list[Tensor] | Tensor): GT images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n img_lqs (list[ndarray] | ndarray): LQ images. Note that all images\n should have the same shape. If the input is an ndarray, it will\n be transformed to a list containing itself.\n gt_patch_size (int): GT patch size.\n scale (int): Scale factor.\n gt_path (str): Path to ground-truth. Default: None.\n\n Returns:\n list[ndarray] | ndarray: GT images and LQ images. If returned results\n only have one element, just return ndarray.\n \"\"\"\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n if not isinstance(img_lqs, list):\n img_lqs = [img_lqs]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_lq, w_lq = img_lqs[0].size()[-2:]\n h_gt, w_gt = img_gts[0].size()[-2:]\n else:\n h_lq, w_lq = img_lqs[0].shape[0:2]\n h_gt, w_gt = img_gts[0].shape[0:2]\n lq_patch_size = gt_patch_size // scale\n\n if h_gt != h_lq * scale or w_gt != w_lq * scale:\n raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_lq < lq_patch_size or w_lq < lq_patch_size:\n raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '\n f'({lq_patch_size}, {lq_patch_size}). 
'\n f'Please remove {gt_path}.')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_lq - lq_patch_size)\n left = random.randint(0, w_lq - lq_patch_size)\n\n # crop lq patch\n if input_type == 'Tensor':\n img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]\n else:\n img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]\n\n # crop corresponding gt patch\n top_gt, left_gt = int(top * scale), int(left * scale)\n if input_type == 'Tensor':\n img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n if len(img_lqs) == 1:\n img_lqs = img_lqs[0]\n return img_gts, img_lqs" }, { "identifier": "triplet_random_crop", "path": "basicsr/data/transforms.py", "snippet": "def triplet_random_crop(img_gts, img_lqs, img_segs, gt_patch_size, scale, gt_path=None):\n\n if not isinstance(img_gts, list):\n img_gts = [img_gts]\n if not isinstance(img_lqs, list):\n img_lqs = [img_lqs]\n if not isinstance(img_segs, list):\n img_segs = [img_segs]\n\n # determine input type: Numpy array or Tensor\n input_type = 'Tensor' if torch.is_tensor(img_gts[0]) else 'Numpy'\n\n if input_type == 'Tensor':\n h_lq, w_lq = img_lqs[0].size()[-2:]\n h_gt, w_gt = img_gts[0].size()[-2:]\n h_seg, w_seg = img_segs[0].size()[-2:]\n else:\n h_lq, w_lq = img_lqs[0].shape[0:2]\n h_gt, w_gt = img_gts[0].shape[0:2]\n h_seg, w_seg = img_segs[0].shape[0:2]\n lq_patch_size = gt_patch_size // scale\n\n if h_gt != h_lq * scale or w_gt != w_lq * scale:\n raise ValueError(f'Scale mismatches. GT ({h_gt}, {w_gt}) is not {scale}x ',\n f'multiplication of LQ ({h_lq}, {w_lq}).')\n if h_lq < lq_patch_size or w_lq < lq_patch_size:\n raise ValueError(f'LQ ({h_lq}, {w_lq}) is smaller than patch size '\n f'({lq_patch_size}, {lq_patch_size}). '\n f'Please remove {gt_path}.')\n\n # randomly choose top and left coordinates for lq patch\n top = random.randint(0, h_lq - lq_patch_size)\n left = random.randint(0, w_lq - lq_patch_size)\n\n # crop lq patch\n if input_type == 'Tensor':\n img_lqs = [v[:, :, top:top + lq_patch_size, left:left + lq_patch_size] for v in img_lqs]\n else:\n img_lqs = [v[top:top + lq_patch_size, left:left + lq_patch_size, ...] for v in img_lqs]\n\n # crop corresponding gt patch\n top_gt, left_gt = int(top * scale), int(left * scale)\n if input_type == 'Tensor':\n img_gts = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_gts]\n else:\n img_gts = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] for v in img_gts]\n\n if input_type == 'Tensor':\n img_segs = [v[:, :, top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size] for v in img_segs]\n else:\n img_segs = [v[top_gt:top_gt + gt_patch_size, left_gt:left_gt + gt_patch_size, ...] 
for v in img_segs]\n\n if len(img_gts) == 1:\n img_gts = img_gts[0]\n if len(img_lqs) == 1:\n img_lqs = img_lqs[0]\n if len(img_segs) == 1:\n img_segs = img_segs[0]\n\n return img_gts, img_lqs, img_segs" }, { "identifier": "random_add_gaussian_noise_pt", "path": "basicsr/data/degradations.py", "snippet": "def random_add_gaussian_noise_pt(img, sigma_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):\n noise = random_generate_gaussian_noise_pt(img, sigma_range, gray_prob)\n out = img + noise\n if clip and rounds:\n out = torch.clamp((out * 255.0).round(), 0, 255) / 255.\n elif clip:\n out = torch.clamp(out, 0, 1)\n elif rounds:\n out = (out * 255.0).round() / 255.\n return out" }, { "identifier": "random_add_poisson_noise_pt", "path": "basicsr/data/degradations.py", "snippet": "def random_add_poisson_noise_pt(img, scale_range=(0, 1.0), gray_prob=0, clip=True, rounds=False):\n noise = random_generate_poisson_noise_pt(img, scale_range, gray_prob)\n out = img + noise\n if clip and rounds:\n out = torch.clamp((out * 255.0).round(), 0, 255) / 255.\n elif clip:\n out = torch.clamp(out, 0, 1)\n elif rounds:\n out = (out * 255.0).round() / 255.\n return out" }, { "identifier": "random_add_speckle_noise_pt", "path": "basicsr/data/degradations.py", "snippet": "def random_add_speckle_noise_pt(img, speckle_std):\n std_range = speckle_std\n std_l = std_range[0]\n std_r = std_range[1]\n mean=0\n std=random.uniform(std_l/255.,std_r/255.)\n gauss=torch.normal(mean=mean,std=std,size=img.size()).to(img.device)\n noisy=img+gauss*img\n noisy=torch.clamp(noisy,0,1)\n return noisy" }, { "identifier": "random_add_saltpepper_noise_pt", "path": "basicsr/data/degradations.py", "snippet": "def random_add_saltpepper_noise_pt(imgs, saltpepper_amount, saltpepper_svsp):\n p_range = saltpepper_amount\n p = random.uniform(p_range[0], p_range[1])\n q_range = saltpepper_svsp\n q = random.uniform(q_range[0], q_range[1])\n\n imgs = imgs.permute(0,2,3,1)\n\n outputs = []\n for i in range(imgs.size(0)):\n img = imgs[i]\n out = img.clone()\n flipped = np.random.choice([True, False], size=img.shape,\n p=[p, 1 - p])\n salted = np.random.choice([True, False], size=img.shape,\n p=[q, 1 - q])\n peppered = ~salted\n temp = flipped & salted\n out[flipped & salted] = 1\n out[flipped & peppered] = 0.\n noisy = torch.clamp(out, 0, 1)\n\n outputs.append(noisy.permute(2,0,1))\n if len(outputs)>1:\n return torch.cat(outputs, dim=0)\n else:\n return outputs[0].unsqueeze(0)" } ]
import torch import pytorch_lightning as pl import torch.nn.functional as F import random import torchvision.transforms as transforms from contextlib import contextmanager from einops import repeat, rearrange from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from ldm.modules.diffusionmodules.util import from_5d_to_4d, from_4d_to_5d from ldm.modules.diffusionmodules.model import Encoder, Decoder, Decoder_Mix, VideoDecoder, VideoDecoder_Mix, VideoDecoderV2, VideoDecoder_MixV2 from ldm.modules.distributions.distributions import DiagonalGaussianDistribution from ldm.util import instantiate_from_config from basicsr.utils import DiffJPEG, USMSharp from basicsr.utils.img_process_util import filter2D from basicsr.data.transforms import paired_random_crop, triplet_random_crop from basicsr.data.degradations import \ random_add_gaussian_noise_pt, \ random_add_poisson_noise_pt, \ random_add_speckle_noise_pt, \ random_add_saltpepper_noise_pt
15,638
def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: if 'first_stage_model' in k: sd[k[18:]] = sd[k] del sd[k] for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") return missing def encode(self, x): h, enc_fea = self.encoder(x, return_fea=True) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) # posterior = h return posterior, enc_fea def encode_gt(self, x, new_encoder): h = new_encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior, moments def decode(self, z, enc_fea): z = self.post_quant_conv(z) dec = self.decoder(z, enc_fea) return dec def forward(self, input, latent, sample_posterior=True): posterior, enc_fea_lq = self.encode(input) dec = self.decode(latent, enc_fea_lq) return dec, posterior @torch.no_grad() def _dequeue_and_enqueue(self): """It is the training pair pool for increasing the diversity in a batch. Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a batch could not have different resize scaling factors. Therefore, we employ this training pair pool to increase the degradation diversity in a batch. """ # initialize b, c, h, w = self.lq.size() _, c_, h_, w_ = self.latent.size() if b == self.configs.data.params.batch_size: if not hasattr(self, 'queue_size'): self.queue_size = self.configs.data.params.train.params.get('queue_size', b*50) if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() _, c, h, w = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_sample = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_latent = torch.zeros(self.queue_size, c_, h_, w_).cuda() self.queue_ptr = 0 if self.queue_ptr == self.queue_size: # the pool is full # do dequeue and enqueue # shuffle idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] self.queue_sample = self.queue_sample[idx] self.queue_latent = self.queue_latent[idx] # get first b samples lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() sample_dequeue = self.queue_sample[0:b, :, :, :].clone() latent_dequeue = self.queue_latent[0:b, :, :, :].clone() # update the queue self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.queue_sample[0:b, :, :, :] = self.sample.clone() self.queue_latent[0:b, :, :, :] = self.latent.clone() self.lq = lq_dequeue self.gt = gt_dequeue self.sample = sample_dequeue self.latent = latent_dequeue else: # only do enqueue self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_sample[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.sample.clone() self.queue_latent[self.queue_ptr:self.queue_ptr + 
b, :, :, :] = self.latent.clone() self.queue_ptr = self.queue_ptr + b def get_input(self, batch): input = batch['lq'] gt = batch['gt'] latent = batch['latent'] sample = batch['sample'] assert not torch.isnan(latent).any() input = input.to(memory_format=torch.contiguous_format).float() gt = gt.to(memory_format=torch.contiguous_format).float() latent = latent.to(memory_format=torch.contiguous_format).float() / 0.18215 gt = gt * 2.0 - 1.0 input = input * 2.0 - 1.0 sample = sample * 2.0 -1.0 return input, gt, latent, sample @torch.no_grad() def get_input_synthesis(self, batch, val=False, test_gt=False):
class VQModel(pl.LightningModule): def __init__(self, ddconfig, lossconfig, n_embed, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, batch_resize_range=None, scheduler_config=None, lr_g_factor=1.0, remap=None, sane_index_shape=False, # tell vector quantizer to return indices as bhw use_ema=False ): super().__init__() self.embed_dim = embed_dim self.n_embed = n_embed self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, remap=remap, sane_index_shape=sane_index_shape) self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor self.batch_resize_range = batch_resize_range if self.batch_resize_range is not None: print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) self.scheduler_config = scheduler_config self.lr_g_factor = lr_g_factor @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.parameters()) self.model_ema.copy_to(self) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list()): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") print(f"Unexpected Keys: {unexpected}") def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self) def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) quant, emb_loss, info = self.quantize(h) return quant, emb_loss, info def encode_to_prequant(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, quant): quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec def decode_code(self, code_b): quant_b = self.quantize.embed_code(code_b) dec = self.decode(quant_b) return dec def forward(self, input, return_pred_indices=False): quant, diff, (_,_,ind) = self.encode(input) dec = self.decode(quant) if return_pred_indices: return dec, diff, ind return dec, diff def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() if self.batch_resize_range is not None: lower_size = self.batch_resize_range[0] upper_size = self.batch_resize_range[1] if self.global_step <= 4: # do the first few batches with max size to avoid later oom new_resize = upper_size else: new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) if new_resize != x.shape[2]: x = 
F.interpolate(x, size=new_resize, mode="bicubic") x = x.detach() return x def training_step(self, batch, batch_idx, optimizer_idx): # https://github.com/pytorch/pytorch/issues/37142 # try not to fool the heuristics x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) if optimizer_idx == 0: # autoencode aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train", predicted_indices=ind) self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) return aeloss if optimizer_idx == 1: # discriminator discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) return discloss def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") return log_dict def _validation_step(self, batch, batch_idx, suffix=""): x = self.get_input(batch, self.image_key) xrec, qloss, ind = self(x, return_pred_indices=True) aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, self.global_step, last_layer=self.get_last_layer(), split="val"+suffix, predicted_indices=ind ) discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, self.global_step, last_layer=self.get_last_layer(), split="val"+suffix, predicted_indices=ind ) rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] self.log(f"val{suffix}/rec_loss", rec_loss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) self.log(f"val{suffix}/aeloss", aeloss, prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) if version.parse(pl.__version__) >= version.parse('1.4.0'): del log_dict_ae[f"val{suffix}/rec_loss"] self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr_d = self.learning_rate lr_g = self.lr_g_factor*self.learning_rate print("lr_d", lr_d) print("lr_g", lr_g) opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ list(self.decoder.parameters())+ list(self.quantize.parameters())+ list(self.quant_conv.parameters())+ list(self.post_quant_conv.parameters()), lr=lr_g, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr_d, betas=(0.5, 0.9)) if self.scheduler_config is not None: scheduler = instantiate_from_config(self.scheduler_config) print("Setting up LambdaLR scheduler...") scheduler = [ { 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1 }, { 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), 'interval': 'step', 'frequency': 1 }, ] return [opt_ae, opt_disc], scheduler return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if only_inputs: log["inputs"] = x return log xrec, _ = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) log["inputs"] = x log["reconstructions"] = xrec if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) log["reconstructions_ema"] = xrec_ema return log def to_rgb(self, x): assert self.image_key == "segmentation" if not 
hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. return x class VQModelInterface(VQModel): def __init__(self, embed_dim, *args, **kwargs): super().__init__(embed_dim=embed_dim, *args, **kwargs) self.embed_dim = embed_dim def encode(self, x): h = self.encoder(x) h = self.quant_conv(h) return h def decode(self, h, force_not_quantize=False): # also go through quantization layer if not force_not_quantize: quant, emb_loss, info = self.quantize(h) else: quant = h quant = self.post_quant_conv(quant) dec = self.decoder(quant) return dec class AutoencoderKL(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, ): super().__init__() self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder(**ddconfig) self.loss = instantiate_from_config(lossconfig) assert ddconfig["double_z"] self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: if 'first_stage_model' in k: sd[k[18:]] = sd[k] for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") # if len(unexpected) > 0: # print(f"Unexpected Keys: {unexpected}") def encode(self, x, return_encfea=False): h = self.encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) if return_encfea: return posterior, moments return posterior def encode_gt(self, x, new_encoder): h = new_encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior, moments def decode(self, z): z = self.post_quant_conv(z) dec = self.decoder(z) return dec def forward(self, input, sample_posterior=True): posterior = self.encode(input) if sample_posterior: z = posterior.sample() else: z = posterior.mode() dec = self.decode(z) return dec, posterior def get_input(self, batch, k): x = batch[k] if len(x.shape) == 3: x = x[..., None] # x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() x = x.to(memory_format=torch.contiguous_format).float() # x = x*2.0-1.0 return x def training_step(self, batch, batch_idx, optimizer_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) if optimizer_idx == 0: # train encoder+decoder+logvar aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_ae, 
prog_bar=False, logger=True, on_step=True, on_epoch=False) return aeloss if optimizer_idx == 1: # train the discriminator discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, last_layer=self.get_last_layer(), split="train") self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) return discloss def validation_step(self, batch, batch_idx): inputs = self.get_input(batch, self.image_key) reconstructions, posterior = self(inputs) aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, last_layer=self.get_last_layer(), split="val") discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, last_layer=self.get_last_layer(), split="val") self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) self.log_dict(log_dict_ae) self.log_dict(log_dict_disc) return self.log_dict def configure_optimizers(self): lr = self.learning_rate opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ list(self.decoder.parameters())+ list(self.quant_conv.parameters())+ list(self.post_quant_conv.parameters()), lr=lr, betas=(0.5, 0.9)) opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9)) return [opt_ae, opt_disc], [] def get_last_layer(self): return self.decoder.conv_out.weight @torch.no_grad() def log_images(self, batch, only_inputs=False, **kwargs): log = dict() x = self.get_input(batch, self.image_key) x = x.to(self.device) if not only_inputs: xrec, posterior = self(x) if x.shape[1] > 3: # colorize with random projection assert xrec.shape[1] > 3 x = self.to_rgb(x) xrec = self.to_rgb(xrec) # log["samples"] = self.decode(torch.randn_like(posterior.sample())) log["reconstructions"] = xrec log["inputs"] = x return log def to_rgb(self, x): assert self.image_key == "segmentation" if not hasattr(self, "colorize"): self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) x = F.conv2d(x, weight=self.colorize) x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
return x class IdentityFirstStage(torch.nn.Module): def __init__(self, *args, vq_interface=False, **kwargs): self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff super().__init__() def encode(self, x, *args, **kwargs): return x def decode(self, x, *args, **kwargs): return x def quantize(self, x, *args, **kwargs): if self.vq_interface: return x, None, [None, None, None] return x def forward(self, x, *args, **kwargs): return x class AutoencoderKLResi(pl.LightningModule): def __init__(self, ddconfig, lossconfig, embed_dim, ckpt_path=None, ignore_keys=[], image_key="image", colorize_nlabels=None, monitor=None, fusion_w=1.0, freeze_dec=True, synthesis_data=False, use_usm=False, test_gt=False, ): super().__init__() self.image_key = image_key self.encoder = Encoder(**ddconfig) self.decoder = Decoder_Mix(**ddconfig) self.decoder.fusion_w = fusion_w self.loss = instantiate_from_config(lossconfig) self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) self.embed_dim = embed_dim if colorize_nlabels is not None: assert type(colorize_nlabels)==int self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) if monitor is not None: self.monitor = monitor if ckpt_path is not None: missing_list = self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) else: missing_list = [] print('>>>>>>>>>>>>>>>>>missing>>>>>>>>>>>>>>>>>>>') print(missing_list) self.synthesis_data = synthesis_data self.use_usm = use_usm self.test_gt = test_gt if freeze_dec: for name, param in self.named_parameters(): if 'fusion_layer' in name: param.requires_grad = True # elif 'encoder' in name: # param.requires_grad = True # elif 'quant_conv' in name and 'post_quant_conv' not in name: # param.requires_grad = True elif 'loss.discriminator' in name: param.requires_grad = True else: param.requires_grad = False print('>>>>>>>>>>>>>>>>>trainable_list>>>>>>>>>>>>>>>>>>>') trainable_list = [] for name, params in self.named_parameters(): if params.requires_grad: trainable_list.append(name) print(trainable_list) print('>>>>>>>>>>>>>>>>>Untrainable_list>>>>>>>>>>>>>>>>>>>') untrainable_list = [] for name, params in self.named_parameters(): if not params.requires_grad: untrainable_list.append(name) print(untrainable_list) # untrainable_list = list(set(trainable_list).difference(set(missing_list))) # print('>>>>>>>>>>>>>>>>>untrainable_list>>>>>>>>>>>>>>>>>>>') # print(untrainable_list) # def init_from_ckpt(self, path, ignore_keys=list()): # sd = torch.load(path, map_location="cpu")["state_dict"] # keys = list(sd.keys()) # for k in keys: # for ik in ignore_keys: # if k.startswith(ik): # print("Deleting key {} from state_dict.".format(k)) # del sd[k] # self.load_state_dict(sd, strict=False) # print(f"Restored from {path}") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: if 'first_stage_model' in k: sd[k[18:]] = sd[k] del sd[k] for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Encoder Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 
0: print(f"Unexpected Keys: {unexpected}") return missing def encode(self, x): h, enc_fea = self.encoder(x, return_fea=True) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) # posterior = h return posterior, enc_fea def encode_gt(self, x, new_encoder): h = new_encoder(x) moments = self.quant_conv(h) posterior = DiagonalGaussianDistribution(moments) return posterior, moments def decode(self, z, enc_fea): z = self.post_quant_conv(z) dec = self.decoder(z, enc_fea) return dec def forward(self, input, latent, sample_posterior=True): posterior, enc_fea_lq = self.encode(input) dec = self.decode(latent, enc_fea_lq) return dec, posterior @torch.no_grad() def _dequeue_and_enqueue(self): """It is the training pair pool for increasing the diversity in a batch. Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a batch could not have different resize scaling factors. Therefore, we employ this training pair pool to increase the degradation diversity in a batch. """ # initialize b, c, h, w = self.lq.size() _, c_, h_, w_ = self.latent.size() if b == self.configs.data.params.batch_size: if not hasattr(self, 'queue_size'): self.queue_size = self.configs.data.params.train.params.get('queue_size', b*50) if not hasattr(self, 'queue_lr'): assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() _, c, h, w = self.gt.size() self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_sample = torch.zeros(self.queue_size, c, h, w).cuda() self.queue_latent = torch.zeros(self.queue_size, c_, h_, w_).cuda() self.queue_ptr = 0 if self.queue_ptr == self.queue_size: # the pool is full # do dequeue and enqueue # shuffle idx = torch.randperm(self.queue_size) self.queue_lr = self.queue_lr[idx] self.queue_gt = self.queue_gt[idx] self.queue_sample = self.queue_sample[idx] self.queue_latent = self.queue_latent[idx] # get first b samples lq_dequeue = self.queue_lr[0:b, :, :, :].clone() gt_dequeue = self.queue_gt[0:b, :, :, :].clone() sample_dequeue = self.queue_sample[0:b, :, :, :].clone() latent_dequeue = self.queue_latent[0:b, :, :, :].clone() # update the queue self.queue_lr[0:b, :, :, :] = self.lq.clone() self.queue_gt[0:b, :, :, :] = self.gt.clone() self.queue_sample[0:b, :, :, :] = self.sample.clone() self.queue_latent[0:b, :, :, :] = self.latent.clone() self.lq = lq_dequeue self.gt = gt_dequeue self.sample = sample_dequeue self.latent = latent_dequeue else: # only do enqueue self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() self.queue_sample[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.sample.clone() self.queue_latent[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.latent.clone() self.queue_ptr = self.queue_ptr + b def get_input(self, batch): input = batch['lq'] gt = batch['gt'] latent = batch['latent'] sample = batch['sample'] assert not torch.isnan(latent).any() input = input.to(memory_format=torch.contiguous_format).float() gt = gt.to(memory_format=torch.contiguous_format).float() latent = latent.to(memory_format=torch.contiguous_format).float() / 0.18215 gt = gt * 2.0 - 1.0 input = input * 2.0 - 1.0 sample = sample * 2.0 -1.0 return input, gt, latent, sample @torch.no_grad() def get_input_synthesis(self, batch, val=False, test_gt=False):
jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts
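The completion line above instantiates a DiffJPEG module so that JPEG compression artifacts can be simulated on the GPU as part of a synthetic degradation pipeline. When a differentiable operator is not needed, the same effect is commonly produced by a plain encode/decode round trip through OpenCV; the helper below is a sketch of that idea, and its name and default quality are illustrative choices rather than values taken from the repository.

import cv2
import numpy as np


def add_jpeg_artifacts(img: np.ndarray, quality: int = 30) -> np.ndarray:
    """Round-trip a uint8 BGR image through JPEG to simulate compression.

    Lower `quality` values (range 1-100) produce stronger blocking artifacts.
    """
    ok, buf = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
    if not ok:
        raise RuntimeError('JPEG encoding failed')
    return cv2.imdecode(buf, cv2.IMREAD_COLOR)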
11
2023-11-30 01:50:29+00:00
24k
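The autoencoder classes in the record above pass the quant_conv output to a DiagonalGaussianDistribution and then call .sample() or .mode() on it; that class itself is not part of the excerpt. The sketch below illustrates the behaviour those calls imply, splitting the moments tensor into a mean and a log-variance and sampling with the reparameterisation trick; the class name, the clamping range and the KL helper are assumptions rather than code copied from the repository.

import torch


class DiagonalGaussianSketch:
    """Stand-in for the posterior object returned by AutoencoderKL.encode()."""

    def __init__(self, moments: torch.Tensor):
        # `moments` has 2 * embed_dim channels: the first half is read as the
        # mean, the second half as the log-variance of a diagonal Gaussian.
        self.mean, self.logvar = torch.chunk(moments, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.std = torch.exp(0.5 * self.logvar)

    def sample(self) -> torch.Tensor:
        # Reparameterisation trick: keeps the draw differentiable w.r.t. the
        # encoder, which is what lets training_step backpropagate through it.
        return self.mean + self.std * torch.randn_like(self.mean)

    def mode(self) -> torch.Tensor:
        # The mode of a Gaussian equals its mean; used when
        # sample_posterior=False in forward().
        return self.mean

    def kl(self) -> torch.Tensor:
        # KL divergence to a standard normal, summed over latent dimensions.
        return 0.5 * torch.sum(
            self.mean.pow(2) + self.logvar.exp() - 1.0 - self.logvar,
            dim=[1, 2, 3])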
Czm369/MixPL
mmdet/configs/rtmdet/rtmdet_ins_tiny_8xb32_300e_coco.py
[ { "identifier": "PackDetInputs", "path": "mmdet/datasets/transforms/formatting.py", "snippet": "class PackDetInputs(BaseTransform):\n \"\"\"Pack the inputs data for the detection / semantic segmentation /\n panoptic segmentation.\n\n The ``img_meta`` item is always populated. The contents of the\n ``img_meta`` dictionary depends on ``meta_keys``. By default this includes:\n\n - ``img_id``: id of the image\n\n - ``img_path``: path to the image file\n\n - ``ori_shape``: original shape of the image as a tuple (h, w)\n\n - ``img_shape``: shape of the image input to the network as a tuple \\\n (h, w). Note that images may be zero padded on the \\\n bottom/right if the batch tensor is larger than this shape.\n\n - ``scale_factor``: a float indicating the preprocessing scale\n\n - ``flip``: a boolean indicating if image flip transform was used\n\n - ``flip_direction``: the flipping direction\n\n Args:\n meta_keys (Sequence[str], optional): Meta keys to be converted to\n ``mmcv.DataContainer`` and collected in ``data[img_metas]``.\n Default: ``('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')``\n \"\"\"\n mapping_table = {\n 'gt_bboxes': 'bboxes',\n 'gt_bboxes_labels': 'labels',\n 'gt_masks': 'masks'\n }\n\n def __init__(self,\n meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',\n 'scale_factor', 'flip', 'flip_direction')):\n self.meta_keys = meta_keys\n\n def transform(self, results: dict) -> dict:\n \"\"\"Method to pack the input data.\n\n Args:\n results (dict): Result dict from the data pipeline.\n\n Returns:\n dict:\n\n - 'inputs' (obj:`torch.Tensor`): The forward data of models.\n - 'data_sample' (obj:`DetDataSample`): The annotation info of the\n sample.\n \"\"\"\n packed_results = dict()\n if 'img' in results:\n img = results['img']\n if len(img.shape) < 3:\n img = np.expand_dims(img, -1)\n # To improve the computational speed by by 3-5 times, apply:\n # If image is not contiguous, use\n # `numpy.transpose()` followed by `numpy.ascontiguousarray()`\n # If image is already contiguous, use\n # `torch.permute()` followed by `torch.contiguous()`\n # Refer to https://github.com/open-mmlab/mmdetection/pull/9533\n # for more details\n if not img.flags.c_contiguous:\n img = np.ascontiguousarray(img.transpose(2, 0, 1))\n img = to_tensor(img)\n else:\n img = to_tensor(img).permute(2, 0, 1).contiguous()\n\n packed_results['inputs'] = img\n\n if 'gt_ignore_flags' in results:\n valid_idx = np.where(results['gt_ignore_flags'] == 0)[0]\n ignore_idx = np.where(results['gt_ignore_flags'] == 1)[0]\n\n data_sample = DetDataSample()\n instance_data = InstanceData()\n ignore_instance_data = InstanceData()\n\n for key in self.mapping_table.keys():\n if key not in results:\n continue\n if key == 'gt_masks' or isinstance(results[key], BaseBoxes):\n if 'gt_ignore_flags' in results:\n instance_data[\n self.mapping_table[key]] = results[key][valid_idx]\n ignore_instance_data[\n self.mapping_table[key]] = results[key][ignore_idx]\n else:\n instance_data[self.mapping_table[key]] = results[key]\n else:\n if 'gt_ignore_flags' in results:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key][valid_idx])\n ignore_instance_data[self.mapping_table[key]] = to_tensor(\n results[key][ignore_idx])\n else:\n instance_data[self.mapping_table[key]] = to_tensor(\n results[key])\n data_sample.gt_instances = instance_data\n data_sample.ignored_instances = ignore_instance_data\n\n if 'proposals' in results:\n proposals = InstanceData(\n 
bboxes=to_tensor(results['proposals']),\n scores=to_tensor(results['proposals_scores']))\n data_sample.proposals = proposals\n\n if 'gt_seg_map' in results:\n gt_sem_seg_data = dict(\n sem_seg=to_tensor(results['gt_seg_map'][None, ...].copy()))\n gt_sem_seg_data = PixelData(**gt_sem_seg_data)\n if 'ignore_index' in results:\n metainfo = dict(ignore_index=results['ignore_index'])\n gt_sem_seg_data.set_metainfo(metainfo)\n data_sample.gt_sem_seg = gt_sem_seg_data\n\n img_meta = {}\n for key in self.meta_keys:\n if key in results:\n img_meta[key] = results[key]\n data_sample.set_metainfo(img_meta)\n packed_results['data_samples'] = data_sample\n\n return packed_results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(meta_keys={self.meta_keys})'\n return repr_str" }, { "identifier": "FilterAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class FilterAnnotations(BaseTransform):\n \"\"\"Filter invalid annotations.\n\n Required Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n\n Args:\n min_gt_bbox_wh (tuple[float]): Minimum width and height of ground truth\n boxes. Default: (1., 1.)\n min_gt_mask_area (int): Minimum foreground area of ground truth masks.\n Default: 1\n by_box (bool): Filter instances with bounding boxes not meeting the\n min_gt_bbox_wh threshold. Default: True\n by_mask (bool): Filter instances with masks not meeting\n min_gt_mask_area threshold. Default: False\n keep_empty (bool): Whether to return None when it\n becomes an empty bbox after filtering. 
Defaults to True.\n \"\"\"\n\n def __init__(self,\n min_gt_bbox_wh: Tuple[int, int] = (1, 1),\n min_gt_mask_area: int = 1,\n by_box: bool = True,\n by_mask: bool = False,\n keep_empty: bool = True) -> None:\n # TODO: add more filter options\n assert by_box or by_mask\n self.min_gt_bbox_wh = min_gt_bbox_wh\n self.min_gt_mask_area = min_gt_mask_area\n self.by_box = by_box\n self.by_mask = by_mask\n self.keep_empty = keep_empty\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to filter annotations.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n assert 'gt_bboxes' in results\n gt_bboxes = results['gt_bboxes']\n if gt_bboxes.shape[0] == 0:\n return results\n\n tests = []\n if self.by_box:\n tests.append(\n ((gt_bboxes.widths > self.min_gt_bbox_wh[0]) &\n (gt_bboxes.heights > self.min_gt_bbox_wh[1])).numpy())\n if self.by_mask:\n assert 'gt_masks' in results\n gt_masks = results['gt_masks']\n tests.append(gt_masks.areas >= self.min_gt_mask_area)\n\n keep = tests[0]\n for t in tests[1:]:\n keep = keep & t\n\n if not keep.any():\n if self.keep_empty:\n return None\n\n keys = ('gt_bboxes', 'gt_bboxes_labels', 'gt_masks', 'gt_ignore_flags')\n for key in keys:\n if key in results:\n results[key] = results[key][keep]\n\n return results\n\n def __repr__(self):\n return self.__class__.__name__ + \\\n f'(min_gt_bbox_wh={self.min_gt_bbox_wh}, ' \\\n f'keep_empty={self.keep_empty})'" }, { "identifier": "LoadAnnotations", "path": "mmdet/datasets/transforms/loading.py", "snippet": "class LoadAnnotations(MMCV_LoadAnnotations):\n \"\"\"Load and process the ``instances`` and ``seg_map`` annotation provided\n by dataset.\n\n The annotation format is as the following:\n\n .. code-block:: python\n\n {\n 'instances':\n [\n {\n # List of 4 numbers representing the bounding box of the\n # instance, in (x1, y1, x2, y2) order.\n 'bbox': [x1, y1, x2, y2],\n\n # Label of image classification.\n 'bbox_label': 1,\n\n # Used in instance/panoptic segmentation. The segmentation mask\n # of the instance or the information of segments.\n # 1. If list[list[float]], it represents a list of polygons,\n # one for each connected component of the object. Each\n # list[float] is one simple polygon in the format of\n # [x1, y1, ..., xn, yn] (n >= 3). The Xs and Ys are absolute\n # coordinates in unit of pixels.\n # 2. If dict, it represents the per-pixel segmentation mask in\n # COCO's compressed RLE format. The dict should have keys\n # “size” and “counts”. Can be loaded by pycocotools\n 'mask': list[list[float]] or dict,\n\n }\n ]\n # Filename of semantic or panoptic segmentation ground truth file.\n 'seg_map_path': 'a/b/c'\n }\n\n After this module, the annotation has been changed to the format below:\n\n .. code-block:: python\n\n {\n # In (x1, y1, x2, y2) order, float type. 
N is the number of bboxes\n # in an image\n 'gt_bboxes': BaseBoxes(N, 4)\n # In int type.\n 'gt_bboxes_labels': np.ndarray(N, )\n # In built-in class\n 'gt_masks': PolygonMasks (H, W) or BitmapMasks (H, W)\n # In uint8 type.\n 'gt_seg_map': np.ndarray (H, W)\n # in (x, y, v) order, float type.\n }\n\n Required Keys:\n\n - height\n - width\n - instances\n\n - bbox (optional)\n - bbox_label\n - mask (optional)\n - ignore_flag\n\n - seg_map_path (optional)\n\n Added Keys:\n\n - gt_bboxes (BaseBoxes[torch.float32])\n - gt_bboxes_labels (np.int64)\n - gt_masks (BitmapMasks | PolygonMasks)\n - gt_seg_map (np.uint8)\n - gt_ignore_flags (bool)\n\n Args:\n with_bbox (bool): Whether to parse and load the bbox annotation.\n Defaults to True.\n with_label (bool): Whether to parse and load the label annotation.\n Defaults to True.\n with_mask (bool): Whether to parse and load the mask annotation.\n Default: False.\n with_seg (bool): Whether to parse and load the semantic segmentation\n annotation. Defaults to False.\n poly2mask (bool): Whether to convert mask to bitmap. Default: True.\n box_type (str): The box type used to wrap the bboxes. If ``box_type``\n is None, gt_bboxes will keep being np.ndarray. Defaults to 'hbox'.\n reduce_zero_label (bool): Whether reduce all label value\n by 1. Usually used for datasets where 0 is background label.\n Defaults to False.\n ignore_index (int): The label index to be ignored.\n Valid only if reduce_zero_label is true. Defaults is 255.\n imdecode_backend (str): The image decoding backend type. The backend\n argument for :func:``mmcv.imfrombytes``.\n See :fun:``mmcv.imfrombytes`` for details.\n Defaults to 'cv2'.\n backend_args (dict, optional): Arguments to instantiate the\n corresponding backend. Defaults to None.\n \"\"\"\n\n def __init__(\n self,\n with_mask: bool = False,\n poly2mask: bool = True,\n box_type: str = 'hbox',\n # use for semseg\n reduce_zero_label: bool = False,\n ignore_index: int = 255,\n **kwargs) -> None:\n super(LoadAnnotations, self).__init__(**kwargs)\n self.with_mask = with_mask\n self.poly2mask = poly2mask\n self.box_type = box_type\n self.reduce_zero_label = reduce_zero_label\n self.ignore_index = ignore_index\n\n def _load_bboxes(self, results: dict) -> None:\n \"\"\"Private function to load bounding box annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n Returns:\n dict: The dict contains loaded bounding box annotations.\n \"\"\"\n gt_bboxes = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_bboxes.append(instance['bbox'])\n gt_ignore_flags.append(instance['ignore_flag'])\n if self.box_type is None:\n results['gt_bboxes'] = np.array(\n gt_bboxes, dtype=np.float32).reshape((-1, 4))\n else:\n _, box_type_cls = get_box_type(self.box_type)\n results['gt_bboxes'] = box_type_cls(gt_bboxes, dtype=torch.float32)\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n\n def _load_labels(self, results: dict) -> None:\n \"\"\"Private function to load label annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded label annotations.\n \"\"\"\n gt_bboxes_labels = []\n for instance in results.get('instances', []):\n gt_bboxes_labels.append(instance['bbox_label'])\n # TODO: Inconsistent with mmcv, consider how to deal with it later.\n results['gt_bboxes_labels'] = np.array(\n gt_bboxes_labels, dtype=np.int64)\n\n def _poly2mask(self, mask_ann: Union[list, dict], img_h: int,\n img_w: int) -> 
np.ndarray:\n \"\"\"Private function to convert masks represented with polygon to\n bitmaps.\n\n Args:\n mask_ann (list | dict): Polygon mask annotation input.\n img_h (int): The height of output mask.\n img_w (int): The width of output mask.\n\n Returns:\n np.ndarray: The decode bitmap mask of shape (img_h, img_w).\n \"\"\"\n\n if isinstance(mask_ann, list):\n # polygon -- a single object might consist of multiple parts\n # we merge all parts into one mask rle code\n rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n rle = maskUtils.merge(rles)\n elif isinstance(mask_ann['counts'], list):\n # uncompressed RLE\n rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)\n else:\n # rle\n rle = mask_ann\n mask = maskUtils.decode(rle)\n return mask\n\n def _process_masks(self, results: dict) -> list:\n \"\"\"Process gt_masks and filter invalid polygons.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n list: Processed gt_masks.\n \"\"\"\n gt_masks = []\n gt_ignore_flags = []\n for instance in results.get('instances', []):\n gt_mask = instance['mask']\n # If the annotation of segmentation mask is invalid,\n # ignore the whole instance.\n if isinstance(gt_mask, list):\n gt_mask = [\n np.array(polygon) for polygon in gt_mask\n if len(polygon) % 2 == 0 and len(polygon) >= 6\n ]\n if len(gt_mask) == 0:\n # ignore this instance and set gt_mask to a fake mask\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif not self.poly2mask:\n # `PolygonMasks` requires a ploygon of format List[np.array],\n # other formats are invalid.\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n elif isinstance(gt_mask, dict) and \\\n not (gt_mask.get('counts') is not None and\n gt_mask.get('size') is not None and\n isinstance(gt_mask['counts'], (list, str))):\n # if gt_mask is a dict, it should include `counts` and `size`,\n # so that `BitmapMasks` can uncompressed RLE\n instance['ignore_flag'] = 1\n gt_mask = [np.zeros(6)]\n gt_masks.append(gt_mask)\n # re-process gt_ignore_flags\n gt_ignore_flags.append(instance['ignore_flag'])\n results['gt_ignore_flags'] = np.array(gt_ignore_flags, dtype=bool)\n return gt_masks\n\n def _load_masks(self, results: dict) -> None:\n \"\"\"Private function to load mask annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n \"\"\"\n h, w = results['ori_shape']\n gt_masks = self._process_masks(results)\n if self.poly2mask:\n gt_masks = BitmapMasks(\n [self._poly2mask(mask, h, w) for mask in gt_masks], h, w)\n else:\n # fake polygon masks will be ignored in `PackDetInputs`\n gt_masks = PolygonMasks([mask for mask in gt_masks], h, w)\n results['gt_masks'] = gt_masks\n\n def _load_seg_map(self, results: dict) -> None:\n \"\"\"Private function to load semantic segmentation annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmcv.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded semantic segmentation annotations.\n \"\"\"\n if results.get('seg_map_path', None) is None:\n return\n\n img_bytes = get(\n results['seg_map_path'], backend_args=self.backend_args)\n gt_semantic_seg = mmcv.imfrombytes(\n img_bytes, flag='unchanged',\n backend=self.imdecode_backend).squeeze()\n\n if self.reduce_zero_label:\n # avoid using underflow conversion\n gt_semantic_seg[gt_semantic_seg == 0] = self.ignore_index\n gt_semantic_seg = gt_semantic_seg - 1\n gt_semantic_seg[gt_semantic_seg == self.ignore_index -\n 1] = self.ignore_index\n\n # modify if custom classes\n if results.get('label_map', None) is 
not None:\n # Add deep copy to solve bug of repeatedly\n # replace `gt_semantic_seg`, which is reported in\n # https://github.com/open-mmlab/mmsegmentation/pull/1445/\n gt_semantic_seg_copy = gt_semantic_seg.copy()\n for old_id, new_id in results['label_map'].items():\n gt_semantic_seg[gt_semantic_seg_copy == old_id] = new_id\n results['gt_seg_map'] = gt_semantic_seg\n results['ignore_index'] = self.ignore_index\n\n def transform(self, results: dict) -> dict:\n \"\"\"Function to load multiple types annotations.\n\n Args:\n results (dict): Result dict from :obj:``mmengine.BaseDataset``.\n\n Returns:\n dict: The dict contains loaded bounding box, label and\n semantic segmentation.\n \"\"\"\n\n if self.with_bbox:\n self._load_bboxes(results)\n if self.with_label:\n self._load_labels(results)\n if self.with_mask:\n self._load_masks(results)\n if self.with_seg:\n self._load_seg_map(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(with_bbox={self.with_bbox}, '\n repr_str += f'with_label={self.with_label}, '\n repr_str += f'with_mask={self.with_mask}, '\n repr_str += f'with_seg={self.with_seg}, '\n repr_str += f'poly2mask={self.poly2mask}, '\n repr_str += f\"imdecode_backend='{self.imdecode_backend}', \"\n repr_str += f'backend_args={self.backend_args})'\n return repr_str" }, { "identifier": "CachedMixUp", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class CachedMixUp(BaseTransform):\n \"\"\"Cached mixup data augmentation.\n\n .. code:: text\n\n mixup transform\n +------------------------------+\n | mixup image | |\n | +--------|--------+ |\n | | | | |\n |---------------+ | |\n | | | |\n | | image | |\n | | | |\n | | | |\n | |-----------------+ |\n | pad |\n +------------------------------+\n\n The cached mixup transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Another random image is picked from the cache and embedded in\n the top left patch(after padding and resizing)\n 3. The target of mixup transform is the weighted average of mixup\n image and origin image.\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n - mix_results (List[dict])\n\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n\n Args:\n img_scale (Sequence[int]): Image output size after mixup pipeline.\n The shape order should be (width, height). Defaults to (640, 640).\n ratio_range (Sequence[float]): Scale ratio of mixup image.\n Defaults to (0.5, 1.5).\n flip_ratio (float): Horizontal flip ratio of mixup image.\n Defaults to 0.5.\n pad_val (int): Pad value. Defaults to 114.\n max_iters (int): The maximum number of iterations. If the number of\n iterations is greater than `max_iters`, but gt_bbox is still\n empty, then the iteration is terminated. Defaults to 15.\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. 
Defaults to 20.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. If set to False, use FIFO popping method.\n Defaults to True.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n \"\"\"\n\n def __init__(self,\n img_scale: Tuple[int, int] = (640, 640),\n ratio_range: Tuple[float, float] = (0.5, 1.5),\n flip_ratio: float = 0.5,\n pad_val: float = 114.0,\n max_iters: int = 15,\n bbox_clip_border: bool = True,\n max_cached_images: int = 20,\n random_pop: bool = True,\n prob: float = 1.0) -> None:\n assert isinstance(img_scale, tuple)\n assert max_cached_images >= 2, 'The length of cache must >= 2, ' \\\n f'but got {max_cached_images}.'\n assert 0 <= prob <= 1.0, 'The probability should be in range [0,1]. ' \\\n f'got {prob}.'\n self.dynamic_scale = img_scale\n self.ratio_range = ratio_range\n self.flip_ratio = flip_ratio\n self.pad_val = pad_val\n self.max_iters = max_iters\n self.bbox_clip_border = bbox_clip_border\n self.results_cache = []\n\n self.max_cached_images = max_cached_images\n self.random_pop = random_pop\n self.prob = prob\n\n @cache_randomness\n def get_indexes(self, cache: list) -> int:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The result cache.\n\n Returns:\n int: index.\n \"\"\"\n\n for i in range(self.max_iters):\n index = random.randint(0, len(cache) - 1)\n gt_bboxes_i = cache[index]['gt_bboxes']\n if len(gt_bboxes_i) != 0:\n break\n return index\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"MixUp transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 1:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n\n index = self.get_indexes(self.results_cache)\n retrieve_results = copy.deepcopy(self.results_cache[index])\n\n # TODO: refactor mixup to reuse these code.\n if retrieve_results['gt_bboxes'].shape[0] == 0:\n # empty bbox\n return results\n\n retrieve_img = retrieve_results['img']\n with_mask = True if 'gt_masks' in results else False\n\n jit_factor = random.uniform(*self.ratio_range)\n is_flip = random.uniform(0, 1) > self.flip_ratio\n\n if len(retrieve_img.shape) == 3:\n out_img = np.ones(\n (self.dynamic_scale[1], self.dynamic_scale[0], 3),\n dtype=retrieve_img.dtype) * self.pad_val\n else:\n out_img = np.ones(\n self.dynamic_scale[::-1],\n dtype=retrieve_img.dtype) * self.pad_val\n\n # 1. keep_ratio resize\n scale_ratio = min(self.dynamic_scale[1] / retrieve_img.shape[0],\n self.dynamic_scale[0] / retrieve_img.shape[1])\n retrieve_img = mmcv.imresize(\n retrieve_img, (int(retrieve_img.shape[1] * scale_ratio),\n int(retrieve_img.shape[0] * scale_ratio)))\n\n # 2. paste\n out_img[:retrieve_img.shape[0], :retrieve_img.shape[1]] = retrieve_img\n\n # 3. scale jit\n scale_ratio *= jit_factor\n out_img = mmcv.imresize(out_img, (int(out_img.shape[1] * jit_factor),\n int(out_img.shape[0] * jit_factor)))\n\n # 4. flip\n if is_flip:\n out_img = out_img[:, ::-1, :]\n\n # 5. 
random crop\n ori_img = results['img']\n origin_h, origin_w = out_img.shape[:2]\n target_h, target_w = ori_img.shape[:2]\n padded_img = np.ones((max(origin_h, target_h), max(\n origin_w, target_w), 3)) * self.pad_val\n padded_img = padded_img.astype(np.uint8)\n padded_img[:origin_h, :origin_w] = out_img\n\n x_offset, y_offset = 0, 0\n if padded_img.shape[0] > target_h:\n y_offset = random.randint(0, padded_img.shape[0] - target_h)\n if padded_img.shape[1] > target_w:\n x_offset = random.randint(0, padded_img.shape[1] - target_w)\n padded_cropped_img = padded_img[y_offset:y_offset + target_h,\n x_offset:x_offset + target_w]\n\n # 6. adjust bbox\n retrieve_gt_bboxes = retrieve_results['gt_bboxes']\n retrieve_gt_bboxes.rescale_([scale_ratio, scale_ratio])\n if with_mask:\n retrieve_gt_masks = retrieve_results['gt_masks'].rescale(\n scale_ratio)\n\n if self.bbox_clip_border:\n retrieve_gt_bboxes.clip_([origin_h, origin_w])\n\n if is_flip:\n retrieve_gt_bboxes.flip_([origin_h, origin_w],\n direction='horizontal')\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.flip()\n\n # 7. filter\n cp_retrieve_gt_bboxes = retrieve_gt_bboxes.clone()\n cp_retrieve_gt_bboxes.translate_([-x_offset, -y_offset])\n if with_mask:\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-x_offset,\n direction='horizontal')\n retrieve_gt_masks = retrieve_gt_masks.translate(\n out_shape=(target_h, target_w),\n offset=-y_offset,\n direction='vertical')\n\n if self.bbox_clip_border:\n cp_retrieve_gt_bboxes.clip_([target_h, target_w])\n\n # 8. mix up\n ori_img = ori_img.astype(np.float32)\n mixup_img = 0.5 * ori_img + 0.5 * padded_cropped_img.astype(np.float32)\n\n retrieve_gt_bboxes_labels = retrieve_results['gt_bboxes_labels']\n retrieve_gt_ignore_flags = retrieve_results['gt_ignore_flags']\n\n mixup_gt_bboxes = cp_retrieve_gt_bboxes.cat(\n (results['gt_bboxes'], cp_retrieve_gt_bboxes), dim=0)\n mixup_gt_bboxes_labels = np.concatenate(\n (results['gt_bboxes_labels'], retrieve_gt_bboxes_labels), axis=0)\n mixup_gt_ignore_flags = np.concatenate(\n (results['gt_ignore_flags'], retrieve_gt_ignore_flags), axis=0)\n if with_mask:\n mixup_gt_masks = retrieve_gt_masks.cat(\n [results['gt_masks'], retrieve_gt_masks])\n\n # remove outside bbox\n inside_inds = mixup_gt_bboxes.is_inside([target_h, target_w]).numpy()\n mixup_gt_bboxes = mixup_gt_bboxes[inside_inds]\n mixup_gt_bboxes_labels = mixup_gt_bboxes_labels[inside_inds]\n mixup_gt_ignore_flags = mixup_gt_ignore_flags[inside_inds]\n if with_mask:\n mixup_gt_masks = mixup_gt_masks[inside_inds]\n\n results['img'] = mixup_img.astype(np.uint8)\n results['img_shape'] = mixup_img.shape[:2]\n results['gt_bboxes'] = mixup_gt_bboxes\n results['gt_bboxes_labels'] = mixup_gt_bboxes_labels\n results['gt_ignore_flags'] = mixup_gt_ignore_flags\n if with_mask:\n results['gt_masks'] = mixup_gt_masks\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(dynamic_scale={self.dynamic_scale}, '\n repr_str += f'ratio_range={self.ratio_range}, '\n repr_str += f'flip_ratio={self.flip_ratio}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'max_iters={self.max_iters}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop}, '\n repr_str += f'prob={self.prob})'\n return repr_str" }, { "identifier": "CachedMosaic", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class 
CachedMosaic(Mosaic):\n \"\"\"Cached mosaic augmentation.\n\n Cached mosaic transform will random select images from the cache\n and combine them into one output image.\n\n .. code:: text\n\n mosaic transform\n center_x\n +------------------------------+\n | pad | pad |\n | +-----------+ |\n | | | |\n | | image1 |--------+ |\n | | | | |\n | | | image2 | |\n center_y |----+-------------+-----------|\n | | cropped | |\n |pad | image3 | image4 |\n | | | |\n +----|-------------+-----------+\n | |\n +-------------+\n\n The cached mosaic transform steps are as follows:\n\n 1. Append the results from the last transform into the cache.\n 2. Choose the mosaic center as the intersections of 4 images\n 3. Get the left top image according to the index, and randomly\n sample another 3 images from the result cache.\n 4. Sub image will be cropped if image is larger than mosaic patch\n\n Required Keys:\n\n - img\n - gt_bboxes (np.float32) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_ignore_flags (bool) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_ignore_flags (optional)\n\n Args:\n img_scale (Sequence[int]): Image size before mosaic pipeline of single\n image. The shape order should be (width, height).\n Defaults to (640, 640).\n center_ratio_range (Sequence[float]): Center ratio range of mosaic\n output. Defaults to (0.5, 1.5).\n bbox_clip_border (bool, optional): Whether to clip the objects outside\n the border of the image. In some dataset like MOT17, the gt bboxes\n are allowed to cross the border of images. Therefore, we don't\n need to clip the gt bboxes in these cases. Defaults to True.\n pad_val (int): Pad value. Defaults to 114.\n prob (float): Probability of applying this transformation.\n Defaults to 1.0.\n max_cached_images (int): The maximum length of the cache. The larger\n the cache, the stronger the randomness of this transform. As a\n rule of thumb, providing 10 caches for each image suffices for\n randomness. Defaults to 40.\n random_pop (bool): Whether to randomly pop a result from the cache\n when the cache is full. 
If set to False, use FIFO popping method.\n Defaults to True.\n \"\"\"\n\n def __init__(self,\n *args,\n max_cached_images: int = 40,\n random_pop: bool = True,\n **kwargs) -> None:\n super().__init__(*args, **kwargs)\n self.results_cache = []\n self.random_pop = random_pop\n assert max_cached_images >= 4, 'The length of cache must >= 4, ' \\\n f'but got {max_cached_images}.'\n self.max_cached_images = max_cached_images\n\n @cache_randomness\n def get_indexes(self, cache: list) -> list:\n \"\"\"Call function to collect indexes.\n\n Args:\n cache (list): The results cache.\n\n Returns:\n list: indexes.\n \"\"\"\n\n indexes = [random.randint(0, len(cache) - 1) for _ in range(3)]\n return indexes\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Mosaic transform function.\n\n Args:\n results (dict): Result dict.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n # cache and pop images\n self.results_cache.append(copy.deepcopy(results))\n if len(self.results_cache) > self.max_cached_images:\n if self.random_pop:\n index = random.randint(0, len(self.results_cache) - 1)\n else:\n index = 0\n self.results_cache.pop(index)\n\n if len(self.results_cache) <= 4:\n return results\n\n if random.uniform(0, 1) > self.prob:\n return results\n indices = self.get_indexes(self.results_cache)\n mix_results = [copy.deepcopy(self.results_cache[i]) for i in indices]\n\n # TODO: refactor mosaic to reuse these code.\n mosaic_bboxes = []\n mosaic_bboxes_labels = []\n mosaic_ignore_flags = []\n mosaic_masks = []\n with_mask = True if 'gt_masks' in results else False\n\n if len(results['img'].shape) == 3:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2), 3),\n self.pad_val,\n dtype=results['img'].dtype)\n else:\n mosaic_img = np.full(\n (int(self.img_scale[1] * 2), int(self.img_scale[0] * 2)),\n self.pad_val,\n dtype=results['img'].dtype)\n\n # mosaic center x, y\n center_x = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[0])\n center_y = int(\n random.uniform(*self.center_ratio_range) * self.img_scale[1])\n center_position = (center_x, center_y)\n\n loc_strs = ('top_left', 'top_right', 'bottom_left', 'bottom_right')\n for i, loc in enumerate(loc_strs):\n if loc == 'top_left':\n results_patch = copy.deepcopy(results)\n else:\n results_patch = copy.deepcopy(mix_results[i - 1])\n\n img_i = results_patch['img']\n h_i, w_i = img_i.shape[:2]\n # keep_ratio resize\n scale_ratio_i = min(self.img_scale[1] / h_i,\n self.img_scale[0] / w_i)\n img_i = mmcv.imresize(\n img_i, (int(w_i * scale_ratio_i), int(h_i * scale_ratio_i)))\n\n # compute the combine parameters\n paste_coord, crop_coord = self._mosaic_combine(\n loc, center_position, img_i.shape[:2][::-1])\n x1_p, y1_p, x2_p, y2_p = paste_coord\n x1_c, y1_c, x2_c, y2_c = crop_coord\n\n # crop and paste image\n mosaic_img[y1_p:y2_p, x1_p:x2_p] = img_i[y1_c:y2_c, x1_c:x2_c]\n\n # adjust coordinate\n gt_bboxes_i = results_patch['gt_bboxes']\n gt_bboxes_labels_i = results_patch['gt_bboxes_labels']\n gt_ignore_flags_i = results_patch['gt_ignore_flags']\n\n padw = x1_p - x1_c\n padh = y1_p - y1_c\n gt_bboxes_i.rescale_([scale_ratio_i, scale_ratio_i])\n gt_bboxes_i.translate_([padw, padh])\n mosaic_bboxes.append(gt_bboxes_i)\n mosaic_bboxes_labels.append(gt_bboxes_labels_i)\n mosaic_ignore_flags.append(gt_ignore_flags_i)\n if with_mask and results_patch.get('gt_masks', None) is not None:\n gt_masks_i = results_patch['gt_masks']\n gt_masks_i = gt_masks_i.rescale(float(scale_ratio_i))\n gt_masks_i = 
gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padw,\n direction='horizontal')\n gt_masks_i = gt_masks_i.translate(\n out_shape=(int(self.img_scale[0] * 2),\n int(self.img_scale[1] * 2)),\n offset=padh,\n direction='vertical')\n mosaic_masks.append(gt_masks_i)\n\n mosaic_bboxes = mosaic_bboxes[0].cat(mosaic_bboxes, 0)\n mosaic_bboxes_labels = np.concatenate(mosaic_bboxes_labels, 0)\n mosaic_ignore_flags = np.concatenate(mosaic_ignore_flags, 0)\n\n if self.bbox_clip_border:\n mosaic_bboxes.clip_([2 * self.img_scale[1], 2 * self.img_scale[0]])\n # remove outside bboxes\n inside_inds = mosaic_bboxes.is_inside(\n [2 * self.img_scale[1], 2 * self.img_scale[0]]).numpy()\n mosaic_bboxes = mosaic_bboxes[inside_inds]\n mosaic_bboxes_labels = mosaic_bboxes_labels[inside_inds]\n mosaic_ignore_flags = mosaic_ignore_flags[inside_inds]\n\n results['img'] = mosaic_img\n results['img_shape'] = mosaic_img.shape[:2]\n results['gt_bboxes'] = mosaic_bboxes\n results['gt_bboxes_labels'] = mosaic_bboxes_labels\n results['gt_ignore_flags'] = mosaic_ignore_flags\n\n if with_mask:\n mosaic_masks = mosaic_masks[0].cat(mosaic_masks)\n results['gt_masks'] = mosaic_masks[inside_inds]\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(img_scale={self.img_scale}, '\n repr_str += f'center_ratio_range={self.center_ratio_range}, '\n repr_str += f'pad_val={self.pad_val}, '\n repr_str += f'prob={self.prob}, '\n repr_str += f'max_cached_images={self.max_cached_images}, '\n repr_str += f'random_pop={self.random_pop})'\n return repr_str" }, { "identifier": "Pad", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Pad(MMCV_Pad):\n \"\"\"Pad the image & segmentation map.\n\n There are three padding modes: (1) pad to a fixed size and (2) pad to the\n minimum size that is divisible by some number. and (3)pad to square. Also,\n pad to square and pad to the minimum size can be used as the same time.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - pad_shape\n - pad_fixed_size\n - pad_size_divisor\n\n Args:\n size (tuple, optional): Fixed padding size.\n Expected padding shape (width, height). Defaults to None.\n size_divisor (int, optional): The divisor of padded size. Defaults to\n None.\n pad_to_square (bool): Whether to pad the image into a square.\n Currently only used for YOLOX. Defaults to False.\n pad_val (Number | dict[str, Number], optional) - Padding value for if\n the pad_mode is \"constant\". If it is a single number, the value\n to pad the image is the number and to pad the semantic\n segmentation map is 255. If it is a dict, it should have the\n following keys:\n\n - img: The value to pad the image.\n - seg: The value to pad the semantic segmentation map.\n Defaults to dict(img=0, seg=255).\n padding_mode (str): Type of padding. Should be: constant, edge,\n reflect or symmetric. Defaults to 'constant'.\n\n - constant: pads with a constant value, this value is specified\n with pad_val.\n - edge: pads with the last value at the edge of the image.\n - reflect: pads with reflection of image without repeating the last\n value on the edge. 
For example, padding [1, 2, 3, 4] with 2\n elements on both sides in reflect mode will result in\n [3, 2, 1, 2, 3, 4, 3, 2].\n - symmetric: pads with reflection of image repeating the last value\n on the edge. For example, padding [1, 2, 3, 4] with 2 elements on\n both sides in symmetric mode will result in\n [2, 1, 1, 2, 3, 4, 4, 3]\n \"\"\"\n\n def _pad_masks(self, results: dict) -> None:\n \"\"\"Pad masks according to ``results['pad_shape']``.\"\"\"\n if results.get('gt_masks', None) is not None:\n pad_val = self.pad_val.get('masks', 0)\n pad_shape = results['pad_shape'][:2]\n results['gt_masks'] = results['gt_masks'].pad(\n pad_shape, pad_val=pad_val)\n\n def transform(self, results: dict) -> dict:\n \"\"\"Call function to pad images, masks, semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n dict: Updated result dict.\n \"\"\"\n self._pad_img(results)\n self._pad_seg(results)\n self._pad_masks(results)\n return results" }, { "identifier": "RandomCrop", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomCrop(BaseTransform):\n \"\"\"Random crop the image & bboxes & masks.\n\n The absolute ``crop_size`` is sampled based on ``crop_type`` and\n ``image_size``, then the cropped results are generated.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_bboxes_labels (np.int64) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_ignore_flags (bool) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes (optional)\n - gt_bboxes_labels (optional)\n - gt_masks (optional)\n - gt_ignore_flags (optional)\n - gt_seg_map (optional)\n - gt_instances_ids (options, only used in MOT/VIS)\n\n Added Keys:\n\n - homography_matrix\n\n Args:\n crop_size (tuple): The relative ratio or absolute pixels of\n (width, height).\n crop_type (str, optional): One of \"relative_range\", \"relative\",\n \"absolute\", \"absolute_range\". \"relative\" randomly crops\n (h * crop_size[0], w * crop_size[1]) part from an input of size\n (h, w). \"relative_range\" uniformly samples relative crop size from\n range [crop_size[0], 1] and [crop_size[1], 1] for height and width\n respectively. \"absolute\" crops from an input with absolute size\n (crop_size[0], crop_size[1]). \"absolute_range\" uniformly samples\n crop_h in range [crop_size[0], min(h, crop_size[1])] and crop_w\n in range [crop_size[0], min(w, crop_size[1])].\n Defaults to \"absolute\".\n allow_negative_crop (bool, optional): Whether to allow a crop that does\n not contain any bbox area. Defaults to False.\n recompute_bbox (bool, optional): Whether to re-compute the boxes based\n on cropped instance masks. Defaults to False.\n bbox_clip_border (bool, optional): Whether clip the objects outside\n the border of the image. Defaults to True.\n\n Note:\n - If the image is smaller than the absolute crop size, return the\n original image.\n - The keys for bboxes, labels and masks must be aligned. 
That is,\n ``gt_bboxes`` corresponds to ``gt_labels`` and ``gt_masks``, and\n ``gt_bboxes_ignore`` corresponds to ``gt_labels_ignore`` and\n ``gt_masks_ignore``.\n - If the crop does not contain any gt-bbox region and\n ``allow_negative_crop`` is set to False, skip this image.\n \"\"\"\n\n def __init__(self,\n crop_size: tuple,\n crop_type: str = 'absolute',\n allow_negative_crop: bool = False,\n recompute_bbox: bool = False,\n bbox_clip_border: bool = True) -> None:\n if crop_type not in [\n 'relative_range', 'relative', 'absolute', 'absolute_range'\n ]:\n raise ValueError(f'Invalid crop_type {crop_type}.')\n if crop_type in ['absolute', 'absolute_range']:\n assert crop_size[0] > 0 and crop_size[1] > 0\n assert isinstance(crop_size[0], int) and isinstance(\n crop_size[1], int)\n if crop_type == 'absolute_range':\n assert crop_size[0] <= crop_size[1]\n else:\n assert 0 < crop_size[0] <= 1 and 0 < crop_size[1] <= 1\n self.crop_size = crop_size\n self.crop_type = crop_type\n self.allow_negative_crop = allow_negative_crop\n self.bbox_clip_border = bbox_clip_border\n self.recompute_bbox = recompute_bbox\n\n def _crop_data(self, results: dict, crop_size: Tuple[int, int],\n allow_negative_crop: bool) -> Union[dict, None]:\n \"\"\"Function to randomly crop images, bounding boxes, masks, semantic\n segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n crop_size (Tuple[int, int]): Expected absolute size after\n cropping, (h, w).\n allow_negative_crop (bool): Whether to allow a crop that does not\n contain any bbox area.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n assert crop_size[0] > 0 and crop_size[1] > 0\n img = results['img']\n margin_h = max(img.shape[0] - crop_size[0], 0)\n margin_w = max(img.shape[1] - crop_size[1], 0)\n offset_h, offset_w = self._rand_offset((margin_h, margin_w))\n crop_y1, crop_y2 = offset_h, offset_h + crop_size[0]\n crop_x1, crop_x2 = offset_w, offset_w + crop_size[1]\n\n # Record the homography matrix for the RandomCrop\n homography_matrix = np.array(\n [[1, 0, -offset_w], [0, 1, -offset_h], [0, 0, 1]],\n dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n # crop the image\n img = img[crop_y1:crop_y2, crop_x1:crop_x2, ...]\n img_shape = img.shape\n results['img'] = img\n results['img_shape'] = img_shape[:2]\n\n # crop bboxes accordingly and clip to the image boundary\n if results.get('gt_bboxes', None) is not None:\n bboxes = results['gt_bboxes']\n bboxes.translate_([-offset_w, -offset_h])\n if self.bbox_clip_border:\n bboxes.clip_(img_shape[:2])\n valid_inds = bboxes.is_inside(img_shape[:2]).numpy()\n # If the crop does not contain any gt-bbox area and\n # allow_negative_crop is False, skip this image.\n if (not valid_inds.any() and not allow_negative_crop):\n return None\n\n results['gt_bboxes'] = bboxes[valid_inds]\n\n if results.get('gt_ignore_flags', None) is not None:\n results['gt_ignore_flags'] = \\\n results['gt_ignore_flags'][valid_inds]\n\n if results.get('gt_bboxes_labels', None) is not None:\n results['gt_bboxes_labels'] = \\\n results['gt_bboxes_labels'][valid_inds]\n\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'][\n valid_inds.nonzero()[0]].crop(\n 
np.asarray([crop_x1, crop_y1, crop_x2, crop_y2]))\n if self.recompute_bbox:\n results['gt_bboxes'] = results['gt_masks'].get_bboxes(\n type(results['gt_bboxes']))\n\n # We should remove the instance ids corresponding to invalid boxes.\n if results.get('gt_instances_ids', None) is not None:\n results['gt_instances_ids'] = \\\n results['gt_instances_ids'][valid_inds]\n\n # crop semantic seg\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = results['gt_seg_map'][crop_y1:crop_y2,\n crop_x1:crop_x2]\n\n return results\n\n @cache_randomness\n def _rand_offset(self, margin: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generate crop offset.\n\n Args:\n margin (Tuple[int, int]): The upper bound for the offset generated\n randomly.\n\n Returns:\n Tuple[int, int]: The random offset for the crop.\n \"\"\"\n margin_h, margin_w = margin\n offset_h = np.random.randint(0, margin_h + 1)\n offset_w = np.random.randint(0, margin_w + 1)\n\n return offset_h, offset_w\n\n @cache_randomness\n def _get_crop_size(self, image_size: Tuple[int, int]) -> Tuple[int, int]:\n \"\"\"Randomly generates the absolute crop size based on `crop_type` and\n `image_size`.\n\n Args:\n image_size (Tuple[int, int]): (h, w).\n\n Returns:\n crop_size (Tuple[int, int]): (crop_h, crop_w) in absolute pixels.\n \"\"\"\n h, w = image_size\n if self.crop_type == 'absolute':\n return min(self.crop_size[1], h), min(self.crop_size[0], w)\n elif self.crop_type == 'absolute_range':\n crop_h = np.random.randint(\n min(h, self.crop_size[0]),\n min(h, self.crop_size[1]) + 1)\n crop_w = np.random.randint(\n min(w, self.crop_size[0]),\n min(w, self.crop_size[1]) + 1)\n return crop_h, crop_w\n elif self.crop_type == 'relative':\n crop_w, crop_h = self.crop_size\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n else:\n # 'relative_range'\n crop_size = np.asarray(self.crop_size, dtype=np.float32)\n crop_h, crop_w = crop_size + np.random.rand(2) * (1 - crop_size)\n return int(h * crop_h + 0.5), int(w * crop_w + 0.5)\n\n @autocast_box_type()\n def transform(self, results: dict) -> Union[dict, None]:\n \"\"\"Transform function to randomly crop images, bounding boxes, masks,\n semantic segmentation maps.\n\n Args:\n results (dict): Result dict from loading pipeline.\n\n Returns:\n results (Union[dict, None]): Randomly cropped results, 'img_shape'\n key in result dict is updated according to crop size. None will\n be returned when there is no valid bbox after cropping.\n \"\"\"\n image_size = results['img'].shape[:2]\n crop_size = self._get_crop_size(image_size)\n results = self._crop_data(results, crop_size, self.allow_negative_crop)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(crop_size={self.crop_size}, '\n repr_str += f'crop_type={self.crop_type}, '\n repr_str += f'allow_negative_crop={self.allow_negative_crop}, '\n repr_str += f'recompute_bbox={self.recompute_bbox}, '\n repr_str += f'bbox_clip_border={self.bbox_clip_border})'\n return repr_str" }, { "identifier": "RandomFlip", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class RandomFlip(MMCV_RandomFlip):\n \"\"\"Flip the image & bbox & mask & segmentation map. Added or Updated keys:\n flip, flip_direction, img, gt_bboxes, and gt_seg_map. 
There are 3 flip\n modes:\n\n - ``prob`` is float, ``direction`` is string: the image will be\n ``direction``ly flipped with probability of ``prob`` .\n E.g., ``prob=0.5``, ``direction='horizontal'``,\n then image will be horizontally flipped with probability of 0.5.\n - ``prob`` is float, ``direction`` is list of string: the image will\n be ``direction[i]``ly flipped with probability of\n ``prob/len(direction)``.\n E.g., ``prob=0.5``, ``direction=['horizontal', 'vertical']``,\n then image will be horizontally flipped with probability of 0.25,\n vertically with probability of 0.25.\n - ``prob`` is list of float, ``direction`` is list of string:\n given ``len(prob) == len(direction)``, the image will\n be ``direction[i]``ly flipped with probability of ``prob[i]``.\n E.g., ``prob=[0.3, 0.5]``, ``direction=['horizontal',\n 'vertical']``, then image will be horizontally flipped with\n probability of 0.3, vertically with probability of 0.5.\n\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n Added Keys:\n\n - flip\n - flip_direction\n - homography_matrix\n\n\n Args:\n prob (float | list[float], optional): The flipping probability.\n Defaults to None.\n direction(str | list[str]): The flipping direction. Options\n If input is a list, the length must equal ``prob``. Each\n element in ``prob`` indicates the flip probability of\n corresponding direction. Defaults to 'horizontal'.\n \"\"\"\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the RandomFlip.\"\"\"\n cur_dir = results['flip_direction']\n h, w = results['img'].shape[:2]\n\n if cur_dir == 'horizontal':\n homography_matrix = np.array([[-1, 0, w], [0, 1, 0], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'vertical':\n homography_matrix = np.array([[1, 0, 0], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n elif cur_dir == 'diagonal':\n homography_matrix = np.array([[-1, 0, w], [0, -1, h], [0, 0, 1]],\n dtype=np.float32)\n else:\n homography_matrix = np.eye(3, dtype=np.float32)\n\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def _flip(self, results: dict) -> None:\n \"\"\"Flip images, bounding boxes, and semantic segmentation map.\"\"\"\n # flip image\n results['img'] = mmcv.imflip(\n results['img'], direction=results['flip_direction'])\n\n img_shape = results['img'].shape[:2]\n\n # flip bboxes\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].flip_(img_shape, results['flip_direction'])\n\n # flip masks\n if results.get('gt_masks', None) is not None:\n results['gt_masks'] = results['gt_masks'].flip(\n results['flip_direction'])\n\n # flip segs\n if results.get('gt_seg_map', None) is not None:\n results['gt_seg_map'] = mmcv.imflip(\n results['gt_seg_map'], direction=results['flip_direction'])\n\n # record homography matrix for flip\n self._record_homography_matrix(results)" }, { "identifier": "Resize", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class Resize(MMCV_Resize):\n \"\"\"Resize images & bbox & seg.\n\n This transform resizes the input image according to ``scale`` or\n ``scale_factor``. 
Bboxes, masks, and seg map are then resized\n with the same scale factor.\n if ``scale`` and ``scale_factor`` are both set, it will use ``scale`` to\n resize.\n\n Required Keys:\n\n - img\n - gt_bboxes (BaseBoxes[torch.float32]) (optional)\n - gt_masks (BitmapMasks | PolygonMasks) (optional)\n - gt_seg_map (np.uint8) (optional)\n\n Modified Keys:\n\n - img\n - img_shape\n - gt_bboxes\n - gt_masks\n - gt_seg_map\n\n\n Added Keys:\n\n - scale\n - scale_factor\n - keep_ratio\n - homography_matrix\n\n Args:\n scale (int or tuple): Images scales for resizing. Defaults to None\n scale_factor (float or tuple[float]): Scale factors for resizing.\n Defaults to None.\n keep_ratio (bool): Whether to keep the aspect ratio when resizing the\n image. Defaults to False.\n clip_object_border (bool): Whether to clip the objects\n outside the border of the image. In some dataset like MOT17, the gt\n bboxes are allowed to cross the border of images. Therefore, we\n don't need to clip the gt bboxes in these cases. Defaults to True.\n backend (str): Image resize backend, choices are 'cv2' and 'pillow'.\n These two backends generates slightly different results. Defaults\n to 'cv2'.\n interpolation (str): Interpolation method, accepted values are\n \"nearest\", \"bilinear\", \"bicubic\", \"area\", \"lanczos\" for 'cv2'\n backend, \"nearest\", \"bilinear\" for 'pillow' backend. Defaults\n to 'bilinear'.\n \"\"\"\n\n def _resize_masks(self, results: dict) -> None:\n \"\"\"Resize masks with ``results['scale']``\"\"\"\n if results.get('gt_masks', None) is not None:\n if self.keep_ratio:\n results['gt_masks'] = results['gt_masks'].rescale(\n results['scale'])\n else:\n results['gt_masks'] = results['gt_masks'].resize(\n results['img_shape'])\n\n def _resize_bboxes(self, results: dict) -> None:\n \"\"\"Resize bounding boxes with ``results['scale_factor']``.\"\"\"\n if results.get('gt_bboxes', None) is not None:\n results['gt_bboxes'].rescale_(results['scale_factor'])\n if self.clip_object_border:\n results['gt_bboxes'].clip_(results['img_shape'])\n\n def _record_homography_matrix(self, results: dict) -> None:\n \"\"\"Record the homography matrix for the Resize.\"\"\"\n w_scale, h_scale = results['scale_factor']\n homography_matrix = np.array(\n [[w_scale, 0, 0], [0, h_scale, 0], [0, 0, 1]], dtype=np.float32)\n if results.get('homography_matrix', None) is None:\n results['homography_matrix'] = homography_matrix\n else:\n results['homography_matrix'] = homography_matrix @ results[\n 'homography_matrix']\n\n @autocast_box_type()\n def transform(self, results: dict) -> dict:\n \"\"\"Transform function to resize images, bounding boxes and semantic\n segmentation map.\n\n Args:\n results (dict): Result dict from loading pipeline.\n Returns:\n dict: Resized results, 'img', 'gt_bboxes', 'gt_seg_map',\n 'scale', 'scale_factor', 'height', 'width', and 'keep_ratio' keys\n are updated in result dict.\n \"\"\"\n if self.scale:\n results['scale'] = self.scale\n else:\n img_shape = results['img'].shape[:2]\n results['scale'] = _scale_size(img_shape[::-1], self.scale_factor)\n self._resize_img(results)\n self._resize_bboxes(results)\n self._resize_masks(results)\n self._resize_seg(results)\n self._record_homography_matrix(results)\n return results\n\n def __repr__(self) -> str:\n repr_str = self.__class__.__name__\n repr_str += f'(scale={self.scale}, '\n repr_str += f'scale_factor={self.scale_factor}, '\n repr_str += f'keep_ratio={self.keep_ratio}, '\n repr_str += f'clip_object_border={self.clip_object_border}), '\n repr_str += 
f'backend={self.backend}), '\n repr_str += f'interpolation={self.interpolation})'\n return repr_str" }, { "identifier": "YOLOXHSVRandomAug", "path": "mmdet/datasets/transforms/transforms.py", "snippet": "class YOLOXHSVRandomAug(BaseTransform):\n \"\"\"Apply HSV augmentation to image sequentially. It is referenced from\n https://github.com/Megvii-\n BaseDetection/YOLOX/blob/main/yolox/data/data_augment.py#L21.\n\n Required Keys:\n\n - img\n\n Modified Keys:\n\n - img\n\n Args:\n hue_delta (int): delta of hue. Defaults to 5.\n saturation_delta (int): delta of saturation. Defaults to 30.\n value_delta (int): delat of value. Defaults to 30.\n \"\"\"\n\n def __init__(self,\n hue_delta: int = 5,\n saturation_delta: int = 30,\n value_delta: int = 30) -> None:\n self.hue_delta = hue_delta\n self.saturation_delta = saturation_delta\n self.value_delta = value_delta\n\n @cache_randomness\n def _get_hsv_gains(self):\n hsv_gains = np.random.uniform(-1, 1, 3) * [\n self.hue_delta, self.saturation_delta, self.value_delta\n ]\n # random selection of h, s, v\n hsv_gains *= np.random.randint(0, 2, 3)\n # prevent overflow\n hsv_gains = hsv_gains.astype(np.int16)\n return hsv_gains\n\n def transform(self, results: dict) -> dict:\n img = results['img']\n hsv_gains = self._get_hsv_gains()\n img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.int16)\n\n img_hsv[..., 0] = (img_hsv[..., 0] + hsv_gains[0]) % 180\n img_hsv[..., 1] = np.clip(img_hsv[..., 1] + hsv_gains[1], 0, 255)\n img_hsv[..., 2] = np.clip(img_hsv[..., 2] + hsv_gains[2], 0, 255)\n cv2.cvtColor(img_hsv.astype(img.dtype), cv2.COLOR_HSV2BGR, dst=img)\n\n results['img'] = img\n return results\n\n def __repr__(self):\n repr_str = self.__class__.__name__\n repr_str += f'(hue_delta={self.hue_delta}, '\n repr_str += f'saturation_delta={self.saturation_delta}, '\n repr_str += f'value_delta={self.value_delta})'\n return repr_str" } ]
from mmengine.config import read_base
from .rtmdet_ins_s_8xb32_300e_coco import *
from mmcv.transforms.loading import LoadImageFromFile
from mmcv.transforms.processing import RandomResize
from mmdet.datasets.transforms.formatting import PackDetInputs
from mmdet.datasets.transforms.loading import (FilterAnnotations, LoadAnnotations)
from mmdet.datasets.transforms.transforms import (CachedMixUp, CachedMosaic, Pad, RandomCrop, RandomFlip, Resize, YOLOXHSVRandomAug)
17,231
# Copyright (c) OpenMMLab. All rights reserved.
# Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa
# mmcv >= 2.0.1
# mmengine >= 0.8.0
with read_base():

checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth'  # noqa

model.update(
    dict(
        backbone=dict(
            deepen_factor=0.167,
            widen_factor=0.375,
            init_cfg=dict(
                type='Pretrained',
                prefix='backbone.',
                checkpoint=checkpoint)),
        neck=dict(
            in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
        bbox_head=dict(in_channels=96, feat_channels=96)))

train_pipeline = [
    dict(type=LoadImageFromFile, backend_args=backend_args),
    dict(
        type=LoadAnnotations, with_bbox=True, with_mask=True,
        poly2mask=False),
    dict(
        type=CachedMosaic,
        img_scale=(640, 640),
        pad_val=114.0,
        max_cached_images=20,
        random_pop=False),
    dict(
        type=RandomResize,
        scale=(1280, 1280),
        ratio_range=(0.5, 2.0),
        resize_type=Resize,
        keep_ratio=True),
# Copyright (c) OpenMMLab. All rights reserved.
# Please refer to https://mmengine.readthedocs.io/en/latest/advanced_tutorials/config.html#a-pure-python-style-configuration-file-beta for more details. # noqa
# mmcv >= 2.0.1
# mmengine >= 0.8.0
with read_base():

checkpoint = 'https://download.openmmlab.com/mmdetection/v3.0/rtmdet/cspnext_rsb_pretrain/cspnext-tiny_imagenet_600e.pth'  # noqa

model.update(
    dict(
        backbone=dict(
            deepen_factor=0.167,
            widen_factor=0.375,
            init_cfg=dict(
                type='Pretrained',
                prefix='backbone.',
                checkpoint=checkpoint)),
        neck=dict(
            in_channels=[96, 192, 384], out_channels=96, num_csp_blocks=1),
        bbox_head=dict(in_channels=96, feat_channels=96)))

train_pipeline = [
    dict(type=LoadImageFromFile, backend_args=backend_args),
    dict(
        type=LoadAnnotations, with_bbox=True, with_mask=True,
        poly2mask=False),
    dict(
        type=CachedMosaic,
        img_scale=(640, 640),
        pad_val=114.0,
        max_cached_images=20,
        random_pop=False),
    dict(
        type=RandomResize,
        scale=(1280, 1280),
        ratio_range=(0.5, 2.0),
        resize_type=Resize,
        keep_ratio=True),
dict(type=RandomCrop, crop_size=(640, 640)),
6
2023-11-30 08:58:00+00:00
24k
SEU-ProactiveSecurity-Group/MalPurifier
examples/amd_icnn_test.py
[ { "identifier": "Dataset", "path": "core/defense/dataset.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n def __init__(self, seed=0, device='cuda', feature_ext_args=None):\n \"\"\"\n 为机器学习模型学习构建数据集。\n \n :param seed: 随机种子\n :param device: 设备类型,'cuda' 或 'cpu'\n :param feature_ext_args: 提取特征的参数\n \"\"\"\n \n # 设置随机种子,并确保随机性在不同库之间是一致的\n self.seed = seed\n random.seed(self.seed)\n np.random.seed(self.seed)\n torch.manual_seed(self.seed)\n \n # 设置PyTorch的默认数据类型为float32\n torch.set_default_dtype(torch.float32)\n \n # 初始化简化类的临时数据存储\n self.temp_data = utils.SimplifyClass(Manager())\n \n # 设定使用的设备\n self.device = device\n\n # 根据提供的参数初始化特征提取器\n self.feature_ext_args = feature_ext_args\n if feature_ext_args is None:\n self.feature_extractor = Apk2features(config.get('metadata', 'naive_data_pool'),\n config.get('dataset', 'intermediate'))\n else:\n assert isinstance(feature_ext_args, dict)\n self.feature_extractor = Apk2features(config.get('metadata', 'naive_data_pool'),\n config.get('dataset', 'intermediate'),\n **feature_ext_args)\n\n # 分割数据集为训练、验证和测试集\n data_saving_path = os.path.join(config.get('dataset', 'intermediate'), 'dataset.idx')\n \n # 检查是否已保存了分割数据,且不需要更新\n if os.path.exists(data_saving_path) and (not self.feature_extractor.update):\n (self.train_dataset, self.validation_dataset, self.test_dataset) = utils.read_pickle(data_saving_path)\n\n # # 计算良性和恶意apk的数量\n # benign_train = np.sum(self.train_dataset[1] == 0)\n # malicious_train = np.sum(self.train_dataset[1] == 1)\n\n # benign_val = np.sum(self.validation_dataset[1] == 0)\n # malicious_val = np.sum(self.validation_dataset[1] == 1)\n\n # benign_test = np.sum(self.test_dataset[1] == 0)\n # malicious_test = np.sum(self.test_dataset[1] == 1)\n\n # # 打印数据量\n # total_data = len(self.train_dataset[0]) + len(self.validation_dataset[0]) + len(self.test_dataset[0])\n # print(f\"总数据量: {total_data}\")\n # print(f\"训练数据量: {len(self.train_dataset[0])} (良性: {benign_train}, 恶意: {malicious_train})\")\n # print(f\"验证数据量: {len(self.validation_dataset[0])} (良性: {benign_val}, 恶意: {malicious_val})\")\n # print(f\"测试数据量: {len(self.test_dataset[0])} (良性: {benign_test}, 恶意: {malicious_test})\")\n\n # 更新数据路径\n def path_tran(data_paths):\n return np.array(\n [os.path.join(config.get('metadata', 'naive_data_pool'),\n os.path.splitext(os.path.basename(name))[0] + self.feature_extractor.file_ext) for \n name in data_paths])\n\n self.train_dataset = (path_tran(self.train_dataset[0]), self.train_dataset[1])\n self.validation_dataset = (path_tran(self.validation_dataset[0]), self.validation_dataset[1])\n self.test_dataset = (path_tran(self.test_dataset[0]), self.test_dataset[1])\n else:\n # 预处理恶意软件和良性软件的APK文件,并获取其特征路径\n mal_feature_paths = self.apk_preprocess(config.get('dataset', 'malware_dir'))\n ben_feature_paths = self.apk_preprocess(config.get('dataset', 'benware_dir'))\n feature_paths = mal_feature_paths + ben_feature_paths\n \n # 根据恶意软件和良性软件的数量生成标签\n gt_labels = np.zeros((len(mal_feature_paths) + len(ben_feature_paths)), dtype=np.int32)\n gt_labels[:len(mal_feature_paths)] = 1\n \n # 根据特征路径和标签分割数据\n self.train_dataset, self.validation_dataset, self.test_dataset = self.data_split(feature_paths, gt_labels)\n \n # 保存分割后的数据\n utils.dump_pickle((self.train_dataset, self.validation_dataset, self.test_dataset), data_saving_path)\n\n # 获取特征词汇表和大小\n self.vocab, _1, _2 = self.feature_extractor.get_vocab(*self.train_dataset)\n self.vocab_size = len(self.vocab)\n \n # 获取非API的数量\n self.non_api_size = self.feature_extractor.get_non_api_size(self.vocab)\n \n # 
获取类别数量\n self.n_classes = np.unique(self.train_dataset[1]).size\n\n\n def data_split(self, feature_paths, labels):\n \"\"\"\n 将数据分为训练、验证和测试集。\n\n :param feature_paths: 特征文件的路径列表。\n :param labels: 对应的标签列表。\n :return: (训练数据, 训练标签), (验证数据, 验证标签), (测试数据, 测试标签)\n \"\"\"\n \n # 确保特征文件路径数量与标签数量相同\n assert len(feature_paths) == len(labels)\n \n # 初始化训练、验证和测试集的文件名列表为None\n train_dn, validation_dn, test_dn = None, None, None\n \n # 定义数据集切分文件的路径\n data_split_path = os.path.join(config.get('dataset', 'dataset_dir'), 'tr_te_va_split.name')\n \n # 检查数据切分文件是否存在\n if os.path.exists(data_split_path):\n train_dn, val_dn, test_dn = utils.read_pickle(data_split_path)\n\n # 如果任何文件名列表为空\n if (train_dn is None) or (validation_dn is None) or (test_dn is None):\n # 从特征文件路径中提取文件名\n data_names = [os.path.splitext(os.path.basename(path))[0] for path in feature_paths]\n \n # 分割数据为训练和测试集,20%为测试集\n train_dn, test_dn = train_test_split(data_names, test_size=0.2, random_state=self.seed, shuffle=True)\n \n # 从训练集中进一步分割出验证集,25%为验证集\n train_dn, validation_dn = train_test_split(train_dn, test_size=0.25, random_state=self.seed, shuffle=True)\n \n # 将切分结果保存为pickle文件\n utils.dump_pickle((train_dn, validation_dn, test_dn), path=data_split_path)\n\n # 根据提供的文件名列表查询路径\n def query_path(_data_names):\n return np.array(\n [path for path in feature_paths if os.path.splitext(os.path.basename(path))[0] in _data_names])\n\n # 根据提供的文件名列表查询对应的指示器(布尔列表)\n def query_indicator(_data_names):\n return [True if os.path.splitext(os.path.basename(path))[0] in _data_names else False for path in\n feature_paths]\n\n # 查询训练、验证和测试数据的路径\n train_data = query_path(train_dn)\n val_data = query_path(validation_dn)\n test_data = query_path(test_dn)\n \n # 为确保数据与标签一致,随机打乱训练数据和标签\n random.seed(self.seed)\n random.shuffle(train_data)\n train_y = labels[query_indicator(train_dn)]\n random.seed(self.seed)\n random.shuffle(train_y)\n \n # 查询训练、验证和测试数据的标签\n val_y = labels[query_indicator(validation_dn)]\n test_y = labels[query_indicator(test_dn)]\n \n # 返回切分的数据和标签\n return (train_data, train_y), (val_data, val_y), (test_data, test_y)\n\n\n def apk_preprocess(self, apk_paths, labels=None, update_feature_extraction=False):\n \"\"\"\n APK 文件的预处理。\n \n :param apk_paths: APK文件路径列表。\n :param labels: APK文件对应的标签列表,可以为None。\n :param update_feature_extraction: 是否更新特征提取器的状态。\n :return: 处理后的特征路径,和可选的标签。\n \"\"\"\n \n # 保存特征提取器的当前更新状态\n old_status = self.feature_extractor.update\n \n # 将特征提取器的更新状态设置为提供的参数值\n self.feature_extractor.update = update_feature_extraction\n \n # 如果没有提供标签\n if labels is None:\n # 使用特征提取器从apk_paths中提取特征\n feature_paths = self.feature_extractor.feature_extraction(apk_paths)\n \n # 恢复特征提取器的原始状态\n self.feature_extractor.update = old_status\n \n # 返回特征路径\n return feature_paths\n else:\n # 确保apk文件的数量与标签的数量相匹配\n assert len(apk_paths) == len(labels), \\\n '不匹配的数据形状 {} vs. 
{}'.format(len(apk_paths), len(labels))\n \n # 使用特征提取器从apk_paths中提取特征\n feature_paths = self.feature_extractor.feature_extraction(apk_paths)\n \n labels_ = []\n for i, feature_path in enumerate(feature_paths):\n # 获取不带扩展名的文件名\n fname = os.path.splitext(os.path.basename(feature_path))[0]\n \n # 确保当前文件名在对应的apk路径中\n if fname in apk_paths[i]:\n # 添加对应的标签到labels_列表中\n labels_.append(labels[i])\n \n # 恢复特征提取器的原始状态\n self.feature_extractor.update = old_status\n \n # 返回特征路径和对应的标签\n return feature_paths, np.array(labels_)\n\n\n def feature_preprocess(self, feature_paths):\n raise NotImplementedError\n # self.feature_extractor.update_cg(feature_paths)\n\n\n def feature_api_rpst_sum(self, api_feat_representation_list):\n \"\"\"\n 对API表示进行求和\n :param api_feat_representation_list: 一个稀疏矩阵列表\n \"\"\"\n \n # 确保输入是一个列表\n assert isinstance(api_feat_representation_list, list), \"期望输入是一个列表。\"\n \n # 如果列表不为空\n if len(api_feat_representation_list) > 0:\n # 确保列表中的第一个元素是 csr_matrix 类型的稀疏矩阵\n assert isinstance(api_feat_representation_list[0], csr_matrix)\n else:\n # 如果列表为空,则返回一个全为0的矩阵\n return np.zeros(shape=(self.vocab_size - self.non_api_size, self.vocab_size - self.non_api_size),\n dtype=np.float)\n \n # 将第一个稀疏矩阵转为密集型矩阵,并转换为浮点类型\n adj_array = np.asarray(api_feat_representation_list[0].todense()).astype(np.float32)\n \n # 遍历列表中的其余稀疏矩阵\n for sparse_mat in api_feat_representation_list[1:]:\n # 将稀疏矩阵转为密集型矩阵,转换为浮点类型,并与之前的结果进行相加\n adj_array += np.asarray(sparse_mat.todense()).astype(np.float32)\n \n # 将最终结果中的所有值限制在[0,1]之间\n return np.clip(adj_array, a_min=0, a_max=1)\n\n\n def get_numerical_input(self, feature_path, label):\n \"\"\"\n loading features for given a feature path\n # results:\n # --->> mapping feature path to numerical representations\n # --->> features: 1d array, and a list of sparse matrices\n # --->> label: scalar\n \"\"\"\n feature_vector, label = self.feature_extractor.feature2ipt(feature_path, label,\n self.vocab,\n None)\n return feature_vector, label\n\n\n def get_input_producer(self, feature_paths, y, batch_size, name='train', use_cache=False):\n \"\"\"\n 获取输入生产器,返回一个 DataLoader 对象。\n \n :param feature_paths: 特征路径列表。\n :param y: 标签。\n :param batch_size: 每个批次的数据数量。\n :param name: 使用场景名称,默认为'train'。\n :param use_cache: 是否使用缓存,默认为False。\n :return: 返回一个 DataLoader 对象。\n \"\"\"\n \n # 定义 DataLoader 的参数\n params = {\n 'batch_size': batch_size,\n 'num_workers': self.feature_ext_args['proc_number'],\n 'shuffle': False\n }\n \n # 如果是训练过程,则使用用户设定的缓存值;否则,不使用缓存\n use_cache = use_cache if name == 'train' else False\n \n # 创建 DataLoader,它会使用自定义的 DatasetTorch 数据集对象\n # worker_init_fn 参数用于为每个工作线程设定一个随机种子,确保数据的打乱是随机的\n return torch.utils.data.DataLoader(\n DatasetTorch(feature_paths, y, self, name=name, use_cache=use_cache),\n worker_init_fn=lambda x: np.random.seed(torch.randint(0, 2**31, [1,])[0] + x),\n **params\n )\n\n\n def clear_up(self):\n self.temp_data.reset()\n\n @staticmethod\n def get_modification(adv_x, x, idx, sp=True):\n # 确认adv_x和x是numpy.ndarray类型或torch.Tensor类型的实例\n assert isinstance(adv_x, (np.ndarray, torch.Tensor))\n assert isinstance(x, (np.ndarray, torch.Tensor))\n \n # 计算对抗样本和原始样本之间的差异\n x_mod = adv_x - x\n \n # 根据索引idx选择对应的元素\n if isinstance(x_mod, np.ndarray):\n x_mod = np.array([x_mod[i, idx[i]] for i in range(x.shape[0])])\n else:\n x_mod = torch.stack([x_mod[i, idx[i]] for i in range(x.shape[0])])\n \n # 判断是否需要转为稀疏表示\n if sp:\n # 如果x_mod是torch.Tensor,那么将其转换为稀疏表示并移到cpu上\n # 如果x_mod是numpy.ndarray,那么先将其转换为torch.Tensor,然后转换为稀疏表示并移到cpu上\n if isinstance(x_mod, torch.Tensor):\n return 
x_mod.to_sparse().cpu().unbind(dim=0)\n else:\n return torch.tensor(x_mod, dtype=torch.int).to_sparse().cpu().unbind(dim=0)\n else:\n # 如果不需要转为稀疏表示,那么直接将其移到cpu上或者分割为numpy数组\n if isinstance(x_mod, torch.Tensor):\n return x_mod.cpu().unbind(dim=0)\n else:\n return np.split(x_mod, x_mod.shape[0], axis=0)\n\n\n @staticmethod\n def modification_integ(x_mod_integrated, x_mod):\n # 确认x_mod_integrated和x_mod是列表类型的实例\n assert isinstance(x_mod_integrated, list) and isinstance(x_mod, list)\n \n # 如果x_mod_integrated为空列表,则返回x_mod\n if len(x_mod_integrated) == 0:\n return x_mod\n \n # 确认x_mod_integrated和x_mod的长度相同\n assert len(x_mod_integrated) == len(x_mod)\n \n # 遍历x_mod和x_mod_integrated中的每个元素\n for i in range(len(x_mod)):\n # 确认当前x_mod中的元素不在GPU上,\n # 因为在GPU上的Tensor进行list相加操作的时候是列表拼接,而在CPU上则是张量之间的加法\n assert not x_mod[i].is_cuda\n \n # 更新x_mod_integrated中的元素\n x_mod_integrated[i] += x_mod[i]\n \n # 返回更新后的x_mod_integrated\n return x_mod_integrated" }, { "identifier": "MalwareDetectionDNN", "path": "core/defense/md_dnn.py", "snippet": "class MalwareDetectionDNN(nn.Module):\n def __init__(self, input_size, n_classes, device='cpu', name='DNN', **kwargs):\n \"\"\"\n 初始化恶意软件检测器\n\n 参数:\n ----------\n @param input_size: 整数,输入向量的维度数量。\n @param n_classes: 整数,表示分类的数量,例如二分类问题中n=2。\n @param device: 字符串,可以是'cpu'或'cuda',表示模型应该在CPU还是GPU上运行。\n @param name: 字符串,用于命名模型。\n \"\"\"\n super(MalwareDetectionDNN, self).__init__() # 调用父类初始化\n self.input_size = input_size # 定义输入尺寸\n self.n_classes = n_classes # 定义分类数量\n self.device = device # 定义运行设备\n self.name = name # 定义模型名称\n\n self.parse_args(**kwargs) # 解析额外参数\n\n self.dense_layers = [] # 初始化一个空的密集层列表\n \n # 检查是否至少有一个隐藏层\n if len(self.dense_hidden_units) >= 1:\n # 添加第一个密集层\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[0]))\n else:\n # 如果没有隐藏层,抛出异常\n raise ValueError(\"Expect at least one hidden layer.\")\n\n # 为每一对连续的隐藏单元添加一个密集层\n for i in range(len(self.dense_hidden_units[0:-1])):\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[i], \n self.dense_hidden_units[i + 1]))\n \n # 添加最后一个连接到输出层的密集层\n self.dense_layers.append(nn.Linear(self.dense_hidden_units[-1], self.n_classes))\n \n # 将密集层添加到模型中以进行跟踪\n for idx_i, dense_layer in enumerate(self.dense_layers):\n self.add_module('nn_model_layer_{}'.format(idx_i), dense_layer)\n\n # 根据参数选择使用SELU或ReLU激活函数\n if self.smooth:\n self.activation_func = F.selu # 使用SELU激活函数\n else:\n self.activation_func = F.relu # 使用ReLU激活函数\n\n # 定义模型的保存路径\n self.model_save_path = path.join(config.get('experiments', 'md_dnn') + '_' + self.name,\n 'model.pth')\n \n # 日志中打印模型的结构信息\n logger.info('========================================dnn model architecture===============================')\n logger.info(self)\n logger.info('===============================================end==========================================')\n\n\n def parse_args(self,\n dense_hidden_units=None,\n dropout=0.6,\n alpha_=0.2,\n smooth=False,\n **kwargs\n ):\n \"\"\"\n 解析并设置网络的超参数。\n\n 参数:\n ----------\n dense_hidden_units : list, 可选\n 网络中每个隐藏层的单元数。如果没有指定,则默认为两个隐藏层,每层200个单元。\n dropout : float, 可选\n dropout正则化的比率,默认为0.6。\n alpha_ : float, 可选\n 某些激活函数的参数,默认为0.2。\n smooth : bool, 可选\n 是否使用平滑的激活函数,默认为False。\n **kwargs : dict\n 其他超参数。\n \"\"\"\n\n # 如果用户没有指定隐藏层,使用默认的配置\n if dense_hidden_units is None:\n self.dense_hidden_units = [200, 200]\n # 如果用户指定了一个列表,使用它\n elif isinstance(dense_hidden_units, list):\n self.dense_hidden_units = dense_hidden_units\n # 否则抛出一个异常\n else:\n raise TypeError(\"Expect a list of hidden units.\")\n\n # 设置dropout, 
alpha和smooth参数\n self.dropout = dropout\n self.alpha_ = alpha_\n self.smooth = smooth\n\n # 从kwargs中获取并设置proc_number\n self.proc_number = kwargs.get('proc_number', None) # 如果不存在,则返回None\n\n # 如果还有其他参数,记录警告,因为这些参数可能是未知的\n if len(kwargs) > 0:\n logger.warning(\"Unknown hyper-parameters {}\".format(str(kwargs)))\n\n\n def forward(self, x):\n \"\"\"\n 使输入数据 x 通过神经网络\n \n 参数\n ----------\n @param x: 2D张量,特征表示\n \"\"\"\n # 遍历神经网络的每一层,除了最后一层\n for dense_layer in self.dense_layers[:-1]:\n x = self.activation_func(dense_layer(x)) # 使用激活函数处理每一层的输出\n\n # 对处理过的数据进行 dropout 操作,用于防止过拟合\n latent_representation = F.dropout(x, self.dropout, training=self.training)\n \n # 用最后一层进行处理,得到logits(未归一化的预测或分类得分)\n logits = self.dense_layers[-1](latent_representation)\n return logits\n\n def inference(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n\n # 将所有批次的置信度垂直堆叠成一个张量\n confidences = torch.vstack(confidences)\n # 将所有批次的真实标签连接成一个张量\n gt_labels = torch.cat(gt_labels, dim=0)\n \n return confidences, gt_labels\n\n def inference_dae(self, test_data_producer):\n \"\"\"\n 进行模型推理,获得预测的置信度和真实标签\n \n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n \n 返回值\n ----------\n 返回预测的置信度和真实标签\n \"\"\"\n confidences = [] # 存储每批数据的预测置信度\n gt_labels = [] # 存储每批数据的真实标签\n self.eval() # 设置模型为评估模式\n\n # 使用torch.no_grad()来告诉PyTorch不要在推理过程中计算梯度\n with torch.no_grad():\n # 遍历每一批测试数据\n for x, y in test_data_producer:\n # 将数据转移到指定的设备(CPU或GPU)并调整数据类型\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 得到每一批数据的logits\n logits = self.forward(x)\n # 使用softmax函数得到每一批数据的置信度,并将其添加到confidences列表中\n confidences.append(F.softmax(logits, dim=-1))\n # 将每一批数据的真实标签添加到gt_labels列表中\n gt_labels.append(y)\n \n return confidences, gt_labels\n\n\n def get_important_attributes(self, test_data_producer, target_label=1):\n \"\"\"\n 使用集成梯度(Integrated Gradients)方法获取重要的属性/特征\n\n 参数\n ----------\n @param test_data_producer: 数据生产者或数据加载器,用于产生测试数据\n @param target_label: 目标标签,默认为1\n \n 返回值\n ----------\n 返回重要的属性/特征\n \"\"\"\n attributions = [] # 存储属性或特征的重要性得分\n gt_labels = [] # 存储真实标签\n\n # 定义一个使用集成梯度方法的包装器\n def _ig_wrapper(_x):\n logits = self.forward(_x)\n return F.softmax(logits, dim=-1)\n\n # 初始化集成梯度对象\n ig = IntegratedGradients(_ig_wrapper)\n\n # 遍历测试数据集\n for i, (x, y) in enumerate(test_data_producer):\n # 将数据和标签转移到指定的设备上\n x, y = utils.to_device(x.double(), y.long(), self.device)\n # 使x能够计算梯度\n x.requires_grad = True\n # 定义基线,用于集成梯度的计算\n baseline = torch.zeros_like(x, dtype=torch.double, device=self.device)\n # 计算属性的重要性\n attribution_bs = ig.attribute(x,\n baselines=baseline,\n target=target_label)\n # 将所有批次的属性垂直堆叠\n attribution = torch.hstack(attribution_bs)\n # 保存得到的属性重要性得分和真实标签\n attributions.append(attribution.clone().detach().cpu().numpy())\n gt_labels.append(y.clone().detach().cpu().numpy())\n # 将真实标签保存为.npy文件\n np.save('./labels', np.concatenate(gt_labels))\n \n return np.vstack(attributions)\n\n\n def 
inference_batch_wise(self, x):\n \"\"\"\n 仅支持恶意软件样本的批量推理\n \n 参数\n ----------\n @param x: 输入数据的张量\n \n 返回值\n ----------\n 返回推理的置信度和标签\n \"\"\"\n # 确保输入是一个张量\n assert isinstance(x, torch.Tensor)\n \n # 获得模型的输出\n logit = self.forward(x)\n \n # 返回每个样本的置信度和一个与logit形状相同的全1数组(表示恶意软件样本)\n return torch.softmax(logit, dim=-1).detach().cpu().numpy(), np.ones((logit.size()[0],))\n\n\n def predict(self, test_data_producer, indicator_masking=True):\n \"\"\"\n 预测标签并进行评估\n\n 参数\n --------\n @param test_data_producer: torch.DataLoader, 用于生成测试数据的数据加载器\n \"\"\"\n # 进行评估\n confidence, y_true = self.inference(test_data_producer)\n y_pred = confidence.argmax(1).cpu().numpy() # 预测标签\n y_true = y_true.cpu().numpy() # 真实标签\n \n # print(\"y_true.shape:\", y_true.shape)\n # print(\"y_pred.shape:\", y_pred.shape)\n \n # 使用sklearn的评估指标进行评估\n from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score\n accuracy = accuracy_score(y_true, y_pred)\n b_accuracy = balanced_accuracy_score(y_true, y_pred)\n \n MSG = \"The accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(accuracy * 100))\n \n MSG = \"The balanced accuracy on the test dataset is {:.5f}%\"\n logger.info(MSG.format(b_accuracy * 100))\n\n # 检查数据中是否存在缺失的类别\n if np.any([np.all(y_true == i) for i in range(self.n_classes)]):\n logger.warning(\"class absent.\")\n return\n\n # 计算混淆矩阵\n tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()\n fpr = fp / float(tn + fp) # 计算假阳性率\n fnr = fn / float(tp + fn) # 计算假阴性率\n f1 = f1_score(y_true, y_pred, average='binary') # 计算F1分数\n\n print(\"Other evaluation metrics we may need:\")\n MSG = \"False Negative Rate (FNR) is {:.5f}%、False Positive Rate (FPR) is {:.5f}%, F1 score is {:.5f}%\"\n logger.info(MSG.format(fnr * 100, fpr * 100, f1 * 100))\n\n\n def customize_loss(self, logits, gt_labels, representation=None, mini_batch_idx=None):\n \"\"\"\n 自定义损失函数\n\n 参数\n --------\n @param logits: Tensor, 模型的输出\n @param gt_labels: Tensor, 真实的标签\n @param representation: Tensor, 可选参数,表示特征表示\n @param mini_batch_idx: Int, 可选参数,表示小批次的索引\n \n 返回值\n --------\n 返回交叉熵损失\n \"\"\"\n return F.cross_entropy(logits, gt_labels)\n\n\n def fit(self, train_data_producer, validation_data_producer, epochs=100, lr=0.005, weight_decay=0., weight_sampling=0.5, verbose=True):\n \"\"\"\n 训练恶意软件检测器,根据验证集上的交叉熵损失选择最佳模型。\n\n 参数\n ----------\n @param train_data_producer: 对象, 用于生成一批训练数据的迭代器\n @param validation_data_producer: 对象, 用于生成验证数据的迭代器\n @param epochs: 整数, 训练的周期数\n @param lr: 浮点数, Adam优化器的学习率\n @param weight_decay: 浮点数, 惩罚因子\n @param verbose: 布尔值, 是否显示详细的日志\n \"\"\"\n # 初始化优化器\n optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)\n best_avg_acc = 0. # 记录验证集上的最佳准确率\n best_epoch = 0 # 记录最佳准确率对应的周期\n total_time = 0. 
# 总的训练时间\n\n # 获取训练数据批次的数量\n nbatches = len(train_data_producer)\n \n # 进行指定次数的训练周期\n for i in range(epochs):\n # 设置模型为训练模式\n self.train()\n # 初始化列表用于保存每批数据的损失值和准确率\n losses, accuracies = [], []\n\n # 对每个训练数据批次进行遍历\n for idx_batch, (x_train, y_train) in enumerate(train_data_producer):\n # 将数据转移到指定的计算设备(例如GPU或CPU)\n x_train, y_train = utils.to_device(x_train.double(), y_train.long(), self.device)\n\n # 记录开始训练的时间\n start_time = time.time()\n\n # 清空之前累积的梯度\n optimizer.zero_grad() \n \n # 对输入数据进行前向传播\n logits = self.forward(x_train) \n \n # 根据模型的输出和真实标签计算损失\n loss_train = self.customize_loss(logits, y_train) \n\n # 对损失进行反向传播\n loss_train.backward()\n \n # 使用优化器更新模型参数\n optimizer.step()\n\n # 计算训练这批数据所花费的总时间\n total_time += time.time() - start_time\n \n # 计算这批数据上的准确率\n acc_train = (logits.argmax(1) == y_train).sum().item() / x_train.size()[0]\n \n # 将时间转换为分钟和秒\n mins, secs = int(total_time / 60), int(total_time % 60)\n \n # 将这批数据的损失和准确率加入到列表中\n losses.append(loss_train.item())\n accuracies.append(acc_train)\n\n # 如果开启了详细输出模式,显示当前训练进度和这批数据上的损失和准确率\n if verbose:\n logger.info(f'小批次: {i * nbatches + idx_batch + 1}/{epochs * nbatches} | 训练时间为 {mins:.0f} 分钟, {secs} 秒。')\n logger.info(f'训练损失(小批次级别): {losses[-1]:.4f} | 训练精度: {acc_train * 100:.2f}')\n\n\n self.eval() # 将模型设置为评估模式\n avg_acc_val = []\n\n with torch.no_grad(): # 确保在评估模式下不进行梯度的计算\n for x_val, y_val in validation_data_producer:\n # 将数据移动到指定设备(例如GPU或CPU)上,并确保数据的类型为双精度浮点数和长整型\n x_val, y_val = utils.to_device(x_val.double(), y_val.long(), self.device)\n \n # 使用模型进行前向传播,得到输出结果\n logits = self.forward(x_val)\n \n # 计算验证数据上的准确率\n acc_val = (logits.argmax(1) == y_val).sum().item() / x_val.size()[0]\n \n # 保存每一批验证数据的准确率\n avg_acc_val.append(acc_val)\n \n # 计算所有验证数据的平均准确率\n avg_acc_val = np.mean(avg_acc_val)\n\n # 如果当前周期的验证精度超过之前的最佳验证精度\n if avg_acc_val >= best_avg_acc:\n # 更新最佳验证精度\n best_avg_acc = avg_acc_val\n best_epoch = i\n \n # 检查模型保存路径是否存在,如果不存在,则创建\n if not path.exists(self.model_save_path):\n utils.mkdir(path.dirname(self.model_save_path))\n \n # 保存当前的模型参数\n torch.save(self.state_dict(), self.model_save_path)\n \n # 如果开启了详细输出模式,显示模型保存路径\n if verbose:\n print(f'模型保存在路径: {self.model_save_path}')\n\n # 如果开启了详细输出模式,显示训练损失、训练精度、验证精度和最佳验证精度\n if verbose:\n logger.info(f'训练损失(周期级别): {np.mean(losses):.4f} | 训练精度: {np.mean(accuracies) * 100:.2f}')\n logger.info(f'验证精度: {avg_acc_val * 100:.2f} | 最佳验证精度: {best_avg_acc * 100:.2f} 在第 {best_epoch} 个周期')\n\n def load(self):\n \"\"\"\n 从磁盘加载模型参数\n \"\"\"\n self.load_state_dict(torch.load(self.model_save_path))" }, { "identifier": "AdvMalwareDetectorICNN", "path": "core/defense/amd_icnn.py", "snippet": "class AdvMalwareDetectorICNN(nn.Module, DetectorTemplate):\n # 初始化函数\n def __init__(self, md_nn_model, input_size, n_classes, ratio=0.98,\n device='cpu', name='', **kwargs):\n # 调用父类的初始化函数\n nn.Module.__init__(self)\n DetectorTemplate.__init__(self)\n \n # 设置输入大小、类别数、比例、设备和名称等属性\n self.input_size = input_size\n self.n_classes = n_classes\n self.ratio = 0.98\n # print(\"self.ratio:\", self.ratio)\n self.device = device\n self.name = name\n self.parse_args(**kwargs)\n \n # 检查md_nn_model是否是nn.Module的实例\n if isinstance(md_nn_model, nn.Module):\n self.md_nn_model = md_nn_model\n else:\n kwargs['smooth'] = True\n # 如果不是,构建一个默认的DNN恶意软件检测模型\n self.md_nn_model = MalwareDetectionDNN(self.input_size,\n n_classes,\n self.device,\n name,\n **kwargs)\n # 警告使用者:使用了自定义的基于NN的恶意软件检测器\n warnings.warn(\"Use a self-defined NN-based malware detector\")\n \n # 检查模型是否有'smooth'属性\n if hasattr(self.md_nn_model, 'smooth'):\n # 
如果模型不是平滑的,将ReLU替换为SELU\n if not self.md_nn_model.smooth:\n for name, child in self.md_nn_model.named_children():\n if isinstance(child, nn.ReLU):\n self.md_nn_model._modules['relu'] = nn.SELU()\n else:\n # 没有'smooth'属性的情况下,将ReLU替换为SELU\n for name, child in self.md_nn_model.named_children():\n if isinstance(child, nn.ReLU):\n self.md_nn_model._modules['relu'] = nn.SELU()\n \n # 将模型移动到指定的设备上\n self.md_nn_model = self.md_nn_model.to(self.device)\n\n # 输入凸神经网络\n self.non_neg_dense_layers = []\n \n # 至少需要一个隐藏层\n if len(self.dense_hidden_units) < 1:\n raise ValueError(\"Expect at least one hidden layer.\")\n \n # 创建非负的密集层\n for i in range(len(self.dense_hidden_units[0:-1])):\n self.non_neg_dense_layers.append(nn.Linear(self.dense_hidden_units[i],\n self.dense_hidden_units[i + 1],\n bias=False))\n self.non_neg_dense_layers.append(nn.Linear(self.dense_hidden_units[-1], 1, bias=False))\n \n # 注册非负的密集层\n for idx_i, dense_layer in enumerate(self.non_neg_dense_layers):\n self.add_module('non_neg_layer_{}'.format(idx_i), dense_layer)\n\n # 创建密集层\n self.dense_layers = []\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[0]))\n for i in range(len(self.dense_hidden_units[1:])):\n self.dense_layers.append(nn.Linear(self.input_size, self.dense_hidden_units[i]))\n self.dense_layers.append(nn.Linear(self.input_size, 1))\n \n # 注册密集层\n for idx_i, dense_layer in enumerate(self.dense_layers):\n self.add_module('layer_{}'.format(idx_i), dense_layer)\n\n # 创建参数tau并设置为不需要梯度\n self.tau = nn.Parameter(torch.zeros([1, ], device=self.device), requires_grad=False)\n\n # 设置模型的保存路径\n self.model_save_path = path.join(config.get('experiments', 'amd_icnn') + '_' + self.name,\n 'model.pth')\n # 打印模型的结构信息\n logger.info('========================================icnn model architecture==============================')\n logger.info(self)\n logger.info('===============================================end==========================================')\n\n\n def parse_args(self,\n dense_hidden_units=None, # 密集层隐藏单元的列表\n dropout=0.6, # dropout率\n alpha_=0.2, # alpha参数\n **kwargs # 其他关键字参数\n ):\n # 如果没有提供密集层的隐藏单元,则使用默认的[200, 200]\n if dense_hidden_units is None:\n self.dense_hidden_units = [200, 200]\n \n # 如果提供的密集层隐藏单元是列表形式,则直接赋值\n elif isinstance(dense_hidden_units, list):\n self.dense_hidden_units = dense_hidden_units\n \n # 如果提供的不是列表,则抛出类型错误\n else:\n raise TypeError(\"Expect a list of hidden units.\")\n\n # 设置dropout率\n self.dropout = dropout\n # 设置alpha参数\n self.alpha_ = alpha_\n # 获取`proc_number`参数\n self.proc_number = kwargs['proc_number']\n # 如果提供了额外的关键字参数,且参数数量大于0,则记录警告信息\n if len(kwargs) > 0:\n logger.warning(\"Unknown hyper-parameters {}\".format(str(kwargs)))\n\n # 定义forward_f函数,该函数对输入x应用md_nn_model模型\n def forward_f(self, x):\n return self.md_nn_model(x)\n\n # 定义forward_g函数,该函数处理输入数据x并传递给密集层和非负密集层\n def forward_g(self, x):\n # 初始化prev_x为None,用于存储前一个x的值\n prev_x = None\n # 对每个密集层进行枚举\n for i, dense_layer in enumerate(self.dense_layers):\n # 初始化x_add列表,用于存储中间结果\n x_add = []\n \n # 将输入x通过当前的密集层\n x1 = dense_layer(x)\n \n # 将结果添加到x_add列表中\n x_add.append(x1)\n \n # 如果prev_x不为None,表示不是第一个密集层\n if prev_x is not None:\n # 将前一个x通过非负密集层\n x2 = self.non_neg_dense_layers[i - 1](prev_x)\n # 将结果添加到x_add列表中\n x_add.append(x2)\n \n # 将x_add列表中的所有元素求和\n prev_x = torch.sum(torch.stack(x_add, dim=0), dim=0)\n \n # 如果不是最后一个密集层,则应用SELU激活函数\n if i < len(self.dense_layers):\n prev_x = F.selu(prev_x)\n \n # 改变输出的形状并返回\n return prev_x.reshape(-1)\n\n\n def forward(self, x):\n return self.forward_f(x), self.forward_g(x)\n\n 
# 定义前向传播函数\n def forward(self, x):\n # 将输入x同时传递给forward_f和forward_g函数\n return self.forward_f(x), self.forward_g(x)\n\n # 定义预测函数\n def predict(self, test_data_producer, indicator_masking=True):\n \"\"\"\n 预测标签并对检测器和指示器进行评估\n\n 参数:\n --------\n @param test_data_producer: torch.DataLoader,用于产生测试数据\n @param indicator_masking: 是否过滤掉低密度的示例或遮罩其值\n \"\"\"\n # 从测试数据生成器中进行推断,获取中心预测值、概率和真实标签\n y_cent, x_prob, y_true = self.inference(test_data_producer)\n # 获取预测值的最大索引作为预测结果\n y_pred = y_cent.argmax(1).cpu().numpy()\n y_true = y_true.cpu().numpy()\n # 计算指示器标志\n indicator_flag = self.indicator(x_prob).cpu().numpy()\n\n # 定义评价函数\n def measurement(_y_true, _y_pred):\n from sklearn.metrics import f1_score, accuracy_score, confusion_matrix, balanced_accuracy_score\n # 计算并打印准确率\n accuracy = accuracy_score(_y_true, _y_pred)\n b_accuracy = balanced_accuracy_score(_y_true, _y_pred)\n logger.info(\"测试数据集的准确率为 {:.5f}%\".format(accuracy * 100))\n logger.info(\"测试数据集的平衡准确率为 {:.5f}%\".format(b_accuracy * 100))\n # 检查某个类是否完全缺失\n if np.any([np.all(_y_true == i) for i in range(self.n_classes)]):\n logger.warning(\"某个类别缺失。\")\n return\n\n # 计算混淆矩阵并获取TP, TN, FP, FN\n tn, fp, fn, tp = confusion_matrix(_y_true, _y_pred).ravel()\n fpr = fp / float(tn + fp)\n fnr = fn / float(tp + fn)\n # 计算F1分数\n f1 = f1_score(_y_true, _y_pred, average='binary')\n logger.info(\"假阴性率(FNR)为 {:.5f}%, 假阳性率(FPR)为 {:.5f}%, F1分数为 {:.5f}%\".format(fnr * 100, fpr * 100, f1 * 100))\n\n # 对真实标签和预测标签进行评估\n measurement(y_true, y_pred)\n\n rtn_value = (y_pred == 0) & indicator_flag\n\n if indicator_masking:\n # 排除带有“不确定”响应的样本\n y_pred = y_pred[indicator_flag]\n y_true = y_true[indicator_flag]\n else:\n # 这里不是过滤掉示例,而是将预测重置为1\n y_pred[~indicator_flag] = 1.\n logger.info('指示器已开启...')\n logger.info('阈值为 {:.5}'.format(self.tau.item()))\n # 再次评估\n measurement(y_true, y_pred)\n\n return rtn_value\n\n\n # 定义推断函数\n def inference(self, test_data_producer):\n # 初始化三个空列表:y_cent用于存放预测的类别中心值,x_prob用于存放预测的概率值,gt_labels用于存放真实标签。\n y_cent, x_prob = [], []\n gt_labels = []\n \n # 将模型设置为评估模式\n self.eval()\n \n # 使用torch.no_grad()来指示PyTorch在此上下文中不计算梯度,这在推断时是常见的做法,可以节省内存并加速计算。\n with torch.no_grad():\n # 遍历测试数据生成器中的每一批数据\n for x, y in test_data_producer:\n # 将数据转移到设备上,并确保x的数据类型为double,y的数据类型为long\n x, y = utils.to_device(x.double(), y.long(), self.device)\n \n # 通过前向传播得到logits_f和logits_g\n logits_f, logits_g = self.forward(x)\n \n # 使用softmax函数计算logits_f的概率分布,并将其添加到y_cent列表中\n y_cent.append(torch.softmax(logits_f, dim=-1))\n \n # 将logits_g添加到x_prob列表中\n x_prob.append(logits_g)\n \n # 将真实标签添加到gt_labels列表中\n gt_labels.append(y)\n \n # 使用torch.cat将三个列表中的所有Tensor沿第0维度拼接起来\n gt_labels = torch.cat(gt_labels, dim=0)\n y_cent = torch.cat(y_cent, dim=0)\n x_prob = torch.cat(x_prob, dim=0)\n \n # 返回三个Tensor:y_cent, x_prob, gt_labels\n return y_cent, x_prob, gt_labels\n\n # 这段代码的主要目的是计算模型输入的重要性或贡献。\n # 整合梯度是一种解释机器学习模型的方法,它提供了一种方式来理解每个输入特性对预测结果的贡献是如何的。\n # 在这里,这种方法被用于两个不同的模型输出:分类任务(forward_f)和另一个可能与密度估计或某种特定任务有关的输出(forward_g)。\n def get_important_attributes(self, test_data_producer, indicator_masking=False):\n \"\"\"\n 获取输入的重要属性,使用整合梯度法 (integrated gradients)。\n\n 邻接矩阵将被忽略。\n \"\"\"\n # 存储分类任务的属性重要性\n attributions_cls = []\n # 存储其他任务(可能是密度估计或某种任务)的属性重要性\n attributions_de = []\n\n # 定义一个包装函数,用于分类任务的整合梯度计算\n def _ig_wrapper_cls(_x):\n logits = self.forward_f(_x) # 获取模型对于输入x的预测\n return F.softmax(logits, dim=-1) # 对预测进行softmax操作以得到概率值\n\n # 初始化整合梯度方法,针对分类任务\n ig_cls = IntegratedGradients(_ig_wrapper_cls)\n\n # 定义一个包装函数,用于其他任务的整合梯度计算\n def _ig_wrapper_de(_x):\n return 
self.forward_g(_x)\n\n # 初始化整合梯度方法,针对其他任务\n ig_de = IntegratedGradients(_ig_wrapper_de)\n\n # 遍历测试数据\n for i, (x, y) in enumerate(test_data_producer):\n x, y = utils.to_tensor(x, y, self.device) # 将输入和标签转为张量\n x.requires_grad = True # 为输入x设置梯度属性,以便后续计算梯度\n base_lines = torch.zeros_like(x, dtype=torch.double, device=self.device) # 设置基线为全零\n base_lines[:, -1] = 1 # 修改基线的最后一个值为1\n # 计算分类任务的属性重要性\n attribution_bs = ig_cls.attribute(x,\n baselines=base_lines,\n target=1) # target=1意味着我们计算对类别1的属性重要性\n attributions_cls.append(attribution_bs.clone().detach().cpu().numpy())\n\n # 计算其他任务的属性重要性\n attribution_bs = ig_de.attribute(x,\n baselines=base_lines\n )\n attributions_de.append(attribution_bs.clone().detach().cpu().numpy())\n \n # 将所有批次的结果合并为一个数组\n return np.vstack(attributions_cls), np.vstack(attributions_de)\n \n def inference_batch_wise(self, x):\n \"\"\"\n 返回分类的概率和g模型的输出。\n \"\"\"\n assert isinstance(x, torch.Tensor) # 断言确保输入是torch.Tensor类型\n self.eval() # 将模型设置为评估模式\n logits_f, logits_g = self.forward(x) # 获取f和g模型的输出\n # 对f模型的输出进行softmax操作以获得分类概率,并将结果转移到CPU上\n return torch.softmax(logits_f, dim=-1).detach().cpu().numpy(), logits_g.detach().cpu().numpy()\n\n\n def get_tau_sample_wise(self, y_pred=None):\n return self.tau # 返回tau,即决策阈值\n\n\n def indicator(self, x_prob, y_pred=None):\n \"\"\"\n 判断一个样本是否是原始的。\n \"\"\"\n # print(\"self.tau:\", self.tau)\n if isinstance(x_prob, np.ndarray): # 判断输入是否为numpy数组\n x_prob = torch.tensor(x_prob, device=self.device) # 转换numpy数组为torch.Tensor\n # 判断每个样本的概率是否小于或等于tau,并返回结果\n return (x_prob <= self.tau).cpu().numpy()\n elif isinstance(x_prob, torch.Tensor): # 判断输入是否为torch.Tensor\n return x_prob <= self.tau # 判断每个样本的概率是否小于或等于tau,并返回结果\n else:\n # 如果输入既不是numpy数组也不是torch.Tensor,抛出一个类型错误\n raise TypeError(\"Tensor or numpy.ndarray are expected.\")\n\n # 简而言之,该方法计算模型的输出概率,对这些概率进行排序,然后基于所提供的ratio来确定阈值。\n # 当模型的输出低于这个阈值时,模型将认为输入是对抗的。\n def get_threshold(self, validation_data_producer, ratio=None):\n \"\"\"\n 获取用于对抗检测的阈值。\n \n 参数:\n --------\n validation_data_producer : Object\n 用于生产验证数据集的迭代器。\n ratio : float, 可选\n 用于计算阈值的比率,默认为self.ratio。\n\n \"\"\"\n self.eval() # 将模型设置为评估模式\n # 如果未提供ratio,则使用self.ratio作为默认值\n ratio = ratio if ratio is not None else self.ratio\n \n # 断言确保ratio的值在[0,1]范围内\n assert 0 <= ratio <= 1\n probabilities = [] # 用于存储模型输出的概率值\n with torch.no_grad(): # 在不计算梯度的情况下\n for x_val, y_val in validation_data_producer: # 从验证数据生成器中获取数据\n # 将输入数据和标签转换为适当的数据类型,并移动到指定的设备上\n x_val, y_val = utils.to_tensor(x_val.double(), y_val.long(), self.device)\n # 获取g模型的输出\n x_logits = self.forward_g(x_val)\n # 将模型输出添加到概率列表中\n probabilities.append(x_logits)\n # 对所有模型输出进行排序\n s, _ = torch.sort(torch.cat(probabilities, dim=0))\n # 计算索引i,它基于所提供的比率确定了阈值在排序输出中的位置\n i = int((s.shape[0] - 1) * ratio)\n assert i >= 0 # 确保i是一个有效的索引\n # 设置模型的阈值tau为s[i],即比率确定的阈值\n self.tau[0] = s[i]\n\n\n def reset_threshold(self):\n \"\"\"\n 重置模型的阈值为0。\n \"\"\"\n self.tau[0] = 0.\n\n # 这个自定义的损失函数旨在同时训练模型以准确地分类原始样本,并检测出对抗样本。这是通过结合两种损失来实现的,其中每种损失都有其权重。\n def customize_loss(self, logits_x, labels, logits_adv_x, labels_adv, beta_1=1, beta_2=1):\n \"\"\"\n 自定义的损失函数,结合分类损失和对抗损失。\n\n 参数:\n --------\n logits_x : torch.Tensor\n 原始样本的模型输出。\n labels : torch.Tensor\n 原始样本的真实标签。\n logits_adv_x : torch.Tensor\n 对抗样本的模型输出。\n labels_adv : torch.Tensor\n 对抗样本的真实标签。\n beta_1 : float, 可选\n 原始样本损失的权重,默认为1。\n beta_2 : float, 可选\n 对抗样本损失的权重,默认为1。\n\n 返回:\n --------\n torch.Tensor\n 计算得到的总损失。\n\n \"\"\"\n # 如果有对抗样本,计算对抗损失。否则,将其设置为0。\n if logits_adv_x is not None and len(logits_adv_x) > 0:\n G = 
F.binary_cross_entropy_with_logits(logits_adv_x, labels_adv)\n else:\n G = 0\n\n # 如果有原始样本,计算分类损失。否则,将其设置为0。\n if logits_x is not None and len(logits_x) > 0:\n F_ = F.cross_entropy(logits_x, labels)\n else:\n F_ = 0\n\n # 结合两种损失,使用beta_1和beta_2作为权重\n return beta_1 * F_ + beta_2 * G\n\n\n # 这段代码描述了训练过程。它首先在每个时期开始时对模型进行训练,然后对每一个批次的数据进行训练。\n # 这里的亮点是它还生成了带有椒盐噪声的数据,并对其进行了分类。\n # 最后,它计算了每个批次的损失和准确率,并可能将其记录在日志中。\n \n def fit(self, train_data_producer, validation_data_producer, epochs=100, lr=0.005, weight_decay=0., verbose=True):\n \"\"\"\n 训练恶意软件和对抗检测器,根据验证结果选择最佳模型。\n\n 参数:\n --------\n train_data_producer: 对象\n 用于生成训练批次数据的迭代器。\n validation_data_producer: 对象\n 用于生成验证数据集的迭代器。\n epochs: 整数\n 训练的迭代次数,默认为100。\n lr: 浮点数\n Adam优化器的学习率,默认为0.005。\n weight_decay: 浮点数\n 惩罚因子,默认为0。\n verbose: 布尔值\n 是否显示详细日志,默认为True。\n \"\"\"\n optimizer = optim.Adam(self.parameters(), lr=lr, weight_decay=weight_decay)\n best_avg_acc = 0. # 初始化最佳平均准确率\n best_epoch = 0\n total_time = 0. # 累计训练时间\n nbatches = len(train_data_producer)\n \n # 开始训练\n for i in range(epochs):\n self.train() # 将模型设为训练模式\n losses, accuracies = [], []\n\n # 迭代训练批次数据\n for idx_batch, (x_train, y_train) in enumerate(train_data_producer):\n # 数据移动到指定设备\n x_train, y_train = utils.to_device(x_train.double(), y_train.long(), self.device)\n \n # 为g网络生成数据\n # 1. 添加椒盐噪声\n x_train_noises = torch.clamp(x_train + utils.psn(x_train, np.random.uniform(0, 0.5)), min=0., max=1.)\n x_train_ = torch.cat([x_train, x_train_noises], dim=0)\n y_train_ = torch.cat([torch.zeros(x_train.shape[:1]), torch.ones(x_train.shape[:1])]).double().to(self.device)\n idx = torch.randperm(y_train_.shape[0])\n x_train_ = x_train_[idx]\n y_train_ = y_train_[idx]\n\n # 开始一次训练迭代\n start_time = time.time()\n optimizer.zero_grad()\n logits_f = self.forward_f(x_train)\n logits_g = self.forward_g(x_train_)\n loss_train = self.customize_loss(logits_f, y_train, logits_g, y_train_)\n loss_train.backward()\n optimizer.step()\n \n # 约束条件\n constraint = utils.NonnegWeightConstraint()\n for name, module in self.named_modules():\n if 'non_neg_layer' in name:\n module.apply(constraint)\n \n total_time = total_time + time.time() - start_time\n \n # 计算准确率\n acc_f_train = (logits_f.argmax(1) == y_train).sum().item() / x_train.size()[0]\n acc_g_train = ((F.sigmoid(logits_g) >= 0.5) == y_train_).sum().item() / x_train_.size()[0]\n \n # 更新记录\n losses.append(loss_train.item())\n accuracies.append(acc_f_train)\n accuracies.append(acc_g_train)\n \n # 如果需要,打印详细日志\n if verbose:\n mins, secs = int(total_time / 60), int(total_time % 60)\n logger.info(f'Mini batch: {i * nbatches + idx_batch + 1}/{epochs * nbatches} | training time in {mins:.0f} minutes, {secs} seconds.')\n logger.info(f'Training loss (batch level): {losses[-1]:.4f} | Train accuracy: {acc_f_train * 100:.2f}% & {acc_g_train * 100:.2f}%.')\n \n # 设置模型为评估模式\n self.eval()\n \n # 初始化一个列表用于保存每批验证数据的准确率\n avg_acc_val = []\n \n # 禁用梯度计算,以加速计算并减少内存使用\n with torch.no_grad():\n for x_val, y_val in validation_data_producer:\n # 数据移到指定设备\n x_val, y_val = utils.to_device(x_val.double(), y_val.long(), self.device)\n \n # 为g网络生成数据(带有椒盐噪声)\n x_val_noises = torch.clamp(x_val + utils.psn(x_val, np.random.uniform(0, 0.5)), min=0., max=1.)\n x_val_ = torch.cat([x_val, x_val_noises], dim=0)\n y_val_ = torch.cat([torch.zeros(x_val.shape[:1]), torch.ones(x_val.shape[:1])]).long().to(self.device)\n \n # 获取预测的标签\n logits_f = self.forward_f(x_val)\n logits_g = self.forward_g(x_val_)\n \n # 计算f网络的准确率\n acc_val = (logits_f.argmax(1) == y_val).sum().item() / x_val.size()[0]\n 
avg_acc_val.append(acc_val)\n \n # 计算g网络的准确率\n acc_val_g = ((F.sigmoid(logits_g) >= 0.5) == y_val_).sum().item() / x_val_.size()[0]\n avg_acc_val.append(acc_val_g)\n \n # 计算平均准确率\n avg_acc_val = np.mean(avg_acc_val)\n\n # 如果当前模型的验证准确率是迄今为止的最佳,则保存该模型\n if avg_acc_val >= best_avg_acc:\n best_avg_acc = avg_acc_val\n best_epoch = i\n # 获取阈值\n self.get_threshold(validation_data_producer)\n # 保存模型\n self.save_to_disk()\n if verbose:\n print(f'Model saved at path: {self.model_save_path}')\n\n # 如果需要,显示训练和验证的详细信息\n if verbose:\n logger.info(f'Training loss (epoch level): {np.mean(losses):.4f} | Train accuracy: {np.mean(accuracies) * 100:.2f}')\n logger.info(f'Validation accuracy: {avg_acc_val * 100:.2f} | The best validation accuracy: {best_avg_acc * 100:.2f} at epoch: {best_epoch}')\n\n def load(self):\n # load model\n assert path.exists(self.model_save_path), 'train model first'\n # ckpt = torch.load(self.model_save_path)\n # self.tau = ckpt['tau']\n # self.md_nn_model.load_state_dict(ckpt['md_model'])\n self.load_state_dict(torch.load(self.model_save_path))\n\n def save_to_disk(self):\n if not path.exists(self.model_save_path):\n utils.mkdir(path.dirname(self.model_save_path))\n # torch.save({\n # 'tau': self.tau,\n # 'md_model': self.md_nn_model.state_dict(),\n # 'amd_model': self.state_dict()\n # }, self.model_save_path\n # )\n torch.save(self.state_dict(), self.model_save_path)" }, { "identifier": "save_args", "path": "tools/utils.py", "snippet": "def save_args(fout, args):\n if isinstance(args, str):\n dump_txt(args, fout, mode='w')\n elif isinstance(args, dict):\n args_str = build_kwargs(args.keys(), args)\n dump_txt(args_str, fout, mode='w')\n else:\n raise TypeError(\"Expected str or dict.\")" }, { "identifier": "get_group_args", "path": "tools/utils.py", "snippet": "def get_group_args(args, args_parser, title):\n \"\"\"\n 从给定的 argparse.ArgumentParser 对象中获取指定组的参数值,并以字典形式返回。\n\n Args:\n - args (argparse.Namespace): 已解析的命令行参数对象。\n - args_parser (argparse.ArgumentParser): 命令行参数解析器对象。\n - title (str): 目标参数组的标题。\n\n Returns:\n - dict: 包含目标参数组中参数名及其对应的值的字典。\n \"\"\"\n import argparse\n\n # 确保传入的参数 args 和 args_parser 是 argparse.Namespace 和 argparse.ArgumentParser 类型\n assert isinstance(args, argparse.Namespace) and isinstance(args_parser, argparse.ArgumentParser)\n\n # 遍历 args_parser 中的所有参数组\n for group in args_parser._action_groups:\n # 如果找到了指定标题的参数组,则返回该组中指定参数名及其对应的值\n if group.title == title:\n return {action.dest: getattr(args, action.dest, None) for action in group._group_actions}\n else:\n # 否则继续查找下一个参数组\n continue\n\n # 如果未找到目标参数组,则返回空字典\n return {}" }, { "identifier": "dump_pickle", "path": "tools/utils.py", "snippet": "def dump_pickle(data, path, use_gzip=False):\n print(\"tr_te_va_split path:\", path)\n if not os.path.exists(os.path.dirname(path)):\n mkdir(os.path.dirname(path))\n if not use_gzip:\n with open(path, 'wb') as wr:\n pkl.dump(data, wr)\n else:\n with gzip.open(path, 'wb') as wr:\n pkl.dump(data, wr)\n return True" }, { "identifier": "cmd_md", "path": "examples/md_nn_test.py", "snippet": "def _main():" } ]
import os.path as path
import time

from core.defense import Dataset
from core.defense import AdvMalwareDetectorICNN, MalwareDetectionDNN
from tools.utils import save_args, get_group_args, dump_pickle
from examples.md_nn_test import cmd_md
17,879
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

indicator_argparse = cmd_md.add_argument_group(title='adv indicator')
indicator_argparse.add_argument('--ratio', type=float, default=0.95,
                                help='ratio of validation examples remained for passing through malware detector')


def _main():
    # Parse command-line arguments
    args = cmd_md.parse_args()
    # Load the dataset and extract features according to the given arguments
    dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature'))
    # Input producer for the training set
    train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size,
                                                        name='train', use_cache=args.cache)
    # Input producer for the validation set
    val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size,
                                                      name='val')
    # Input producer for the test set
    test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size,
                                                       name='test')
    # Make sure the dataset has exactly two classes (malware vs. benign)
    assert dataset.n_classes == 2
    # Select the device (CPU or GPU) depending on whether CUDA is requested
    dv = 'cuda' if args.cuda else 'cpu'
    # In test mode, use the given model name; otherwise derive one from the current time
    model_name = args.model_name if args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S")
    # Initialize the base malware detection model
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

indicator_argparse = cmd_md.add_argument_group(title='adv indicator')
indicator_argparse.add_argument('--ratio', type=float, default=0.95,
                                help='ratio of validation examples remained for passing through malware detector')


def _main():
    # Parse command-line arguments
    args = cmd_md.parse_args()
    # Load the dataset and extract features according to the given arguments
    dataset = Dataset(feature_ext_args=get_group_args(args, cmd_md, 'feature'))
    # Input producer for the training set
    train_dataset_producer = dataset.get_input_producer(*dataset.train_dataset, batch_size=args.batch_size,
                                                        name='train', use_cache=args.cache)
    # Input producer for the validation set
    val_dataset_producer = dataset.get_input_producer(*dataset.validation_dataset, batch_size=args.batch_size,
                                                      name='val')
    # Input producer for the test set
    test_dataset_producer = dataset.get_input_producer(*dataset.test_dataset, batch_size=args.batch_size,
                                                       name='test')
    # Make sure the dataset has exactly two classes (malware vs. benign)
    assert dataset.n_classes == 2
    # Select the device (CPU or GPU) depending on whether CUDA is requested
    dv = 'cuda' if args.cuda else 'cpu'
    # In test mode, use the given model name; otherwise derive one from the current time
    model_name = args.model_name if args.mode == 'test' else time.strftime("%Y%m%d-%H%M%S")
    # Initialize the base malware detection model
md_model = MalwareDetectionDNN(dataset.vocab_size,
1
2023-11-27 02:00:23+00:00
24k
iann838/pulsefire
tests/test_doc_scripts.py
[ { "identifier": "RiotAPIClient", "path": "pulsefire/clients.py", "snippet": "class RiotAPIClient(BaseClient):\n \"\"\"Riot API Client.\n\n | Resources | Support |\n | -------------------- | -------------------------- |\n | League of Legends | ✅ |\n | Legends of Runeterra | ✅ |\n | Teamfight Tactics | ✅ |\n | Valorant | ✅ |\n\n Example:\n ```python\n async with RiotAPIClient(\n default_headers={\"X-Riot-Token\": <API_KEY>}\n ) as client:\n summoner = await client.get_lol_summoner_v4_by_name(region=\"na1\", name=\"Not a Whale\")\n assert summoner[\"summonerLevel\"] > 200\n ```\n \"\"\"\n\n Region = Literal[\n \"americas\", \"europe\", \"asia\", \"sea\", \"esports\",\n \"br1\", \"eun1\", \"euw1\", \"jp1\", \"kr\", \"la1\", \"la2\",\n \"na1\", \"oc1\", \"tr1\", \"ru\", \"ph2\", \"sg2\", \"th2\", \"tw2\", \"vn2\",\n \"ap\", \"br\", \"eu\", \"kr\", \"latam\", \"na\",\n ] | _str\n\n def __init__(\n self,\n *,\n base_url: str = \"https://{region}.api.riotgames.com\",\n default_params: dict[str, Any] = {},\n default_headers: dict[str, str] = {\"X-Riot-Token\": \"\"},\n default_queries: dict[str, str] = {},\n middlewares: list[Middleware] = [\n json_response_middleware(),\n http_error_middleware(),\n rate_limiter_middleware(RiotAPIRateLimiter()),\n ],\n ) -> None:\n super().__init__(\n base_url=base_url,\n default_params=default_params,\n default_headers=default_headers,\n default_queries=default_queries,\n middlewares=middlewares\n )\n\n # Account Endpoints\n\n async def get_account_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-puuid/{puuid}\")\n\n async def get_account_v1_by_riot_id(self, *, region: Region = ..., game_name: str = ..., tag_line: str = ...) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/by-riot-id/{game_name}/{tag_line}\")\n\n async def get_account_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.AccountV1Account:\n return await self.invoke(\"GET\", \"/riot/account/v1/accounts/me\")\n\n async def get_account_v1_active_shard_by_puuid(self, *, region: Region = ..., puuid: str = ..., game: str = ...) -> RiotAPISchema.AccountV1ActiveShard:\n return await self.invoke(\"GET\", \"/riot/account/v1/active-shards/by-game/{game}/by-puuid/{puuid}\")\n\n # League of Legends Endpoints\n\n async def get_lol_champion_v3_rotation(self, *, region: Region = ...) -> RiotAPISchema.LolChampionV3Rotation:\n return await self.invoke(\"GET\", \"/lol/platform/v3/champion-rotations\")\n\n async def get_lol_champion_v4_mastery_by_summoner(self, *, region: Region = ..., summoner_id: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_top_masteries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-summoner/{summoner_id}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-summoner/{summoner_id}\")\n\n async def get_lol_champion_v4_mastery_by_puuid(self, *, region: Region = ..., puuid: str = ..., champion_id: int = ...) -> RiotAPISchema.LolChampionV4Mastery:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/by-champion/{champion_id}\")\n\n async def get_lol_champion_v4_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}\")\n\n async def get_lol_champion_v4_top_masteries_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolChampionV4Mastery]:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/champion-masteries/by-puuid/{puuid}/top\")\n\n async def get_lol_champion_v4_mastery_score_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> int:\n return await self.invoke(\"GET\", \"/lol/champion-mastery/v4/scores/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_players_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-summoner/{summoner_id}\")\n\n async def get_lol_clash_v1_players_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[RiotAPISchema.LolClashV1Player]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/players/by-puuid/{puuid}\")\n\n async def get_lol_clash_v1_team(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Team:\n return await self.invoke(\"GET\", \"/lol/clash/v1/teams/{id}\")\n\n async def get_lol_clash_v1_tournament_by_team(self, *, region: Region = ..., team_id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/by-team/{team_id}\")\n\n async def get_lol_clash_v1_tournament(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolClashV1Tournament:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments/{id}\")\n\n async def get_lol_clash_v1_tournaments(self, *, region: Region = ...) -> list[RiotAPISchema.LolClashV1Tournament]:\n return await self.invoke(\"GET\", \"/lol/clash/v1/tournaments\")\n\n async def get_lol_league_v4_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/by-summoner/{summoner_id}\")\n\n async def get_lol_league_v4_challenger_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/challengerleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_grandmaster_league_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/grandmasterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_master_league_by_queue(self, *, region: Region = ..., queue: str = ...) 
-> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/masterleagues/by-queue/{queue}\")\n\n async def get_lol_league_v4_entries_by_division(\n self, *, region: Region = ..., queue: str = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.LolLeagueV4LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/lol/league/v4/entries/{queue}/{tier}/{division}\")\n\n async def get_lol_league_v4_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolLeagueV4League:\n return await self.invoke(\"GET\", \"/lol/league/v4/leagues/{id}\")\n\n async def get_lol_match_v5_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5Match:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}\")\n\n async def get_lol_match_v5_match_timeline(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolMatchV5MatchTimeline:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/{id}/timeline\")\n\n async def get_lol_match_v5_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/lol/match/v5/matches/by-puuid/{puuid}/ids\")\n\n async def get_lol_spectator_v4_active_game_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) -> RiotAPISchema.LolSpectatorV4Game:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/active-games/by-summoner/{summoner_id}\")\n\n async def get_lol_spectator_v4_featured_games(self, *, region: Region = ...) -> RiotAPISchema.LolSpectatorV4GameList:\n return await self.invoke(\"GET\", \"/lol/spectator/v4/featured-games\")\n\n async def get_lol_status_v4_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lol/status/v4/platform-data\")\n\n async def get_lol_summoner_v4_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/{id}\")\n\n async def get_lol_summoner_v4_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-name/{name}\")\n\n async def get_lol_summoner_v4_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/by-puuid/{puuid}\")\n\n async def get_lol_summoner_v4_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/lol/summoner/v4/summoners/me\")\n\n async def get_lol_summoner_v4_by_rso_puuid(self, *, region: Region = ..., rso_puuid: str = ...) -> RiotAPISchema.LolSummonerV4Summoner:\n return await self.invoke(\"GET\", \"/fulfillment/v1/summoners/by-puuid/{rso_puuid}\")\n\n # Teamfight Tactics Endpoints\n\n async def get_tft_league_v1_entries_by_summoner(self, *, region: Region = ..., summoner_id: str = ...) 
-> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/by-summoner/{summoner_id}\")\n\n async def get_tft_league_v1_challenger_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/challenger\")\n\n async def get_tft_league_v1_grandmaster_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/grandmaster\")\n\n async def get_tft_league_v1_master_league(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/master\")\n\n async def get_tft_league_v1_entries_by_division(\n self, *, region: Region = ..., tier: str = ..., division: str = ..., queries: dict = {\"page\": 1}\n ) -> list[RiotAPISchema.TftLeagueV1LeagueFullEntry]:\n return await self.invoke(\"GET\", \"/tft/league/v1/entries/{tier}/{division}\")\n\n async def get_tft_league_v1_league(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftLeagueV1League:\n return await self.invoke(\"GET\", \"/tft/league/v1/leagues/{id}\")\n\n async def get_tft_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftMatchV1Match:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/{id}\")\n\n async def get_tft_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ..., queries: dict = {\"start\": 0, \"count\": 100}) -> list[str]:\n return await self.invoke(\"GET\", \"/tft/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_tft_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/tft/status/v1/platform-data\")\n\n async def get_tft_summoner_v1_by_id(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/{id}\")\n\n async def get_tft_summoner_v1_by_name(self, *, region: Region = ..., name: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-name/{name}\")\n\n async def get_tft_summoner_v1_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/by-puuid/{puuid}\")\n\n async def get_tft_summoner_v1_me(self, *, region: Region = ..., headers: dict = {\"Authorization\": \"\"}) -> RiotAPISchema.TftSummonerV1Summoner:\n return await self.invoke(\"GET\", \"/tft/summoner/v1/summoners/me\")\n\n # Legends of Runeterra Endpoints\n\n async def get_lor_ranked_v1_leaderboard(self, *, region: Region = ...) -> RiotAPISchema.LorRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/lor/ranked/v1/leaderboards\")\n\n async def get_lor_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.LorMatchV1Match:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/{id}\")\n\n async def get_lor_match_v1_match_ids_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> list[str]:\n return await self.invoke(\"GET\", \"/lor/match/v1/matches/by-puuid/{puuid}/ids\")\n\n async def get_lor_status_v1_platform_data(self, *, region: Region = ...) 
-> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/lor/status/v1/platform-data\")\n\n # Valorant Endpoints\n\n async def get_val_content_v1_contents(self, *, region: Region = ..., queries: dict = {}) -> RiotAPISchema.ValContentV1Contents:\n return await self.invoke(\"GET\", \"/val/content/v1/contents\")\n\n async def get_val_ranked_v1_leaderboard_by_act(self, *, region: Region = ..., act_id: str = ...) -> RiotAPISchema.ValRankedV1Leaderboard:\n return await self.invoke(\"GET\", \"/val/ranked/v1/leaderboards/by-act/{act_id}\")\n\n async def get_val_match_v1_match(self, *, region: Region = ..., id: str = ...) -> RiotAPISchema.ValMatchV1Match:\n return await self.invoke(\"GET\", \"/val/match/v1/matches/{id}\")\n\n async def get_val_match_v1_matchlist_by_puuid(self, *, region: Region = ..., puuid: str = ...) -> RiotAPISchema.ValMatchV1Matchlist:\n return await self.invoke(\"GET\", \"/val/match/v1/matchlists/by-puuid/{puuid}\")\n\n async def get_val_match_v1_recent_matches_by_queue(self, *, region: Region = ..., queue: str = ...) -> RiotAPISchema.ValMatchV1RecentMatches:\n return await self.invoke(\"GET\", \"/val/match/v1/recent-matches/by-queue/{queue}\")\n\n async def get_val_status_v1_platform_data(self, *, region: Region = ...) -> RiotAPISchema.StatusV1PlatformData:\n return await self.invoke(\"GET\", \"/val/status/v1/platform-data\")" }, { "identifier": "async_to_sync", "path": "pulsefire/functools.py", "snippet": "def async_to_sync(runner: Callable[[Awaitable[Any]], Any] = asyncio.run):\n \"\"\"Convert a coroutine function to run synchronously. Use as decorator `@async_to_sync()`.\n\n Example:\n ```python\n @async_to_sync()\n async def sample_func(number: int):\n ...\n \n sample_func(0)\n ```\n\n Parameters:\n runner: A callable that runs the awaitable synchronously.\n\n Raises:\n TypeError: When `func` is not a coroutine function.\n \"\"\"\n\n def decorator[**P, R](func: Callable[P, Awaitable[R]]) -> Callable[P, R]:\n if not inspect.iscoroutinefunction(func):\n raise TypeError(f\"{func} is not a coroutine function\")\n\n @functools.wraps(func)\n def wrapper(*args: P.args, **kwargs: P.kwargs) -> R:\n return runner(func(*args, **kwargs))\n\n return wrapper\n\n return decorator" }, { "identifier": "RiotAPISchema", "path": "pulsefire/schemas.py", "snippet": "class RiotAPISchema:\n\n # Account Types\n\n AccountV1Account = TypedDict(\"AccountV1Account\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n })\n AccountV1ActiveShard = TypedDict(\"AccountV1ActiveShard\", {\n \"puuid\": str,\n \"game\": str,\n \"activeShard\": str,\n })\n\n # League of Legends Types\n\n LolChampionV3Rotation = TypedDict(\"LolChampionV3Rotation\", {\n \"freeChampionIds\": list[int],\n \"freeChampionIdsForNewPlayers\": list[int],\n \"maxNewPlayerLevel\": int\n })\n LolChampionV4Mastery = TypedDict(\"LolChampionV4Mastery\", {\n \"puuid\": str,\n \"championId\": int,\n \"championLevel\": int,\n \"championPoints\": int,\n \"lastPlayTime\": int,\n \"championPointsSinceLastLevel\": int,\n \"championPointsUntilNextLevel\": int,\n \"chestGranted\": bool,\n \"tokensEarned\": int,\n \"summonerId\": str\n })\n LolClashV1Player = TypedDict(\"LolClashV1Player\", {\n \"summonerId\": str,\n \"teamId\": str,\n \"position\": str,\n \"role\": str,\n })\n LolClashV1Team = TypedDict(\"LolClashV1Team\", {\n \"id\": str,\n \"tournamentId\": int,\n \"name\": str,\n \"iconId\": int,\n \"tier\": int,\n \"captain\": str,\n \"abbreviation\": str,\n \"players\": list[LolClashV1Player],\n })\n 
LolClashV1TournamentSchedule = TypedDict(\"LolClashV1TournamentSchedule\", {\n \"id\": int,\n \"registrationTime\": int,\n \"startTime\": int,\n \"cancelled\": bool,\n })\n LolClashV1Tournament = TypedDict(\"LolClashV1Tournament\", {\n \"id\": int,\n \"themeId\": int,\n \"nameKey\": str,\n \"nameKeySecondary\": str,\n \"schedule\": list[LolClashV1TournamentSchedule]\n })\n LolLeagueV4LeagueEntryMiniSeries = TypedDict(\"LolLeagueV4LeagueEntryMiniSeries\", {\n \"losses\": int,\n \"progress\": str,\n \"target\": int,\n \"wins\": int,\n })\n LolLeagueV4LeagueEntry = TypedDict(\"LolLeagueV4LeagueEntry\", {\n \"summonerId\": str,\n \"summonerName\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n })\n LolLeagueV4LeagueFullEntry = TypedDict(\"LolLeagueV4LeagueFullEntry\", {\n \"leagueId\": str,\n \"summonerId\": str,\n \"summonerName\": str,\n \"queueType\": str,\n \"tier\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n \"miniSeries\": NotRequired[LolLeagueV4LeagueEntryMiniSeries],\n })\n LolLeagueV4League = TypedDict(\"LolLeagueV4League\", {\n \"tier\": str,\n \"leagueId\": str,\n \"queue\": str,\n \"name\": str,\n \"entries\": list[LolLeagueV4LeagueEntry]\n })\n LolMatchV5MatchMetadata = TypedDict(\"LolMatchV5MatchMetadata\", {\n \"dataVersion\": str,\n \"matchId\": str,\n \"participants\": list[str]\n })\n LolMatchV5MatchTeamObjective = TypedDict(\"LolMatchV5MatchTeamObjective\", {\n \"first\": bool,\n \"kills\": int\n })\n LolMatchV5MatchInfoParticipantChallenges = TypedDict(\"LolMatchV5MatchInfoParticipantChallenges\", {\n \"12AssistStreakCount\": int,\n \"abilityUses\": int,\n \"acesBefore15Minutes\": int,\n \"alliedJungleMonsterKills\": int,\n \"baronTakedowns\": int,\n \"blastConeOppositeOpponentCount\": int,\n \"bountyGold\": int,\n \"buffsStolen\": int,\n \"completeSupportQuestInTime\": int,\n \"controlWardTimeCoverageInRiverOrEnemyHalf\": NotRequired[float],\n \"controlWardsPlaced\": int,\n \"damagePerMinute\": float,\n \"damageTakenOnTeamPercentage\": float,\n \"dancedWithRiftHerald\": int,\n \"deathsByEnemyChamps\": int,\n \"dodgeSkillShotsSmallWindow\": int,\n \"doubleAces\": int,\n \"dragonTakedowns\": int,\n \"earliestBaron\": float,\n \"earlyLaningPhaseGoldExpAdvantage\": int,\n \"effectiveHealAndShielding\": float,\n \"elderDragonKillsWithOpposingSoul\": int,\n \"elderDragonMultikills\": int,\n \"enemyChampionImmobilizations\": int,\n \"enemyJungleMonsterKills\": int,\n \"epicMonsterKillsNearEnemyJungler\": int,\n \"epicMonsterKillsWithin30SecondsOfSpawn\": int,\n \"epicMonsterSteals\": int,\n \"epicMonsterStolenWithoutSmite\": int,\n \"firstTurretKilled\": int,\n \"firstTurretKilledTime\": NotRequired[float],\n \"flawlessAces\": int,\n \"fullTeamTakedown\": int,\n \"gameLength\": float,\n \"getTakedownsInAllLanesEarlyJungleAsLaner\": NotRequired[int],\n \"goldPerMinute\": float,\n \"hadOpenNexus\": int,\n \"immobilizeAndKillWithAlly\": int,\n \"initialBuffCount\": int,\n \"initialCrabCount\": int,\n \"jungleCsBefore10Minutes\": float,\n \"junglerTakedownsNearDamagedEpicMonster\": int,\n \"kTurretsDestroyedBeforePlatesFall\": int,\n \"kda\": float,\n \"killAfterHiddenWithAlly\": int,\n \"killParticipation\": float,\n \"killedChampTookFullTeamDamageSurvived\": int,\n \"killingSprees\": int,\n \"killsNearEnemyTurret\": int,\n 
\"killsOnOtherLanesEarlyJungleAsLaner\": NotRequired[int],\n \"killsOnRecentlyHealedByAramPack\": int,\n \"killsUnderOwnTurret\": int,\n \"killsWithHelpFromEpicMonster\": int,\n \"knockEnemyIntoTeamAndKill\": int,\n \"landSkillShotsEarlyGame\": int,\n \"laneMinionsFirst10Minutes\": int,\n \"laningPhaseGoldExpAdvantage\": int,\n \"legendaryCount\": int,\n \"lostAnInhibitor\": int,\n \"maxCsAdvantageOnLaneOpponent\": float,\n \"maxKillDeficit\": int,\n \"maxLevelLeadLaneOpponent\": int,\n \"mejaisFullStackInTime\": int,\n \"moreEnemyJungleThanOpponent\": float,\n \"multiKillOneSpell\": int,\n \"multiTurretRiftHeraldCount\": int,\n \"multikills\": int,\n \"multikillsAfterAggressiveFlash\": int,\n \"mythicItemUsed\": NotRequired[int],\n \"outerTurretExecutesBefore10Minutes\": int,\n \"outnumberedKills\": int,\n \"outnumberedNexusKill\": int,\n \"perfectDragonSoulsTaken\": int,\n \"perfectGame\": int,\n \"pickKillWithAlly\": int,\n \"playedChampSelectPosition\": NotRequired[int],\n \"poroExplosions\": int,\n \"quickCleanse\": int,\n \"quickFirstTurret\": int,\n \"quickSoloKills\": int,\n \"riftHeraldTakedowns\": int,\n \"saveAllyFromDeath\": int,\n \"scuttleCrabKills\": int,\n \"shortestTimeToAceFromFirstTakedown\": NotRequired[float],\n \"skillshotsDodged\": int,\n \"skillshotsHit\": int,\n \"snowballsHit\": int,\n \"soloBaronKills\": int,\n \"soloKills\": int,\n \"stealthWardsPlaced\": int,\n \"survivedSingleDigitHpCount\": int,\n \"survivedThreeImmobilizesInFight\": int,\n \"takedownOnFirstTurret\": int,\n \"takedowns\": int,\n \"takedownsAfterGainingLevelAdvantage\": int,\n \"takedownsBeforeJungleMinionSpawn\": int,\n \"takedownsFirstXMinutes\": int,\n \"takedownsInAlcove\": int,\n \"takedownsInEnemyFountain\": int,\n \"teamBaronKills\": int,\n \"teamDamagePercentage\": float,\n \"teamElderDragonKills\": int,\n \"teamRiftHeraldKills\": int,\n \"tookLargeDamageSurvived\": int,\n \"turretPlatesTaken\": int,\n \"turretTakedowns\": int,\n \"turretsTakenWithRiftHerald\": int,\n \"twentyMinionsIn3SecondsCount\": int,\n \"twoWardsOneSweeperCount\": int,\n \"unseenRecalls\": int,\n \"visionScoreAdvantageLaneOpponent\": float,\n \"visionScorePerMinute\": float,\n \"wardTakedowns\": int,\n \"wardTakedownsBefore20M\": int,\n \"wardsGuarded\": int,\n \"earliestDragonTakedown\": NotRequired[float],\n \"baronBuffGoldAdvantageOverThreshold\": NotRequired[int],\n \"teleportTakedowns\": NotRequired[int],\n \"fastestLegendary\": NotRequired[float],\n \"highestChampionDamage\": NotRequired[int],\n \"highestCrowdControlScore\": NotRequired[int],\n \"junglerKillsEarlyJungle\": NotRequired[int],\n \"killsOnLanersEarlyJungleAsJungler\": NotRequired[int],\n \"fasterSupportQuestCompletion\": NotRequired[int],\n \"highestWardKills\": NotRequired[int],\n \"soloTurretsLategame\": NotRequired[int],\n \"thirdInhibitorDestroyedTime\": NotRequired[float],\n }, total=False) | dict[str, int | float]\n LolMatchV5MatchInfoParticipantPerksStatPerks = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStatPerks\", {\n \"defense\": int,\n \"flex\": int,\n \"offense\": int\n })\n LolMatchV5MatchInfoParticipantPerksStyleSelection = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStyleSelection\", {\n \"perk\": int,\n \"var1\": int,\n \"var2\": int,\n \"var3\": int}\n )\n LolMatchV5MatchInfoParticipantPerksStyle = TypedDict(\"LolMatchV5MatchInfoParticipantPerksStyle\", {\n \"description\": str,\n \"selections\": list[LolMatchV5MatchInfoParticipantPerksStyleSelection],\n \"style\": int\n })\n LolMatchV5MatchInfoParticipantPerks = 
TypedDict(\"LolMatchV5MatchInfoParticipantPerks\", {\n \"statPerks\": LolMatchV5MatchInfoParticipantPerksStatPerks,\n \"styles\": list[LolMatchV5MatchInfoParticipantPerksStyle]\n })\n LolMatchV5MatchInfoParticipantMissions = TypedDict(\"LolMatchV5MatchInfoParticipant\", {\n \"playerScore0\": float,\n \"playerScore1\": float,\n \"playerScore10\": float,\n \"playerScore11\": float,\n \"playerScore2\": float,\n \"playerScore3\": float,\n \"playerScore4\": float,\n \"playerScore5\": float,\n \"playerScore6\": float,\n \"playerScore7\": float,\n \"playerScore8\": float,\n \"playerScore9\": float,\n })\n LolMatchV5MatchInfoParticipant = TypedDict(\"LolMatchV5MatchInfoParticipant\", {\n \"allInPings\": int,\n \"assistMePings\": int,\n \"assists\": int,\n \"baitPings\": int,\n \"baronKills\": int,\n \"basicPings\": int,\n \"bountyLevel\": int,\n \"challenges\": NotRequired[LolMatchV5MatchInfoParticipantChallenges],\n \"champExperience\": int,\n \"champLevel\": int,\n \"championId\": int,\n \"championName\": str,\n \"championTransform\": int,\n \"commandPings\": int,\n \"consumablesPurchased\": int,\n \"damageDealtToBuildings\": int,\n \"damageDealtToObjectives\": int,\n \"damageDealtToTurrets\": int,\n \"damageSelfMitigated\": int,\n \"dangerPings\": int,\n \"deaths\": int,\n \"detectorWardsPlaced\": int,\n \"doubleKills\": int,\n \"dragonKills\": int,\n \"eligibleForProgression\": bool,\n \"enemyMissingPings\": int,\n \"enemyVisionPings\": int,\n \"firstBloodAssist\": bool,\n \"firstBloodKill\": bool,\n \"firstTowerAssist\": bool,\n \"firstTowerKill\": bool,\n \"gameEndedInEarlySurrender\": bool,\n \"gameEndedInSurrender\": bool,\n \"getBackPings\": int,\n \"goldEarned\": int,\n \"goldSpent\": int,\n \"holdPings\": int,\n \"individualPosition\": str,\n \"inhibitorKills\": int,\n \"inhibitorTakedowns\": int,\n \"inhibitorsLost\": int,\n \"item0\": int,\n \"item1\": int,\n \"item2\": int,\n \"item3\": int,\n \"item4\": int,\n \"item5\": int,\n \"item6\": int,\n \"itemsPurchased\": int,\n \"killingSprees\": int,\n \"kills\": int,\n \"lane\": str,\n \"largestCriticalStrike\": int,\n \"largestKillingSpree\": int,\n \"largestMultiKill\": int,\n \"longestTimeSpentLiving\": int,\n \"magicDamageDealt\": int,\n \"magicDamageDealtToChampions\": int,\n \"magicDamageTaken\": int,\n \"missions\": NotRequired[LolMatchV5MatchInfoParticipantMissions],\n \"needVisionPings\": int,\n \"neutralMinionsKilled\": int,\n \"nexusKills\": int,\n \"nexusLost\": int,\n \"nexusTakedowns\": int,\n \"objectivesStolen\": int,\n \"objectivesStolenAssists\": int,\n \"onMyWayPings\": int,\n \"participantId\": int,\n \"pentaKills\": int,\n \"perks\": LolMatchV5MatchInfoParticipantPerks,\n \"physicalDamageDealt\": int,\n \"physicalDamageDealtToChampions\": int,\n \"physicalDamageTaken\": int,\n \"placement\": int,\n \"playerAugment1\": int,\n \"playerAugment2\": int,\n \"playerAugment3\": int,\n \"playerAugment4\": int,\n \"playerSubteamId\": int,\n \"playerScore0\": NotRequired[float],\n \"playerScore1\": NotRequired[float],\n \"playerScore10\": NotRequired[float],\n \"playerScore11\": NotRequired[float],\n \"playerScore2\": NotRequired[float],\n \"playerScore3\": NotRequired[float],\n \"playerScore4\": NotRequired[float],\n \"playerScore5\": NotRequired[float],\n \"playerScore6\": NotRequired[float],\n \"playerScore7\": NotRequired[float],\n \"playerScore8\": NotRequired[float],\n \"playerScore9\": NotRequired[float],\n \"profileIcon\": int,\n \"pushPings\": int,\n \"puuid\": str,\n \"quadraKills\": int,\n \"riotIdName\": 
NotRequired[str],\n \"riotIdTagline\": str,\n \"riotIdGameName\": NotRequired[str],\n \"role\": str,\n \"sightWardsBoughtInGame\": int,\n \"spell1Casts\": int,\n \"spell2Casts\": int,\n \"spell3Casts\": int,\n \"spell4Casts\": int,\n \"subteamPlacement\": int,\n \"summoner1Casts\": int,\n \"summoner1Id\": int,\n \"summoner2Casts\": int,\n \"summoner2Id\": int,\n \"summonerId\": str,\n \"summonerLevel\": int,\n \"summonerName\": str,\n \"teamEarlySurrendered\": bool,\n \"teamId\": int,\n \"teamPosition\": str,\n \"timeCCingOthers\": int,\n \"timePlayed\": int,\n \"totalAllyJungleMinionsKilled\": int,\n \"totalDamageDealt\": int,\n \"totalDamageDealtToChampions\": int,\n \"totalDamageShieldedOnTeammates\": int,\n \"totalDamageTaken\": int,\n \"totalEnemyJungleMinionsKilled\": int,\n \"totalHeal\": int,\n \"totalHealsOnTeammates\": int,\n \"totalMinionsKilled\": int,\n \"totalTimeCCDealt\": int,\n \"totalTimeSpentDead\": int,\n \"totalUnitsHealed\": int,\n \"tripleKills\": int,\n \"trueDamageDealt\": int,\n \"trueDamageDealtToChampions\": int,\n \"trueDamageTaken\": int,\n \"turretKills\": int,\n \"turretTakedowns\": int,\n \"turretsLost\": int,\n \"unrealKills\": int,\n \"visionClearedPings\": int,\n \"visionScore\": int,\n \"visionWardsBoughtInGame\": int,\n \"wardsKilled\": int,\n \"wardsPlaced\": int,\n \"win\": bool\n })\n LolMatchV5MatchInfoTeamBan = TypedDict(\"LolMatchV5MatchInfoTeamBan\", {\n \"championId\": int,\n \"pickTurn\": int\n })\n LolMatchV5MatchInfoTeamObjectives = TypedDict(\"LolMatchV5MatchInfoTeamObjectives\", {\n \"baron\": LolMatchV5MatchTeamObjective,\n \"champion\": LolMatchV5MatchTeamObjective,\n \"dragon\": LolMatchV5MatchTeamObjective,\n \"horde\": NotRequired[LolMatchV5MatchTeamObjective],\n \"inhibitor\": LolMatchV5MatchTeamObjective,\n \"riftHerald\": LolMatchV5MatchTeamObjective,\n \"tower\": LolMatchV5MatchTeamObjective\n })\n LolMatchV5MatchInfoTeam = TypedDict(\"LolMatchV5MatchInfoTeam\", {\n \"bans\": list[LolMatchV5MatchInfoTeamBan],\n \"objectives\": LolMatchV5MatchInfoTeamObjectives,\n \"teamId\": int,\n \"win\": bool\n })\n LolMatchV5MatchInfo = TypedDict(\"LolMatchV5MatchInfo\", {\n \"gameCreation\": int,\n \"gameDuration\": int,\n \"gameEndTimestamp\": int,\n \"gameId\": int,\n \"gameMode\": str,\n \"gameName\": str,\n \"gameStartTimestamp\": int,\n \"gameType\": str,\n \"gameVersion\": str,\n \"mapId\": int,\n \"participants\": list[LolMatchV5MatchInfoParticipant],\n \"platformId\": str,\n \"queueId\": int,\n \"teams\": list[LolMatchV5MatchInfoTeam],\n \"tournamentCode\": str\n })\n LolMatchV5Match = TypedDict(\"LolMatchV5Match\", {\n \"metadata\": LolMatchV5MatchMetadata,\n \"info\": LolMatchV5MatchInfo\n })\n LolMatchV5MatchTimelineParticipantFrameChampionStats = TypedDict(\"LolMatchV5MatchTimelineParticipantFrameChampionStats\", {\n \"abilityHaste\": int,\n \"abilityPower\": int,\n \"armor\": int,\n \"armorPen\": int,\n \"armorPenPercent\": int,\n \"attackDamage\": int,\n \"attackSpeed\": int,\n \"bonusArmorPenPercent\": int,\n \"bonusMagicPenPercent\": int,\n \"ccReduction\": int,\n \"cooldownReduction\": int,\n \"health\": int,\n \"healthMax\": int,\n \"healthRegen\": int,\n \"lifesteal\": int,\n \"magicPen\": int,\n \"magicPenPercent\": int,\n \"magicResist\": int,\n \"movementSpeed\": int,\n \"omnivamp\": int,\n \"physicalVamp\": int,\n \"power\": int,\n \"powerMax\": int,\n \"powerRegen\": int,\n \"spellVamp\": int\n })\n LolMatchV5MatchTimelineParticipantFrameDamageStats = TypedDict(\"LolMatchV5MatchTimelineParticipantFrameDamageStats\", 
{\n \"magicDamageDone\": int,\n \"magicDamageDoneToChampions\": int,\n \"magicDamageTaken\": int,\n \"physicalDamageDone\": int,\n \"physicalDamageDoneToChampions\": int,\n \"physicalDamageTaken\": int,\n \"totalDamageDone\": int,\n \"totalDamageDoneToChampions\": int,\n \"totalDamageTaken\": int,\n \"trueDamageDone\": int,\n \"trueDamageDoneToChampions\": int,\n \"trueDamageTaken\": int\n })\n LolMatchV5MatchTimelinePosition = TypedDict(\"LolMatchV5MatchTimelinePosition\", {\n \"x\": int,\n \"y\": int\n })\n LolMatchV5MatchTimelineParticipantFrame = TypedDict(\"LolMatchV5MatchTimelineParticipantFrame\", {\n \"championStats\": LolMatchV5MatchTimelineParticipantFrameChampionStats,\n \"currentGold\": int,\n \"damageStats\": LolMatchV5MatchTimelineParticipantFrameDamageStats,\n \"goldPerSecond\": int,\n \"jungleMinionsKilled\": int,\n \"level\": int,\n \"minionsKilled\": int,\n \"participantId\": int,\n \"position\": LolMatchV5MatchTimelinePosition,\n \"timeEnemySpentControlled\": int,\n \"totalGold\": int,\n \"xp\": int\n })\n LolMatchV5MatchTimelineEventDamage = TypedDict(\"LolMatchV5MatchTimelineEventDamage\", {\n \"basic\": bool,\n \"magicDamage\": int,\n \"name\": str,\n \"participantId\": int,\n \"physicalDamage\": int,\n \"spellName\": str,\n \"spellSlot\": int,\n \"trueDamage\": int,\n \"type\": str\n })\n LolMatchV5MatchTimelineMetadata = TypedDict(\"LolMatchV5MatchTimelineMetadata\", {\n \"dataVersion\": str,\n \"matchId\": str,\n \"participants\": list[str]\n })\n LolMatchV5MatchTimelineInfoFrameEvent = TypedDict(\"LolMatchV5MatchTimelineInfoFrameEvent\", {\n \"afterId\": NotRequired[int],\n \"beforeId\": NotRequired[int],\n \"goldGain\": NotRequired[int],\n \"participantId\": NotRequired[int],\n \"timestamp\": int,\n \"type\": str,\n \"creatorId\": NotRequired[int],\n \"wardType\": NotRequired[str],\n \"level\": NotRequired[int],\n \"itemId\": NotRequired[int],\n \"assistingParticipantIds\": NotRequired[list[int]],\n \"bounty\": NotRequired[int],\n \"killStreakLength\": NotRequired[int],\n \"killerId\": NotRequired[int],\n \"position\": NotRequired[LolMatchV5MatchTimelinePosition],\n \"shutdownBounty\": NotRequired[int],\n \"victimDamageDealt\": NotRequired[list[LolMatchV5MatchTimelineEventDamage]],\n \"victimDamageReceived\": NotRequired[list[LolMatchV5MatchTimelineEventDamage]],\n \"victimId\": NotRequired[int],\n \"levelUpType\": NotRequired[str],\n \"skillSlot\": NotRequired[int],\n \"realTimestamp\": NotRequired[int],\n })\n LolMatchV5MatchTimelineInfoFrame = TypedDict(\"LolMatchV5MatchTimelineInfoFrame\", {\n \"events\": list[LolMatchV5MatchTimelineInfoFrameEvent],\n \"participantFrames\": dict[str, LolMatchV5MatchTimelineParticipantFrame],\n \"timestamp\": int\n })\n LolMatchV5MatchTimelineInfoParticipants = TypedDict(\"LolMatchV5MatchTimelineInfoParticipants\", {\n \"participantId\": int,\n \"puuid\": str,\n })\n LolMatchV5MatchTimelineInfo = TypedDict(\"LolMatchV5MatchTimelineInfo\", {\n \"frameInterval\": int,\n \"frames\": list[LolMatchV5MatchTimelineInfoFrame],\n \"gameId\": int,\n \"participants\": list[LolMatchV5MatchTimelineInfoParticipants]\n })\n LolMatchV5MatchTimeline = TypedDict(\"LolMatchV5MatchTimeline\", {\n \"metadata\": LolMatchV5MatchTimelineMetadata,\n \"info\": LolMatchV5MatchTimelineInfo\n })\n LolSpectatorV4GameParticipantPerks = TypedDict(\"LolSpectatorV4GameParticipantPerks\", {\n \"perkIds\": list[int],\n \"perkStyle\": int,\n \"perkSubStyle\": int\n })\n LolSpectatorV4GameParticipant = TypedDict(\"LolSpectatorV4GameParticipant\", {\n 
\"gameCustomizationObjects\": NotRequired[list[str]],\n \"perks\": NotRequired[LolSpectatorV4GameParticipantPerks],\n \"puuid\": str,\n \"summonerId\": str,\n \"teamId\": int,\n \"spell1Id\": int,\n \"spell2Id\": int,\n \"championId\": int,\n \"profileIconId\": int,\n \"summonerName\": str,\n \"bot\": bool\n })\n LolSpectatorV4GameObservers = TypedDict(\"LolSpectatorV4GameObservers\", {\n \"encryptionKey\": str\n })\n LolSpectatorV4Game = TypedDict(\"LolSpectatorV4Game\", {\n \"gameId\": int,\n \"mapId\": int,\n \"gameMode\": str,\n \"gameType\": str,\n \"gameQueueConfigId\": int,\n \"participants\": list[LolSpectatorV4GameParticipant],\n \"observers\": LolSpectatorV4GameObservers,\n \"platformId\": str,\n \"bannedChampions\": list[int],\n \"gameStartTime\": int,\n \"gameLength\": int\n })\n LolSpectatorV4GameList = TypedDict(\"LolSpectatorV4GameList\", {\n \"gameList\": list[LolSpectatorV4Game],\n \"clientRefreshInterval\": int\n })\n LolSummonerV4Summoner = TypedDict(\"SummonerV4Summoner\", {\n \"id\": str,\n \"accountId\": str,\n \"puuid\": str,\n \"name\": str,\n \"profileIconId\": int,\n \"revisionDate\": int,\n \"summonerLevel\": int\n })\n\n # Teamfight Tactics Types\n\n TftLeagueV1LeagueEntry = TypedDict(\"TftLeagueV1LeagueEntry\", {\n \"summonerId\": str,\n \"summonerName\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n })\n TftLeagueV1LeagueFullEntry = TypedDict(\"TftLeagueV1LeagueFullEntry\", {\n \"leagueId\": str,\n \"puuid\": str,\n \"summonerId\": str,\n \"summonerName\": str,\n \"queueType\": str,\n \"tier\": str,\n \"rank\": str,\n \"leaguePoints\": int,\n \"wins\": int,\n \"losses\": int,\n \"hotStreak\": bool,\n \"veteran\": bool,\n \"freshBlood\": bool,\n \"inactive\": bool,\n \"miniSeries\": NotRequired[LolLeagueV4LeagueEntryMiniSeries],\n })\n TftLeagueV1League = TypedDict(\"TftLeagueV1League\", {\n \"tier\": str,\n \"leagueId\": NotRequired[str],\n \"queue\": NotRequired[str],\n \"name\": NotRequired[str],\n \"entries\": list[TftLeagueV1LeagueEntry]\n })\n TftMatchV1MatchMetadata = TypedDict(\"TftMatchV1MatchMetadata\", {\n \"data_version\": str,\n \"match_id\": str,\n \"participants\": list[str]\n })\n TftMatchV1MatchInfoParticipantCompanion = TypedDict(\"TftMatchV1MatchInfoParticipantCompanion\", {\n \"content_ID\": str,\n \"item_ID\": int,\n \"skin_ID\": int,\n \"species\": str\n })\n TftMatchV1MatchInfoParticipantTrait = TypedDict(\"TftMatchV1MatchInfoParticipantTrait\", {\n \"name\": str,\n \"num_units\": int,\n \"style\": int,\n \"tier_current\": int,\n \"tier_total\": int\n })\n TftMatchV1MatchInfoParticipantUnit = TypedDict(\"TftMatchV1MatchInfoParticipantUnit\", {\n \"character_id\": str,\n \"itemNames\": list[str],\n \"name\": str,\n \"rarity\": int,\n \"tier\": int\n })\n TftMatchV1MatchInfoParticipant = TypedDict(\"TftMatchV1MatchInfoParticipant\", {\n \"augments\": list[str],\n \"companion\": TftMatchV1MatchInfoParticipantCompanion,\n \"gold_left\": int,\n \"last_round\": int,\n \"level\": int,\n \"placement\": int,\n \"players_eliminated\": int,\n \"puuid\": str,\n \"time_eliminated\": float,\n \"total_damage_to_players\": int,\n \"traits\": list[TftMatchV1MatchInfoParticipantTrait],\n \"units\": list[TftMatchV1MatchInfoParticipantUnit]\n })\n TftMatchV1MatchInfo = TypedDict(\"TftMatchV1MatchInfo\", {\n \"game_datetime\": int,\n \"game_length\": float,\n \"game_version\": str,\n \"participants\": 
list[TftMatchV1MatchInfoParticipant],\n \"queue_id\": int,\n \"tft_game_type\": str,\n \"tft_set_core_name\": str,\n \"tft_set_number\": int\n })\n TftMatchV1Match = TypedDict(\"TftMatchV1Match\", {\n \"metadata\": TftMatchV1MatchMetadata,\n \"info\": TftMatchV1MatchInfo\n })\n TftSummonerV1Summoner = LolSummonerV4Summoner\n\n # Legends of Runeterra Types\n\n LorRankedV1LeaderboardPlayer = TypedDict(\"LorRankedV1LeaderboardPlayer\", {\n \"name\": str,\n \"rank\": int,\n \"lp\": float\n })\n LorRankedV1Leaderboard = TypedDict(\"LorRankedV1Leaderboard\", {\n \"players\": list[LorRankedV1LeaderboardPlayer]\n })\n LorMatchV1MatchMetadata = TypedDict(\"LorMatchV1MatchMetadata\", {\n \"data_version\": str,\n \"match_id\": str,\n \"participants\": list[str]\n })\n LorMatchV1MatchInfoPlayer = TypedDict(\"LorMatchV1MatchInfoPlayer\", {\n \"puuid\": str,\n \"deck_id\": str,\n \"deck_code\": str,\n \"factions\": list[str],\n \"game_outcome\": str,\n \"order_of_play\": int\n })\n LorMatchV1MatchInfo = TypedDict(\"LorMatchV1MatchInfo\", {\n \"game_mode\": str,\n \"game_type\": str,\n \"game_start_time_utc\": str,\n \"game_version\": str,\n \"players\": list[LorMatchV1MatchInfoPlayer],\n \"total_turn_count\": int\n })\n LorMatchV1Match = TypedDict(\"LorMatchV1Match\", {\n \"metadata\": LorMatchV1MatchMetadata,\n \"info\": LorMatchV1MatchInfo\n })\n\n # Valorant Types\n\n ValContentV1ContentsAssetLocalizedNames = TypedDict(\"ValContentV1ContentsAssetLocalizedNames\", {\n \"ar-AE\": str,\n \"de-DE\": str,\n \"en-US\": str,\n \"es-ES\": str,\n \"es-MX\": str,\n \"fr-FR\": str,\n \"id-ID\": str,\n \"it-IT\": str,\n \"ja-JP\": str,\n \"ko-KR\": str,\n \"pl-PL\": str,\n \"pt-BR\": str,\n \"ru-RU\": str,\n \"th-TH\": str,\n \"tr-TR\": str,\n \"vi-VN\": str,\n \"zh-CN\": str,\n \"zh-TW\": str,\n })\n ValContentV1ContentsAsset = TypedDict(\"ValContentV1ContentsAsset\", {\n \"name\": str,\n \"id\": str,\n \"localizedNames\": NotRequired[ValContentV1ContentsAssetLocalizedNames],\n \"assetName\": str,\n \"assetPath\": NotRequired[str]\n })\n ValContentV1ContentsAct = TypedDict(\"ValContentV1ContentsAct\", {\n \"id\": str,\n \"localizedNames\": NotRequired[ValContentV1ContentsAssetLocalizedNames],\n \"parentId\": str,\n \"type\": str,\n \"name\": str,\n \"isActive\": bool\n })\n ValContentV1Contents = TypedDict(\"ValContentV1Contents\", {\n \"version\": str,\n \"characters\": list[ValContentV1ContentsAsset],\n \"maps\": list[ValContentV1ContentsAsset],\n \"chromas\": list[ValContentV1ContentsAsset],\n \"skins\": list[ValContentV1ContentsAsset],\n \"skinLevels\": list[ValContentV1ContentsAsset],\n \"equips\": list[ValContentV1ContentsAsset],\n \"gameModes\": list[ValContentV1ContentsAsset],\n \"totems\": list[ValContentV1ContentsAsset],\n \"sprays\": list[ValContentV1ContentsAsset],\n \"sprayLevels\": list[ValContentV1ContentsAsset],\n \"charms\": list[ValContentV1ContentsAsset],\n \"charmLevels\": list[ValContentV1ContentsAsset],\n \"playerCards\": list[ValContentV1ContentsAsset],\n \"playerTitles\": list[ValContentV1ContentsAsset],\n \"acts\": list[ValContentV1ContentsAct],\n \"ceremonies\": list[ValContentV1ContentsAsset]\n })\n \n ValRankedV1LeaderboardTierDetail = TypedDict(\"ValRankedV1LeaderboardTierDetail\", {\n \"rankedRatingThreshold\": int,\n \"startingPage\": int,\n \"startingIndex\": int\n })\n ValRankedV1LeaderboardPlayer = TypedDict(\"ValRankedV1LeaderboardPlayer\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n \"leaderboardRank\": int,\n \"rankedRating\": int,\n \"numberOfWins\": 
int,\n \"competitiveTier\": int\n })\n ValRankedV1LeaderboardTierDetails = TypedDict(\"ValRankedV1LeaderboardTierDetails\", {\n \"24\": ValRankedV1LeaderboardTierDetail,\n \"25\": ValRankedV1LeaderboardTierDetail,\n \"26\": ValRankedV1LeaderboardTierDetail,\n \"27\": ValRankedV1LeaderboardTierDetail\n })\n ValRankedV1Leaderboard = TypedDict(\"ValRankedV1Leaderboard\", {\n \"actId\": str,\n \"players\": list[ValRankedV1LeaderboardPlayer],\n \"totalPlayers\": int,\n \"immortalStartingPage\": int,\n \"immortalStartingIndex\": int,\n \"topTierRRThreshold\": int,\n \"tierDetails\": ValRankedV1LeaderboardTierDetails,\n \"startIndex\": int,\n \"query\": str,\n \"shard\": str\n })\n ValMatchV1MatchLocation = TypedDict(\"ValMatchV1MatchLocation\", {\n \"x\": float,\n \"y\": float\n })\n ValMatchV1MatchPlayerLocation = TypedDict(\"ValMatchV1MatchPlayerLocation\", {\n \"puuid\": str,\n \"viewRadians\": float,\n \"location\": ValMatchV1MatchLocation\n })\n ValMatchV1MatchInfo = TypedDict(\"ValMatchV1MatchInfo\", {\n \"matchId\": str,\n \"mapId\": str,\n \"gameVersion\": str,\n \"gameLengthMillis\": int,\n \"region\": str,\n \"gameStartMillis\": int,\n \"provisioningFlowId\": str,\n \"isCompleted\": bool,\n \"customGameName\": str,\n \"queueId\": str,\n \"gameMode\": str,\n \"isRanked\": bool,\n \"premierMatchInfo\": dict,\n \"seasonId\": str\n })\n ValMatchV1MatchPlayerStatsAbilityCasts = TypedDict(\"ValMatchV1MatchPlayerStatsAbilityCasts\", {\n \"grenadeCasts\": int,\n \"ability1Casts\": int,\n \"ability2Casts\": int,\n \"ultimateCasts\": int\n })\n ValMatchV1MatchPlayerStats = TypedDict(\"ValMatchV1MatchPlayerStats\", {\n \"score\": int,\n \"roundsPlayed\": int,\n \"kills\": int,\n \"deaths\": int,\n \"assists\": int,\n \"playtimeMillis\": int,\n \"abilityCasts\": ValMatchV1MatchPlayerStatsAbilityCasts | None\n })\n ValMatchV1MatchPlayer = TypedDict(\"ValMatchV1MatchPlayer\", {\n \"puuid\": str,\n \"gameName\": str,\n \"tagLine\": str,\n \"teamId\": str,\n \"partyId\": str,\n \"characterId\": str,\n \"stats\": ValMatchV1MatchPlayerStats,\n \"competitiveTier\": int,\n \"isObserver\": bool,\n \"playerCard\": str,\n \"playerTitle\": str,\n \"accountLevel\": int\n })\n ValMatchV1MatchTeam = TypedDict(\"ValMatchV1MatchTeam\", {\n \"teamId\": str,\n \"won\": bool,\n \"roundsPlayed\": int,\n \"roundsWon\": int,\n \"numPoints\": int\n })\n ValMatchV1MatchRoundResultPlayerStatKill = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatKill\", {\n \"timeSinceGameStartMillis\": int,\n \"timeSinceRoundStartMillis\": int,\n \"killer\": str,\n \"victim\": str,\n \"victimLocation\": ValMatchV1MatchLocation,\n \"assistants\": list[str],\n \"playerLocations\": list[ValMatchV1MatchPlayerLocation],\n \"finishingDamage\": TypedDict(\"FinishingDamage\", {\n \"damageType\": str,\n \"damageItem\": str,\n \"isSecondaryFireMode\": bool\n })\n })\n ValMatchV1MatchRoundResultPlayerStatDamage = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatDamage\", {\n \"receiver\": str,\n \"damage\": int,\n \"legshots\": int,\n \"bodyshots\": int,\n \"headshots\": int\n })\n ValMatchV1MatchRoundResultPlayerStatEconomy = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatEconomy\", {\n \"loadoutValue\": int,\n \"weapon\": str,\n \"armor\": str,\n \"remaining\": int,\n \"spent\": int\n })\n ValMatchV1MatchRoundResultPlayerStatAbility = TypedDict(\"ValMatchV1MatchRoundResultPlayerStatAbility\", {\n \"grenadeEffects\": str | None,\n \"ability1Effects\": str | None,\n \"ability2Effects\": str | None,\n \"ultimateEffects\": str | None\n })\n 
ValMatchV1MatchRoundResultPlayerStat = TypedDict(\"ValMatchV1MatchRoundResultPlayerStat\", {\n \"puuid\": str,\n \"kills\": list[ValMatchV1MatchRoundResultPlayerStatKill],\n \"damage\": list[ValMatchV1MatchRoundResultPlayerStatDamage],\n \"score\": int,\n \"economy\": ValMatchV1MatchRoundResultPlayerStatEconomy,\n \"ability\": ValMatchV1MatchRoundResultPlayerStatAbility\n })\n ValMatchV1MatchRoundResult = TypedDict(\"ValMatchV1MatchRoundResult\", {\n \"roundNum\": int,\n \"roundResult\": str,\n \"roundCeremony\": str,\n \"winningTeam\": str,\n \"bombPlanter\": str | None,\n \"bombDefuser\": str | None,\n \"plantRoundTime\": int,\n \"plantPlayerLocations\": list[ValMatchV1MatchPlayerLocation] | None,\n \"plantLocation\": ValMatchV1MatchLocation,\n \"plantSite\": str,\n \"defuseRoundTime\": int,\n \"defusePlayerLocations\": list[ValMatchV1MatchPlayerLocation] | None,\n \"defuseLocation\": ValMatchV1MatchLocation,\n \"playerStats\": list[ValMatchV1MatchRoundResultPlayerStat],\n \"roundResultCode\": str\n })\n ValMatchV1Match = TypedDict(\"ValMatchV1Match\", {\n \"matchInfo\": ValMatchV1MatchInfo,\n \"players\": list[ValMatchV1MatchPlayer],\n \"coaches\": list[str],\n \"teams\": list[ValMatchV1MatchTeam],\n \"roundResults\": list[ValMatchV1MatchRoundResult]\n })\n ValMatchV1MatchlistHistory = TypedDict(\"ValMatchV1MatchlistHistory\", {\n \"matchId\": str,\n \"gameStartTimeMillis\": int,\n \"queueId\": str\n })\n ValMatchV1Matchlist = TypedDict(\"ValMatchV1Matchlist\", {\n \"puuid\": str,\n \"history\": list[ValMatchV1MatchlistHistory]\n })\n ValMatchV1RecentMatches = TypedDict(\"ValMatchV1RecentMatches\", {\n \"currentTime\": int,\n \"matchIds\": list[str]\n })\n\n # Status Types\n\n StatusV1PlatformDataLocaleContent = TypedDict(\"StatusV1PlatformDataLocaleContent\", {\n \"locale\": str,\n \"content\": str\n })\n StatusV1PlatformDataEntryUpdate = TypedDict(\"StatusV1PlatformDataEntryUpdate\", {\n \"id\": int,\n \"created_at\": str,\n \"updated_at\": str,\n \"publish\": bool,\n \"author\": str,\n \"translations\": list[StatusV1PlatformDataLocaleContent],\n \"publish_locations\": list[str]\n })\n StatusV1PlatformDataEntry = TypedDict(\"StatusV1PlatformDataEntry\", {\n \"id\": int,\n \"created_at\": str,\n \"updated_at\": str | None,\n \"archive_at\": str | None,\n \"titles\": list[StatusV1PlatformDataLocaleContent],\n \"updates\": list[StatusV1PlatformDataEntryUpdate],\n \"platforms\": list[str],\n \"maintenance_status\": str | None,\n \"incident_severity\": str | None\n })\n StatusV1PlatformData = TypedDict(\"StatusV4PlatformData\", {\n \"id\": str,\n \"name\": str,\n \"locales\": list[str],\n \"maintenances\": list[StatusV1PlatformDataEntry],\n \"incidents\": list[StatusV1PlatformDataEntry]\n })" } ]
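A minimal sketch of how keyword arguments map onto the `{placeholder}` path segments used by the endpoint wrappers in the snippet above; the actual `invoke()` implementation is not included in this record's context, so the helper below is an assumption for illustration only.

```python
# Hypothetical helper, for illustration only; pulsefire's real invoke() is not
# shown in this record and may differ.
def fill_path(template: str, **params) -> str:
    """Substitute {placeholder} segments of an endpoint path with keyword arguments."""
    return template.format(**params)


# Mirrors how get_lol_match_v5_match(region=..., id=...) targets its path.
assert fill_path("/lol/match/v5/matches/{id}", id="NA1_1234567890") == "/lol/match/v5/matches/NA1_1234567890"
```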
import asyncio import os from pulsefire.clients import RiotAPIClient from pulsefire.functools import async_to_sync from pulsefire.schemas import RiotAPISchema
15,782
@async_to_sync() async def test_concurrent_request_alt2(): async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client: account = await client.get_account_v1_by_riot_id(region="americas", game_name="Not a Whale", tag_line="NA1") summoner = await client.get_lol_summoner_v4_by_puuid(region="na1", puuid=account["puuid"]) match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"]) tasks: list[asyncio.Task] = [] async with asyncio.TaskGroup() as tg: for match_id in match_ids[:20]: tasks.append(tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id)))
@async_to_sync() async def test_concurrent_request_alt2(): async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client: account = await client.get_account_v1_by_riot_id(region="americas", game_name="Not a Whale", tag_line="NA1") summoner = await client.get_lol_summoner_v4_by_puuid(region="na1", puuid=account["puuid"]) match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"]) tasks: list[asyncio.Task] = [] async with asyncio.TaskGroup() as tg: for match_id in match_ids[:20]: tasks.append(tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id)))
matches: list[RiotAPISchema.LolMatchV5Match] = [task.result() for task in tasks]
2
2023-11-27 13:37:24+00:00
24k
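Assembled view of this record's completion target: the cropped_code plus the gold next_line form the test below. This is a sketch built only from the record's own imports and code, assuming Python 3.11+ (for `asyncio.TaskGroup`) and a valid `RIOT_API_KEY` environment variable.

```python
import asyncio
import os

from pulsefire.clients import RiotAPIClient
from pulsefire.functools import async_to_sync
from pulsefire.schemas import RiotAPISchema


@async_to_sync()
async def test_concurrent_request_alt2():
    async with RiotAPIClient(default_headers={"X-Riot-Token": os.environ["RIOT_API_KEY"]}) as client:
        account = await client.get_account_v1_by_riot_id(region="americas", game_name="Not a Whale", tag_line="NA1")
        summoner = await client.get_lol_summoner_v4_by_puuid(region="na1", puuid=account["puuid"])
        match_ids = await client.get_lol_match_v5_match_ids_by_puuid(region="americas", puuid=summoner["puuid"])

        # Fan out up to 20 match requests concurrently; TaskGroup awaits them all.
        tasks: list[asyncio.Task] = []
        async with asyncio.TaskGroup() as tg:
            for match_id in match_ids[:20]:
                tasks.append(tg.create_task(client.get_lol_match_v5_match(region="americas", id=match_id)))
        # Gold next_line: collect the completed results in order.
        matches: list[RiotAPISchema.LolMatchV5Match] = [task.result() for task in tasks]
```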
Matrixeigs/UncertaintyManagementInteroperablePowerTransportationSystems
TestCaseDistributionSystems/uc_mmgs_fully_recourse_problem.py
[ { "identifier": "case33", "path": "TestCaseDistributionSystems/test_cases/case33.py", "snippet": "def case33():\n \"\"\"Power flow data for 33 bus, 6 generator case.\n Please see L{caseformat} for details on the case file format.\n\n Based on data from ...\n\n Alsac, O. & Stott, B., I{\"Optimal Load Flow with Steady State Security\"},\n IEEE Transactions on Power Apparatus and Systems, Vol. PAS 93, No. 3,\n 1974, pp. 745-751.\n\n ... with branch parameters rounded to nearest 0.01, shunt values divided\n by 100 and shunt on bus 10 moved to bus 5, load at bus 5 zeroed out.\n Generator locations, costs and limits and bus areas were taken from ...\n\n Ferrero, R.W., Shahidehpour, S.M., Ramesh, V.C., I{\"Transaction analysis\n in deregulated power systems using game theory\"}, IEEE Transactions on\n Power Systems, Vol. 12, No. 3, Aug 1997, pp. 1340-1347.\n\n Generator Q limits were derived from Alsac & Stott, using their Pmax\n capacities. V limits and line |S| limits taken from Alsac & Stott.\n\n @return: Power flow data for 30 bus, 6 generator case.\n @see: U{http://www.pserc.cornell.edu/matpower/}\n \"\"\"\n ppc = {\"version\": '2'}\n\n ##----- Power Flow Data -----##\n ## system MVA base\n ppc[\"baseMVA\"] = 100.0\n\n ## bus data\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\n ppc[\"bus\"] = array([\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [2, 1, 0.1, 0.06, 0, 0, 1, 1, 0, 12.66, 1, 1.1, 0.95],\n [3, 1, 0.09, 0.04, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [4, 1, 0.12, 0.08, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [5, 1, 0.06, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [6, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [7, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [8, 1, 0.2, 0.1, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [9, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [10, 1, 0.06, 0.02, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [11, 1, 0.045, 0.03, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [12, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [13, 1, 0.06, 0.035, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [14, 1, 0.12, 0.08, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [15, 1, 0.06, 0.01, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [16, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [17, 1, 0.06, 0.02, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [18, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [19, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [20, 1, 0.09, 0.04, 0, 0, 2, 1, 0, 12.66, 1, 1.05, 0.95],\n [21, 1, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [22, 2, 0.09, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [23, 2, 0.09, 0.05, 0, 0, 2, 1, 0, 12.66, 1, 1.1, 0.95],\n [24, 1, 0.42, 0.20, 0, 0.04, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [25, 1, 0.42, 0.2, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [26, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [27, 1, 0.06, 0.025, 0, 0, 3, 1, 0, 12.66, 1, 1.1, 0.95],\n [28, 1, 0.06, 0.02, 0, 0, 1, 1, 0, 12.66, 1, 1.05, 0.95],\n [29, 1, 0.12, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [30, 1, 0.2, 0.6, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [31, 1, 0.15, 0.07, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [32, 1, 0.21, 0.1, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n [33, 1, 0.06, 0.04, 0, 0, 3, 1, 0, 12.66, 1, 1.05, 0.95],\n ])\n\n ## generator data\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf, start-up time, shut-down time and initial condition!\n ppc[\"gen\"] = array([\n [1, 
23.54, 0, 150, -20, 1, 100, 1, 80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 2, 1],\n ])\n\n ## branch data\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\n ppc[\"branch\"] = array([\n [1, 2, 0.057525912, 0.029324489, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [2, 3, 0.307595167, 0.15666764, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [3, 4, 0.228356656, 0.116299674, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [4, 5, 0.237777928, 0.121103899, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [5, 6, 0.510994811, 0.441115179, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [6, 7, 0.116798814, 0.386084969, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [7, 8, 0.44386045, 0.146684835, 0, 90, 90, 90, 0, 0, 1, -360, 360],\n [8, 9, 0.642643047, 0.461704714, 0, 70, 70, 70, 0, 0, 1, -360, 360],\n [9, 10, 0.651378001, 0.461704714, 0, 130, 130, 130, 0, 0, 1, -360, 360],\n [10, 11, 0.122663712, 0.040555144, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [11, 12, 0.233597628, 0.077241951, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [12, 13, 0.915922324, 0.720633708, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [13, 14, 0.337917936, 0.444796338, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [14, 15, 0.368739846, 0.328184702, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [15, 16, 0.465635443, 0.340039282, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [16, 17, 0.804239697, 1.073775422, 0, 65, 65, 65, 0, 0, 1, -360, 360],\n [17, 18, 0.456713311, 0.358133116, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [2, 19, 0.102323747, 0.097644308, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [19, 20, 0.938508419, 0.845668336, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [20, 21, 0.255497406, 0.298485858, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [21, 22, 0.442300637, 0.584805173, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [3, 23, 0.28151509, 0.192356167, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [23, 24, 0.560284909, 0.442425422, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [24, 25, 0.559037059, 0.43743402, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [6, 26, 0.126656834, 0.064513875, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [26, 27, 0.177319567, 0.090281989, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [27, 28, 0.660736881, 0.582559042, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [28, 29, 0.501760717, 0.437122057, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [29, 30, 0.316642084, 0.161284687, 0, 32, 32, 32, 0, 0, 1, -360, 360],\n [30, 31, 0.607952801, 0.600840053, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [31, 32, 0.193728802, 0.225798562, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [32, 33, 0.212758523, 0.330805188, 0, 16, 16, 16, 0, 0, 1, -360, 360],\n [7, 20, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [8, 14, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [11, 21, 1.2479, 1.2479, 0, 16, 16, 16, 0, 0, 0, -360, 360],\n [17, 32, 0.3120, 0.3120, 0, 65, 65, 65, 0, 0, 0, -360, 360],\n [24, 28, 0.3120, 0.3120, 0, 16, 16, 16, 0, 0, 0, -360, 360]\n ])\n\n ##----- OPF Data -----##\n ## area data\n # area refbus\n ppc[\"areas\"] = array([\n [1, 8],\n [2, 23],\n [3, 26],\n ])\n\n ## generator cost data\n # 1 startup shutdown n x1 y1 ... xn yn\n # 2 startup shutdown n c(n-1) ... 
c0\n ppc[\"gencost\"] = array([\n [0, 0, 0, 3, 0.0, 20, 0]\n ])\n\n return ppc" }, { "identifier": "micro_grid", "path": "TestCasesMicrogrids/test_cases/cases_unit_commitment.py", "snippet": "AC_PD = array([323.0284, 308.2374, 318.1886, 307.9809, 331.2170, 368.6539, 702.0040, 577.7045, 1180.4547, 1227.6240,\n 1282.9344, 1311.9738, 1268.9502, 1321.7436, 1323.9218, 1327.1464, 1386.9117, 1321.6387, 1132.0476,\n 1109.2701, 882.5698, 832.4520, 349.3568, 299.9920])\nDC_PD = array([287.7698, 287.7698, 287.7698, 287.7698, 299.9920, 349.3582, 774.4047, 664.0625, 1132.6996, 1107.7366,\n 1069.6837, 1068.9819, 1027.3295, 1096.3820, 1109.4778, 1110.7039, 1160.1270, 1078.7839, 852.2514,\n 791.5814, 575.4085, 551.1441, 349.3568, 299.992])\nDG = {\"PMIN\": 0,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST_A\": 0.01,\n \"COST_B\": 0.5}\nUG = {\"PMIN\": -5,\n \"PMAX\": 5,\n \"QMIN\": -5,\n \"QMAX\": 5,\n \"COST\": Price_UG, } # The cost should be a profile\nESS = {\"PDC_MAX\": 5,\n \"PCH_MAX\": 5,\n \"EFF_DC\": 0.95,\n \"EFF_CH\": 0.95,\n \"E0\": 10,\n \"EMIN\": 5,\n \"EMAX\": 20, }\nBIC = {\"PMAX\": 5,\n \"QMAX\": 5,\n \"SMAX\": 5,\n \"EFF_AC2DC\": 0.9,\n \"EFF_DC2AC\": 0.9, }\nMG = {\"PMAX\": 5,\n \"PMIN\": -5,\n \"QMAX\": 5,\n \"QMIN\": -5\n }\nPD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5,\n \"DC\": DC_PD / max(DC_PD),\n \"DC_MAX\": 5}\nQD = {\"AC\": AC_PD / max(AC_PD),\n \"AC_MAX\": 5, }\nPV = {\"PMAX\": 0,\n \"COST\": 0}" }, { "identifier": "PBIC_AC2DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_AC2DC = 4" }, { "identifier": "PG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PG = 0" }, { "identifier": "PESS_DC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_DC = 8" }, { "identifier": "PBIC_DC2AC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PBIC_DC2AC = 5" }, { "identifier": "PUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PUG = 2" }, { "identifier": "PESS_CH", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PESS_CH = 7" }, { "identifier": "PMESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PMESS = 11 # Reactive power unit commitment of" }, { "identifier": "EESS", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "EESS = 9" }, { "identifier": "NX_MG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "NX_MG = 12" }, { "identifier": "QBIC", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QBIC = 6" }, { "identifier": "QUG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QUG = 3" }, { "identifier": "QG", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "QG = 1" }, { "identifier": "PPV", "path": "TestCaseDistributionSystems/data_format/idx_MG_PV.py", "snippet": "PPV = 10" }, { "identifier": "DataBaseManagement", "path": "TestCaseDistributionSystems/database_management_pv.py", "snippet": "class DataBaseManagement():\n\n def __init__(self, host=\"localhost\", user=\"ems\", password=\"12345678\", db=\"mess_pv\"):\n \"\"\"\n Initialized the database connection string\n :param host: host ip\n :param user: user name\n :param password: password\n :param db: database name\n :return\n \"\"\"\n self.db = pymysql.connect(host=host, user=user, password=password, db=db)\n\n def create_table(self, table_name, nl=32, nb=33, ng=6, nmg=3):\n \"\"\"\n 
Creat table name\n :param table_name:\n :param nb:\n :param nb:\n :param ng:\n :return: no return value\n \"\"\"\n cursor = self.db.cursor()\n sql = \"DROP TABLE IF EXISTS \"\n cursor.execute(sql + table_name)\n if table_name == \"distribution_networks\":\n sql_start = \"\"\"CREATE TABLE distribution_networks (\"\"\"\n sql = 'SCENARIO INT,\\n TIME INT NOT NULL,\\n '\n for i in range(nl):\n sql += \"PIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"QIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nl):\n sql += \"IIJ{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(nb):\n sql += \"V{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,6),\\n \".format(i)\n for i in range(ng - 1):\n sql += \"QG{0} DECIMAL(8,6),\\n \".format(i)\n sql += \"QG{0} DECIMAL(8,6)\\n \".format(ng - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"micro_grids\":\n sql_start = \"\"\"CREATE TABLE micro_grids (\"\"\"\n sql = 'SCENARIO INT,\\n MG INT,\\n TIME INT,\\n '\n sql += 'PG DECIMAL(8,4),\\n QG DECIMAL(8,4),\\n PUG DECIMAL(8,4),\\n QUG DECIMAL(8,4),\\n '\n sql += 'PBIC_AC2DC DECIMAL(8,4),\\n PBIC_DC2AC DECIMAL(8,4),\\n QBIC DECIMAL(8,4),\\n PESS_CH DECIMAL(7,4),\\n '\n sql += 'PESS_DC DECIMAL(8,4),\\n EESS DECIMAL(8,4),\\n PPV DECIMAL(8,4),\\n PMESS DECIMAL(8,4)'\n sql_end = \"\"\")\"\"\"\n elif table_name == \"mobile_energy_storage_systems\":\n sql_start = \"\"\"CREATE TABLE mobile_energy_storage_systems (\"\"\"\n sql = 'SCENARIO INT,\\n MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"EESS DECIMAL(8,4)\\n \"\n sql_end = \"\"\")\"\"\"\n elif table_name == \"first_stage_solutions\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE first_stage_solutions (\"\"\"\n sql = 'TIME INT,\\n'\n for i in range(ng):\n sql += \"PG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"IESS{0} INT,\\n \".format(i)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"ESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RG_MG{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"IESS{0} INT,\\n \".format(nmg - 1)\n sql += \"PESS_DC{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"PESS_CH{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"RESS{0} DECIMAL(8,4),\\n \".format(nmg - 1)\n sql += \"ESS{0} DECIMAL(8,4)\\n \".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n elif table_name == \"fisrt_stage_mess\": # First-stage solution table\n sql_start = \"\"\"CREATE TABLE fisrt_stage_mess (\"\"\"\n sql = 'MESS INT,\\n TIME INT,\\n'\n for i in range(nmg):\n sql += \"IDC_MG{0} INT,\\n \".format(i)\n for i in range(nmg):\n sql += \"PDC_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PCH_MG{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"RMESS{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"MESS_F_STOP INT,\\n \"\n sql += \"MESS_T_STOP INT\\n \"\n sql_end = \"\"\")\"\"\"\n else:\n sql_start = \"\"\"CREATE TABLE scenarios (\"\"\"\n sql = 'SCENARIO INT,\\n WEIGHT DECIMAL(8,4),\\n TIME INT,\\n'\n for i in range(nb):\n sql += \"PD{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += 
\"PD_AC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg):\n sql += \"PD_DC{0} DECIMAL(8,4),\\n \".format(i)\n for i in range(nmg - 1):\n sql += \"PPV{0} DECIMAL(8,4),\\n \".format(i)\n sql += \"PPV{0} DECIMAL(8,4)\\n\".format(nmg - 1)\n sql_end = \"\"\")\"\"\"\n\n cursor.execute(sql_start + sql + sql_end)\n cursor.close()\n\n def insert_data_ds(self, table_name, nl=32, nb=33, ng=6, scenario=0, time=0, pij=0, qij=0, lij=0, vi=0, pg=0, qg=0):\n \"\"\"\n Insert data into table_name\n :param table_name:\n :param nl:\n :param nb:\n :param ng:\n :param pij:\n :param qij:\n :param lij:\n :param vi:\n :param pg:\n :param qg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,TIME,\"\n value = \"{0},{1},\".format(scenario, time)\n for i in range(nl):\n sql += \"PIJ{0},\".format(i)\n value += \"{0},\".format(pij[i])\n for i in range(nl):\n sql += \"QIJ{0},\".format(i)\n value += \"{0},\".format(qij[i])\n for i in range(nl):\n sql += \"IIJ{0},\".format(i)\n value += \"{0},\".format(lij[i])\n for i in range(nb):\n sql += \"V{0},\".format(i)\n value += \"{0},\".format(vi[i])\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n for i in range(ng - 1):\n sql += \"QG{0},\".format(i)\n value += \"{0},\".format(qg[i])\n sql += \"QG{0}\".format(ng - 1)\n value += \"{0}\".format(qg[ng - 1])\n\n sql += \") VALUES (\" + value + \")\"\n\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mg(self, table_name, scenario=0, time=0, mg=0, pg=0, qg=0, pug=0, qug=0, pbic_ac2dc=0, pbic_dc2ac=0,\n qbic=0, pess_ch=0, pess_dc=0, eess=0, pmess=0, ppv=0):\n \"\"\"\n insert microgrid data\n :param table_name:\n :param scenario:\n :param time:\n :param mg:\n :param pg:\n :param qg:\n :param pug:\n :param qug:\n :param pbic_ac2dc:\n :param pbic_dc2ac:\n :param qbic:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param pmess:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MG,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mg, time)\n sql += \"PG,QG,PUG,QUG,PBIC_AC2DC,PBIC_DC2AC,QBIC,PESS_CH,PESS_DC,EESS,PPV,PMESS\"\n value += \"{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11}\".format(pg, qg, pug, qug, pbic_ac2dc, pbic_dc2ac,\n qbic, pess_ch, pess_dc, eess, ppv, pmess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage_mess(self, table_name, time=0, mess=0, imess=[0, 0, 0], pmess_ch=[0, 0, 0],\n pmess_dc=[0, 0, 0], rmess=[0, 0, 0], mess_f_stop=0, mess_t_stop=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data in the first-stage\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"MESS,TIME,\"\n value = \"{0},{1},\".format(mess, time)\n for i in range(nmg):\n sql += \"IDC_MG{0},\".format(i)\n value += \"{0},\".format(imess[i])\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n for i in range(nmg):\n sql += \"RMESS{0},\".format(i)\n value += \"{0},\".format(rmess[i])\n sql += \"MESS_F_STOP,MESS_T_STOP\"\n value += \"{0},{1}\".format(mess_f_stop, mess_t_stop)\n sql += 
\") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_mess(self, table_name, scenario=0, time=0, mess=0, pmess_ch=[0, 0, 0], pmess_dc=[0, 0, 0],\n emess=0, nmg=3):\n \"\"\"\n insert mobile energy storage systems data\n :param table_name:\n :param scenario:\n :param time:\n :param mess:\n :param pess_ch:\n :param pess_dc:\n :param eess:\n :param nmg:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,MESS,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, mess, time)\n for i in range(nmg):\n sql += \"PDC_MG{0},\".format(i)\n value += \"{0},\".format(pmess_dc[i])\n for i in range(nmg):\n sql += \"PCH_MG{0},\".format(i)\n value += \"{0},\".format(pmess_ch[i])\n sql += \"EESS\"\n value += \"{0}\".format(emess)\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_first_stage(self, table_name, time=0, ng=2, nmg=2, pg=[0, 0], rg=[0, 0], pg_mg=[0, 0],\n rg_mg=[0, 0], iess=[0, 0], pess_dc=[0, 0], pess_ch=[0, 0], ress=[0, 0], ess=[0, 0]):\n \"\"\"\n insert scenario data\n :param table_name:\n :param scenario:\n :param weight:\n :param time:\n :param nb:\n :param nmg:\n :param pd:\n :param pd_ac:\n :param pd_dc:\n :return:\n \"\"\"\n cursor = self.db.cursor()\n sql_start = \"INSERT INTO \" + table_name + \" (\"\n sql = \"TIME,\"\n value = \"{0},\".format(time)\n for i in range(ng):\n sql += \"PG{0},\".format(i)\n sql += \"RG{0},\".format(i)\n value += \"{0},\".format(pg[i])\n value += \"{0},\".format(rg[i])\n if nmg > 1:\n for i in range(nmg - 1):\n sql += \"PG_MG{0},\".format(i)\n sql += \"RG_MG{0},\".format(i)\n sql += \"IESS{0},\".format(i)\n sql += \"PESS_DC{0},\".format(i)\n sql += \"PESS_CH{0},\".format(i)\n sql += \"RESS{0},\".format(i)\n sql += \"ESS{0},\".format(i)\n value += \"{0},\".format(pg_mg[i])\n value += \"{0},\".format(rg_mg[i])\n value += \"{0},\".format(iess[i])\n value += \"{0},\".format(pess_dc[i])\n value += \"{0},\".format(pess_ch[i])\n value += \"{0},\".format(ress[i])\n value += \"{0},\".format(ess[i])\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg[nmg - 1])\n value += \"{0},\".format(rg_mg[nmg - 1])\n value += \"{0},\".format(iess[nmg - 1])\n value += \"{0},\".format(pess_dc[nmg - 1])\n value += \"{0},\".format(pess_ch[nmg - 1])\n value += \"{0},\".format(ress[nmg - 1])\n value += \"{0}\".format(ess[nmg - 1])\n else:\n sql += \"PG_MG{0},\".format(nmg - 1)\n sql += \"RG_MG{0},\".format(nmg - 1)\n sql += \"IESS{0},\".format(nmg - 1)\n sql += \"PESS_DC{0},\".format(nmg - 1)\n sql += \"PESS_CH{0},\".format(nmg - 1)\n sql += \"RESS{0},\".format(nmg - 1)\n sql += \"ESS{0}\".format(nmg - 1)\n value += \"{0},\".format(pg_mg)\n value += \"{0},\".format(rg_mg)\n value += \"{0},\".format(iess)\n value += \"{0},\".format(pess_dc)\n value += \"{0},\".format(pess_ch)\n value += \"{0},\".format(ress)\n value += \"{0}\".format(ess)\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def insert_data_scenario(self, table_name, scenario=0, weight=0, time=0, nb=1, nmg=2, pd=[0, 0], pd_ac=[0, 0],\n pd_dc=[0, 0], ppv=[0, 0]):\n cursor = self.db.cursor()\n sql_start = 
\"INSERT INTO \" + table_name + \" (\"\n sql = \"SCENARIO,WEIGHT,TIME,\"\n value = \"{0},{1},{2},\".format(scenario, weight, time)\n for i in range(nb):\n sql += \"PD{0},\".format(i)\n value += \"{0},\".format(pd[i])\n for i in range(nmg):\n sql += \"PD_AC{0},\".format(i)\n value += \"{0},\".format(pd_ac[i])\n for i in range(nmg):\n sql += \"PD_DC{0},\".format(i)\n value += \"{0},\".format(pd_dc[i])\n for i in range(nmg - 1):\n sql += \"PPV{0},\".format(i)\n value += \"{0},\".format(ppv[i])\n if nmg > 1:\n sql += \"PPV{0}\".format(nmg - 1)\n value += \"{0}\".format(ppv[nmg - 1])\n\n sql += \") VALUES (\" + value + \")\"\n cursor.execute(sql_start + sql)\n self.db.commit()\n cursor.close()\n\n def inquery_data_scenario(self, table_name, scenario=0, time=0):\n cursor = self.db.cursor()\n # sql = \"SELECT * FROM \" + table_name + \" ;\"\n sql = \"SELECT * FROM \" + table_name + \" WHERE SCENARIO={0} AND TIME={1};\".format(scenario, time)\n cursor.execute(sql)\n data = cursor.fetchall()\n n_data = len(data[0])\n\n temp = []\n for i in range(n_data): temp.append(float(data[0][i]))\n\n cursor.close()\n return temp" }, { "identifier": "ScenarioReduction", "path": "StochasticOptimization/scenario_reduction.py", "snippet": "class ScenarioReduction():\n def __init__(self):\n self.name = \"Scenario reduction\"\n\n def run(self, scenario, weight, n_reduced, power):\n \"\"\"\n\n :param scenario: A fan scenario tree, when more stage are considered, some merge operation can be implemented\n :param weight: Weight of each scenario\n :param n_reduced: Number of scenarios needs to be reduced\n :param power: The power in the distance calculation\n :return:\n \"\"\"\n n_scenario = scenario.shape[0] # number of original scenarios\n c = zeros((n_scenario, n_scenario))\n # Calculate the c matrix\n for i in range(n_scenario):\n for j in range(n_scenario):\n c[i, j] = linalg.norm((scenario[i, :] - scenario[j, :]), 2)\n c[i, j] = max([1, linalg.norm(scenario[i, :], power - 1), linalg.norm(scenario[j, :], power - 1)]) * \\\n c[i, j]\n\n J = arange(n_scenario) # The original index range\n J_reduced = array([])\n # Implement the iteration\n for n in range(n_reduced): # find the minimal distance\n print(\"The reduction is in process {0}\".format(n))\n c_n = inf * ones(n_scenario)\n c_n[J] = 0\n for u in J:\n # Delete the i-th distance\n J_temp = delete(J, where(J == u))\n for k in J_temp:\n c_k_j = delete(c[int(k)], J_temp)\n c_n[int(u)] += weight[int(k)] * min(c_k_j)\n u_i = argmin(c_n)\n J_reduced = append(J_reduced, u_i)\n J = delete(J, where(J == u_i))\n # Optimal redistribution\n p_s = weight.copy()\n p_s[J_reduced.astype(int)] = 0\n\n for i in J_reduced:\n c_temp = c[int(i), :]\n c_temp[J_reduced.astype(int)] = inf\n index = argmin(c_temp)\n p_s[index] += weight[int(i)]\n\n scenario_reduced = scenario[J.astype(int), :]\n weight_reduced = p_s[J.astype(int)]\n\n return scenario_reduced, weight_reduced" } ]
from TestCaseDistributionSystems.test_cases import case33 from TestCasesMicrogrids.test_cases.cases_unit_commitment import micro_grid from TestCasesTransportationSystems.test_cases import case3, TIME, LOCATION from scipy import zeros, shape, ones, diag, concatenate, eye from scipy.sparse import csr_matrix as sparse from scipy.sparse import hstack, vstack, lil_matrix from numpy import flatnonzero as find from numpy import array, tile, arange, random from pypower.idx_brch import F_BUS, T_BUS, BR_R, BR_X, RATE_A from pypower.idx_bus import PD, VMAX, VMIN, QD, BUS_I from pypower.idx_gen import GEN_BUS, PMAX, PMIN, QMAX, QMIN from pypower.ext2int import ext2int from Solvers.mixed_integer_quadratic_constrained_cplex import mixed_integer_quadratic_constrained_programming as miqcp from Solvers.mixed_integer_programming_gurobi import mixed_integer_linear_programming as milp from copy import deepcopy from TestCaseDistributionSystems.data_format.idx_MG_PV import PBIC_AC2DC, PG, PESS_DC, PBIC_DC2AC, PUG, PESS_CH, \ PMESS, EESS, NX_MG, QBIC, QUG, QG, PPV from TestCaseDistributionSystems.database_management_pv import DataBaseManagement from StochasticOptimization.scenario_reduction import ScenarioReduction
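The import_statement above pulls zeros, shape, ones, diag, concatenate and eye from the scipy namespace; these are legacy aliases that merely re-export the NumPy functions of the same name and are deprecated in newer SciPy releases. A drop-in sketch of the equivalent NumPy imports:

# Equivalent imports taken from NumPy directly (the scipy aliases only forwarded to these).
from numpy import zeros, shape, ones, diag, concatenate, eye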
15,369
pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pd_l = zeros(nd) pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = gen[:, PMAX] / baseMVA qg_u = gen[:, QMAX] / baseMVA pd_u = bus[d, PD] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg + nd) self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l, pd_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u, pd_u]), T)]) vtypes = ["c"] * _nv_second_stage * T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg)), Cd]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg, Cd.dot(diag(bus[d,QD]/bus[d,PD]))]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] for i in range(nd): c[t * _nv_second_stage + i + 3 * nl + nb + 2 * ng + 2 * nmg] = Voll * baseMVA# The load shedding cost # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) 
neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1
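The cropped_code above repeatedly stamps a per-time-slot constraint block into a large lil_matrix before stacking the constraint groups with vstack/hstack. The sketch below shows only that stamping step; the sizes (nb, nv, T) and the stand-in block are made up and do not correspond to the actual distribution-network data.

import numpy as np
from scipy.sparse import lil_matrix

nb, nv, T = 3, 5, 4                              # rows and columns per slot, number of periods (illustrative)
A_t = np.arange(1, nb * nv + 1).reshape(nb, nv)  # stand-in for the per-slot constraint block
Aeq = lil_matrix((nb * T, nv * T))
beq = np.zeros(nb * T)
for t in range(T):
    Aeq[t * nb:(t + 1) * nb, t * nv:(t + 1) * nv] = A_t   # stamp the block on the diagonal
    beq[t * nb:(t + 1) * nb] = 1.0                         # placeholder right-hand side for period t
Aeq = Aeq.tocsr()   # convert once assembly is finished
print(Aeq.shape, Aeq.nnz)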
""" Stochastic optimal power flow with multiple microgrids and mobile energy storage systems @author: Zhao Tianyang @e-mail: [email protected] @date: 10 Jan 2019 Major updates: 1) Update code style using PEP 8 -- Style Guide for Python Code 2) Store data in database 3) Scenario generation and reduction 4) Automatic results analysis Nomenclature: nV: number of variables mg: microgrid ds: distribution systems me: mobile energy storage systems ch: charging dc: discharging ele: electricity tra: traffic i,j,k: index t: time index T: time periods tns:traffic networks pns:power networks """ class StochasticDynamicOptimalPowerFlowTess(): def __init__(self): self.name = "Unit commitment with tess" def main(self, power_networks, micro_grids, profile, pv_profile, mess, traffic_networks, ns=100): """ Main entrance for network reconfiguration problems :param case: electric network information :param profile: load profile within the distribution networks :param micrgrids: dictionary for microgrids :param tess: dictionary for tess :return: network reconfiguration, distribution network status, and microgrid status """ T = len(profile) # Time spans self.T = T nmg = len(micro_grids) # Number of microgrids self.nmg = nmg nmes = len(mess) # Number of mobile energy storage systems self.nmes = nmes nb_tra = traffic_networks["bus"].shape[0] # Number of buses in the transportation networks self.nb_tra = nb_tra assert nb_tra == nmg, "The microgrids within the transportation networks are not synchronized!" # 1) Formulate the first stage optimization problem model_first_stage = self.first_stage_problem_formualtion(pns=power_networks, mgs=micro_grids, mess=mess, tns=traffic_networks) # (sol_first_stage, obj, success) = milp(model_first_stage["c"], Aeq=model_first_stage["Aeq"], # beq=model_first_stage["beq"], # A=model_first_stage["A"], b=model_first_stage["b"], # vtypes=model_first_stage["vtypes"], # xmax=model_first_stage["ub"], xmin=model_first_stage["lb"]) # sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 2) Formulate the second stage optimization problem # Formulate the second stage scenarios (ds_second_stage, mgs_second_stage, weight) = self.scenario_generation_reduction(profile=profile, micro_grids=micro_grids, ns=ns, pns=power_networks, pv_profile=pv_profile, ns_reduced=round(0.98 * ns)) ns -= round(0.98 * ns) model_second_stage = {} for i in range(ns): model_second_stage[i] = self.second_stage_problem_formualtion(pns=power_networks, mgs=mgs_second_stage[i], mess=mess, tns=traffic_networks, profile=ds_second_stage[i, :], index=i, weight=weight[i]) # 3) Merge the first-stage problem and second stage problem lb = model_first_stage["lb"] ub = model_first_stage["ub"] vtypes = model_first_stage["vtypes"] c = model_first_stage["c"] Qc = dict() if model_first_stage["Aeq"] is not None: neq = model_first_stage["Aeq"].shape[0] else: neq = 0 if model_first_stage["A"] is not None: nineq = model_first_stage["A"].shape[0] else: nineq = 0 nv_first_stage = self.nv_first_stage nv_second_stage = self.nv_second_stage q = zeros(nv_first_stage) nv_index = zeros(ns + 1).astype(int) neq_index = zeros(ns + 1).astype(int) nineq_index = zeros(ns + 1).astype(int) neq_index[0] = neq nineq_index[0] = nineq nv_index[0] = nv_first_stage beq = model_first_stage["beq"] for i in range(ns): if model_second_stage[i]["Aeq"] is not None: neq_index[i + 1] = neq_index[i] + model_second_stage[i]["Aeq"].shape[0] else: neq_index[i + 1] = neq_index[i] if model_second_stage[i]["Ts"] is not None: nineq_index[i + 1] = 
nineq_index[i] + model_second_stage[i]["Ts"].shape[0] else: nineq_index[i + 1] = nineq_index[i] nv_index[i + 1] = nv_index[i] + nv_second_stage c = concatenate([c, model_second_stage[i]["c"]]) q = concatenate([q, model_second_stage[i]["q"]]) lb = concatenate([lb, model_second_stage[i]["lb"]]) ub = concatenate([ub, model_second_stage[i]["ub"]]) vtypes += model_second_stage[i]["vtypes"] beq = concatenate([beq, model_second_stage[i]["beq"]]) Aeq_full = lil_matrix((neq_index[-1], nv_index[-1])) Aeq_full[0:neq_index[0], 0:nv_index[0]] = model_first_stage["Aeq"] rc = zeros(0) for i in range(ns): Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Aeq"] Qc.update(model_second_stage[i]["Qc"]) rc = concatenate([rc, model_second_stage[i]["rc"]]) A_full = lil_matrix((nineq_index[-1], nv_index[-1])) b = model_first_stage["b"] A_full[0:int(nineq_index[0]), 0:int(nv_index[0])] = model_first_stage["A"] for i in range(ns): A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = model_second_stage[i]["Ts"] A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_second_stage[i]["Ws"] b = concatenate([b, model_second_stage[i]["hs"]]) # 3) Obtain the results for first-stage and second stage optimization problems # 3.1) Obtain the integrated solution (sol, obj, success) = miqcp(c, q, Aeq=Aeq_full, beq=beq, A=A_full, b=b, Qc=Qc, rc=rc, xmin=lb, xmax=ub, vtypes=vtypes) # 3.2) decouple the solution into multiple subsystems sol_first_stage = sol[0:nv_second_stage] sol_second_stage = {} for i in range(ns): sol_second_stage[i] = sol[int(nv_index[i]):int(nv_index[i + 1])] # 4) Verify the first-stage and second stage optization problem # 4.1) First-stage solution sol_first_stage = self.first_stage_solution_validation(sol=sol_first_stage) # 4.2) Second-stage solution sol_second_stage_checked = {} db_management = DataBaseManagement() db_management.create_table(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng) db_management.create_table(table_name="micro_grids", nmg=self.nmg) db_management.create_table(table_name="mobile_energy_storage_systems", nmg=self.nmg) db_management.create_table(table_name="first_stage_solutions", nmg=self.nmg, ng=self.ng) db_management.create_table(table_name="fisrt_stage_mess", nmg=self.nmg) for t in range(T): db_management.insert_data_first_stage(table_name="first_stage_solutions", time=t, ng=self.ng, nmg=self.nmg, pg=sol_first_stage["pg"][:, t].tolist(), rg=sol_first_stage["rg"][:, t].tolist(), pg_mg=sol_first_stage["pg_mg"][:, t].tolist(), rg_mg=sol_first_stage["rg_mg"][:, t].tolist(), pess_ch=sol_first_stage["pess_ch"][:, t].tolist(), pess_dc=sol_first_stage["pess_dc"][:, t].tolist(), ress=sol_first_stage["ress"][:, t].tolist(), ess=sol_first_stage["eess"][:, t].tolist(), iess=sol_first_stage["iess"][:, t].tolist()) for i in range(nmes): for t in range(T): db_management.insert_data_first_stage_mess(table_name="fisrt_stage_mess", nmg=self.nmg, time=t, mess=i, imess=sol_first_stage["MESS"][i]["idc"][:, t].tolist(), rmess=sol_first_stage["MESS"][i]["rmess"][:, t].tolist(), pmess_ch= sol_first_stage["MESS"][i]["pmess_ch"][:, t].tolist(), pmess_dc= sol_first_stage["MESS"][i]["pmess_dc"][:, t].tolist(), mess_f_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][0], mess_t_stop=sol_first_stage["MESS"][i]["VRP"][t + 1][1]) for i in range(ns): sol_second_stage_checked[i] = self.second_stage_solution_validation(sol_second_stage[i]) for i in range(ns): for t in range(T): 
db_management.insert_data_ds(table_name="distribution_networks", nl=self.nl, nb=self.nb, ng=self.ng, scenario=i, time=t, pij=sol_second_stage_checked[i]["DS"]["pij"][:, t].tolist(), qij=sol_second_stage_checked[i]["DS"]["qij"][:, t].tolist(), lij=sol_second_stage_checked[i]["DS"]["lij"][:, t].tolist(), vi=sol_second_stage_checked[i]["DS"]["vi"][:, t].tolist(), pg=sol_second_stage_checked[i]["DS"]["pg"][:, t].tolist(), qg=sol_second_stage_checked[i]["DS"]["qg"][:, t].tolist(), ) for i in range(ns): for j in range(nmg): for t in range(T): db_management.insert_data_mg(table_name="micro_grids", scenario=i, time=t, mg=j, pg=sol_second_stage_checked[i]["MG"]["pg"][j, t], qg=sol_second_stage_checked[i]["MG"]["qg"][j, t], pug=sol_second_stage_checked[i]["MG"]["pug"][j, t], qug=sol_second_stage_checked[i]["MG"]["qug"][j, t], pbic_ac2dc=sol_second_stage_checked[i]["MG"]["pbic_ac2dc"][j, t], pbic_dc2ac=sol_second_stage_checked[i]["MG"]["pbic_dc2ac"][j, t], qbic=sol_second_stage_checked[i]["MG"]["qbic"][j, t], pess_ch=sol_second_stage_checked[i]["MG"]["pess_ch"][j, t], pess_dc=sol_second_stage_checked[i]["MG"]["pess_dc"][j, t], eess=sol_second_stage_checked[i]["MG"]["eess"][j, t], pmess=sol_second_stage_checked[i]["MG"]["pmess"][j, t], ppv=sol_second_stage_checked[i]["MG"]["ppv"][j, t]) for i in range(ns): for j in range(nmes): for t in range(T): db_management.insert_data_mess(table_name="mobile_energy_storage_systems", scenario=i, time=t, mess=j, nmg=self.nmg, pmess_dc= sol_second_stage_checked[i]["MESS"][j]["pmess_dc"][:, t].tolist(), pmess_ch= sol_second_stage_checked[i]["MESS"][j]["pmess_ch"][:, t].tolist(), emess=sol_second_stage_checked[i]["MESS"][j]["emess"][0, t]) # 4.3) Cross validation of the first-stage and second-stage decision variables tess_check = {} for i in range(ns): tess_temp = {} for j in range(nmes): tess_temp[j] = sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["pmess_dc"] + \ sol_first_stage["MESS"][j]["pmess_ch"] - \ sol_first_stage["MESS"][j]["rmess"] tess_temp[j + nmes] = sol_second_stage_checked[i]["MESS"][j]["pmess_ch"] - \ sol_second_stage_checked[i]["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["pmess_ch"] + \ sol_first_stage["MESS"][j]["pmess_dc"] - \ sol_first_stage["MESS"][j]["rmess"] tess_check[i] = tess_temp # return sol_distribution_network, sol_microgrids, sol_tess return sol_first_stage, sol_second_stage_checked def first_stage_problem_formualtion(self, pns, mgs, mess, tns): """ Problem formulation for the first stage optimization, Decision variables include, DGs within power networks, DGs within MGs, EESs within MGs and TESSs :param power_networks: Parameters for the power networks :param micro_grids: Parameters for the microgrids :param tess: Parameters for the mobile energy storage systems :param traffic_networks: Parameters for the transportation networks :return: Formulated first-stage problem """ T = self.T # Time slots nmg = self.nmg # Number of mgs nmes = self.nmes # Number of tess mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] ng = shape(mpc['gen'])[0] ## number of dispatchable injections nb = shape(mpc["bus"])[0] self.nb = nb self.ng = ng # Obtain the initial status, start-up and shut down of generators Ig0 = gen[:, -1].astype(int) MIN_DOWN = gen[:, -2].astype(int) MIN_UP = gen[:, -3].astype(int) alpha_l = zeros(ng) beta_l = zeros(ng) Ig_l = zeros(ng) pg_l = zeros(ng) # Boundary 
for DGs within distribution networks rg_l = zeros(ng) alpha_u = ones(ng) beta_u = ones(ng) Ig_u = ones(ng) pg_u = gen[:, PMAX] / baseMVA rg_u = gen[:, PMAX] / baseMVA c_alpha = gencost[:, 0] c_beta = gencost[:, 1] c_ig = gencost[:, 6] cg = gencost[:, 5] * baseMVA cr = zeros(ng) pg_mg_l = zeros(nmg) # Boundary for DGs within MGs rg_mg_l = zeros(nmg) pg_mg_u = zeros(nmg) rg_mg_u = zeros(nmg) cg_mg = zeros(nmg) cr_mg = zeros(nmg) for i in range(nmg): pg_mg_l[i] = mgs[i]["DG"]["PMIN"] pg_mg_u[i] = mgs[i]["DG"]["PMAX"] rg_mg_u[i] = mgs[i]["DG"]["PMAX"] cg_mg[i] = mgs[i]["DG"]["COST_B"] pes_ch_l = zeros(nmg) # Lower boundary for ESSs within MGs pes_dc_l = zeros(nmg) ees_l = zeros(nmg) res_l = zeros(nmg) ies_l = zeros(nmg) pes_ch_u = zeros(nmg) # Upper boundary for ESSs within MGs pes_dc_u = zeros(nmg) ees_u = zeros(nmg) res_u = zeros(nmg) ies_u = ones(nmg) ces_ch = zeros(nmg) # Cost boundary for ESSs within MGs ces_dc = zeros(nmg) ces_r = zeros(nmg) ces = zeros(nmg) ces_i = zeros(nmg) for i in range(nmg): pes_ch_u[i] = mgs[i]["ESS"]["PCH_MAX"] pes_dc_u[i] = mgs[i]["ESS"]["PDC_MAX"] + mgs[i]["ESS"]["PCH_MAX"] res_u[i] = mgs[i]["ESS"]["PCH_MAX"] ees_l[i] = mgs[i]["ESS"]["EMIN"] ees_u[i] = mgs[i]["ESS"]["EMAX"] _nv_first_stage = ng * 5 + nmg * 2 + nmg * 5 nv_first_stage = _nv_first_stage * T # Formulate the boundaries lb = concatenate( [tile(concatenate( [alpha_l, beta_l, Ig_l, pg_l, rg_l, pg_mg_l, rg_mg_l, pes_ch_l, pes_dc_l, res_l, ees_l, ies_l]), T)]) ub = concatenate( [tile(concatenate( [alpha_u, beta_u, Ig_u, pg_u, rg_u, pg_mg_u, rg_mg_u, pes_ch_u, pes_dc_u, res_u, ees_u, ies_u]), T)]) # Objective value c = concatenate( [tile(concatenate([c_alpha, c_beta, c_ig, cg, cr, cg_mg, cr_mg, ces_ch, ces_dc, ces, ces_r, ces_i]), T)]) # Variable types vtypes = (["b"] * ng * 3 + ["c"] * (ng * 2 + nmg * 2 + nmg * 4) + ["b"] * nmg) * T ## Constraint sets # 1) Pg+Rg<=PguIg A = lil_matrix((ng * T, nv_first_stage)) b = zeros(ng * T) for t in range(T): for j in range(ng): A[t * ng + j, t * _nv_first_stage + ng * 3 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A[t * ng + j, t * _nv_first_stage + ng * 2 + j] = -pg_u[j] # 2) Pg-Rg>=IgPgl A_temp = lil_matrix((ng * T, nv_first_stage)) b_temp = zeros(ng * T) for t in range(T): for j in range(ng): A_temp[t * ng + j, t * _nv_first_stage + ng * 3 + j] = -1 A_temp[t * ng + j, t * _nv_first_stage + ng * 4 + j] = 1 A_temp[t * ng + j, t * _nv_first_stage + j] = pg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 3) Start-up and shut-down constraints of DGs UP_LIMIT = zeros(ng).astype(int) DOWN_LIMIT = zeros(ng).astype(int) for i in range(ng): UP_LIMIT[i] = T - MIN_UP[i] DOWN_LIMIT[i] = T - MIN_DOWN[i] # 3.1) Up limit A_temp = lil_matrix((sum(UP_LIMIT), nv_first_stage)) b_temp = zeros(sum(UP_LIMIT)) for i in range(ng): for t in range(MIN_UP[i], T): for k in range(t - MIN_UP[i], t): A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], k * _nv_first_stage + i] = 1 A_temp[sum(UP_LIMIT[0:i]) + t - MIN_UP[i], t * _nv_first_stage + ng * 2 + i] = -1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # # 3.2) Down limit A_temp = lil_matrix((sum(DOWN_LIMIT), nv_first_stage)) b_temp = ones(sum(DOWN_LIMIT)) for i in range(ng): for t in range(MIN_DOWN[i], T): for k in range(t - MIN_DOWN[i], t): A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], k * _nv_first_stage + ng + i] = 1 A_temp[sum(DOWN_LIMIT[0:i]) + t - MIN_DOWN[i], t * _nv_first_stage + ng * 2 + i] = 1 A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Status transformation of each unit Aeq = lil_matrix((T 
* ng, nv_first_stage)) beq = zeros(T * ng) for i in range(ng): for t in range(T): Aeq[i * T + t, t * _nv_first_stage + i] = 1 Aeq[i * T + t, t * _nv_first_stage + ng + i] = -1 Aeq[i * T + t, t * _nv_first_stage + ng * 2 + i] = -1 if t != 0: Aeq[i * T + t, (t - 1) * _nv_first_stage + ng * 2 + i] = 1 else: beq[i * T + t] = -Ig0[i] # 3) Pg_mg+Rg_mg<=Pg_mg_u A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 4) Pg_mg-Rg_mg<=Pg_mg_l A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg + j] = 1 b_temp[t * nmg + j] = pg_mg_l[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 5) Pess_dc-Pess_ch+Ress<=Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 6) Pess_ch-Pess_dc+Ress<=Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = -1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 2 + j] = 1 b_temp[t * nmg + j] = pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 7) Energy storage balance equation Aeq_temp = lil_matrix((T * nmg, nv_first_stage)) beq_temp = zeros(T * nmg) for t in range(T): for j in range(nmg): Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = 1 Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = -mgs[j]["ESS"]["EFF_CH"] Aeq_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 / mgs[j]["ESS"]["EFF_DC"] if t == 0: beq_temp[t * nmg + j] = mgs[j]["ESS"]["E0"] else: Aeq_temp[t * nmg + j, (t - 1) * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 3 + j] = -1 Aeq = vstack([Aeq, Aeq_temp]) beq = concatenate([beq, beq_temp]) # 8) Pess_ch<=I*Pess_ch_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = -pes_ch_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 9) Pess_dc<=(1-I)*Pess_dc_max A_temp = lil_matrix((nmg * T, nv_first_stage)) b_temp = zeros(nmg * T) for t in range(T): for j in range(nmg): A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg + j] = 1 A_temp[t * nmg + j, t * _nv_first_stage + ng * 5 + nmg * 2 + nmg * 4 + j] = pes_dc_u[j] b_temp[t * nmg + j] = pes_dc_u[j] A = vstack([A, A_temp]) b = concatenate([b, b_temp]) # 2) Transportation energy storage systems problem model_mess = {} for i in range(nmes): model_mess[i] = self.problem_formulation_tess(mess=mess[i], tns=tns) # 3) Merge the DGs, ESSs and TESSs neq = Aeq.shape[0] nineq = A.shape[0] 
nV_index = zeros(nmes + 1).astype(int) neq_index = zeros(nmes + 1).astype(int) nineq_index = zeros(nmes + 1).astype(int) nV_index[0] = nv_first_stage neq_index[0] = neq nineq_index[0] = nineq for i in range(nmes): nV_index[i + 1] = nV_index[i] + len(model_mess[i]["c"]) neq_index[i + 1] = neq_index[i] + model_mess[i]["Aeq"].shape[0] nineq_index[i + 1] = nineq_index[i] + model_mess[i]["A"].shape[0] neq += model_mess[i]["Aeq"].shape[0] nineq += model_mess[i]["A"].shape[0] # Merge the objective function, boundaries, types and rhs c = concatenate([c, model_mess[i]["c"]]) lb = concatenate([lb, model_mess[i]["lb"]]) ub = concatenate([ub, model_mess[i]["ub"]]) vtypes += model_mess[i]["vtypes"] beq = concatenate([beq, model_mess[i]["beq"]]) b = concatenate([b, model_mess[i]["b"]]) A_full = lil_matrix((nineq_index[-1], nV_index[-1])) Aeq_full = lil_matrix((neq_index[-1], nV_index[-1])) if Aeq is not None: Aeq_full[0:int(neq_index[0]), 0:int(nV_index[0])] = Aeq if A is not None: A_full[0:int(nineq_index[0]), 0:int(nV_index[0])] = A for i in range(nmes): Aeq_full[neq_index[i]:neq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["Aeq"] A_full[nineq_index[i]:nineq_index[i + 1], nV_index[i]:nV_index[i + 1]] = model_mess[i]["A"] self.nv_first_stage = nV_index[-1] # The number of first stage decision variables self._nv_first_stage = _nv_first_stage model_first_stage = {"c": c, "lb": lb, "ub": ub, "vtypes": vtypes, "A": A_full, "b": b, "Aeq": Aeq_full, "beq": beq, } return model_first_stage def first_stage_solution_validation(self, sol): """ Validation of the first-stage solution :param sol: The first stage solution :return: the first stage solution """ T = self.T ng = self.ng nmg = self.nmg nmes = self.nmes # Set-points of DGs within DSs, MGs and ESSs _nv_first_stage = self._nv_first_stage alpha = zeros((ng, T)) beta = zeros((ng, T)) Ig = zeros((ng, T)) Pg = zeros((ng, T)) Rg = zeros((ng, T)) Pg_mg = zeros((nmg, T)) Rg_mg = zeros((nmg, T)) Pess_dc = zeros((nmg, T)) Pess_ch = zeros((nmg, T)) Ress = zeros((nmg, T)) Eess = zeros((nmg, T)) Iess = zeros((nmg, T)) for i in range(T): alpha[:, i] = sol[_nv_first_stage * i:_nv_first_stage * i + ng] beta[:, i] = sol[_nv_first_stage * i + ng:_nv_first_stage * i + ng * 2] Ig[:, i] = sol[_nv_first_stage * i + ng * 2:_nv_first_stage * i + ng * 3] Pg[:, i] = sol[_nv_first_stage * i + ng * 3:_nv_first_stage * i + ng * 4] Rg[:, i] = sol[_nv_first_stage * i + ng * 4:_nv_first_stage * i + ng * 5] Pg_mg[:, i] = sol[_nv_first_stage * i + ng * 5:_nv_first_stage * i + ng * 5 + nmg] Rg_mg[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg:_nv_first_stage * i + ng * 5 + nmg * 2] Pess_ch[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 2:_nv_first_stage * i + ng * 5 + nmg * 3] Pess_dc[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 3:_nv_first_stage * i + ng * 5 + nmg * 4] Ress[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 4:_nv_first_stage * i + ng * 5 + nmg * 5] Eess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 5:_nv_first_stage * i + ng * 5 + nmg * 6] Iess[:, i] = sol[_nv_first_stage * i + ng * 5 + nmg * 6:_nv_first_stage * i + ng * 5 + nmg * 7] # Set-points and scheduling of mobile energy storage systems nv_tra = self.nv_tra nl_traffic = self.nl_tra n_stops = self.n_stops nb_tra_ele = self.nb_tra_ele sol_ev = {} for i in range(nmes): ev_temp = {} ev_temp["VRP"] = [] for t in range(nl_traffic): if sol[_nv_first_stage * T + nv_tra * i + t] > 0: # obtain the solution for vrp if self.connection_matrix[t, TIME] > 0: for j in range(int(self.connection_matrix[t, TIME])): 
ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) else: ev_temp["VRP"].append(((self.connection_matrix[t, F_BUS] - 1) % nmg, (self.connection_matrix[t, T_BUS] - 1) % nmg)) ev_temp["idc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_dc"] = zeros((nb_tra_ele, T)) ev_temp["pmess_ch"] = zeros((nb_tra_ele, T)) ev_temp["rmess"] = zeros((nb_tra_ele, T)) for t in range(T): for k in range(nb_tra_ele): ev_temp["idc"][k, t] = sol[_nv_first_stage * T + nv_tra * i + nl_traffic + nb_tra_ele * t + k] ev_temp["pmess_dc"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops + nb_tra_ele * t + k] ev_temp["pmess_ch"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 2 + nb_tra_ele * t + k] ev_temp["rmess"][k, t] = \ sol[_nv_first_stage * T + nv_tra * i + nl_traffic + n_stops * 3 + nb_tra_ele * t + k] sol_ev[i] = ev_temp sol_first_stage = {"alpha": alpha, "beta": beta, "ig": Ig, "rg": Rg, "pg": Pg, "pg_mg": Pg_mg, "rg_mg": Rg_mg, "pess_ch": Pess_ch, "pess_dc": Pess_dc, "ress": Ress, "eess": Eess, "iess": Iess, "MESS": sol_ev, } return sol_first_stage def second_stage_problem_formualtion(self, pns, mgs, mess, tns, profile, index=0, weight=1): """ Second-stage problem formulation, the decision variables includes DGs within power networks, DGs within MGs, EESs within MGs and TESSs and other systems' information :param power_networks: :param micro_grids: :param tess: :param traffic_networks: :return: The second stage problems as list, including coupling constraints, and other constraint set """ # I) Formulate the problem for distribution systems operator T = self.T mpc = ext2int(pns) baseMVA, bus, gen, branch, gencost = mpc["baseMVA"], mpc["bus"], mpc["gen"], mpc["branch"], mpc["gencost"] nb = shape(mpc['bus'])[0] ## number of buses nl = shape(mpc['branch'])[0] ## number of branches ng = shape(mpc['gen'])[0] ## number of dispatchable injections nd = sum(bus[:,PD]>0) nmg = self.nmg nmes = self.nmes self.nl = nl self.nb = nb self.ng = ng self.nd = nd m = zeros(nmg) ## list of integration index pmg_l = zeros(nmg) ## list of lower boundary pmg_u = zeros(nmg) ## list of upper boundary qmg_l = zeros(nmg) ## list of lower boundary qmg_u = zeros(nmg) ## list of upper boundary for i in range(nmg): m[i] = mgs[i]["BUS"] pmg_l[i] = mgs[i]["UG"]["PMIN"] / 1000 / baseMVA pmg_u[i] = mgs[i]["UG"]["PMAX"] / 1000 / baseMVA qmg_l[i] = mgs[i]["UG"]["QMIN"] / 1000 / baseMVA qmg_u[i] = mgs[i]["UG"]["QMAX"] / 1000 / baseMVA f = branch[:, F_BUS] ## list of "from" buses t = branch[:, T_BUS] ## list of "to" buses d = bus[bus[:,PD]>0, BUS_I].astype(int) ## list of "to" buses i = range(nl) ## double set of row indices self.f = f ## record from bus for each branch # Connection matrix Cf = sparse((ones(nl), (i, f)), (nl, nb)) Ct = sparse((ones(nl), (i, t)), (nl, nb)) Cd = sparse((ones(nd), (d, range(nd))), (nb, nd)) Cg = sparse((ones(ng), (gen[:, GEN_BUS], range(ng))), (nb, ng)) Cmg = sparse((ones(nmg), (m, range(nmg))), (nb, nmg)) Branch_R = branch[:, BR_R] Branch_X = branch[:, BR_X] Cf = Cf.T Ct = Ct.T # Obtain the boundary information slmax = branch[:, RATE_A] / baseMVA pij_l = -slmax qij_l = -slmax lij_l = zeros(nl) vm_l = bus[:, VMIN] ** 2 pg_l = gen[:, PMIN] / baseMVA qg_l = gen[:, QMIN] / baseMVA pd_l = zeros(nd) pij_u = slmax qij_u = slmax lij_u = slmax vm_u = bus[:, VMAX] ** 2 pg_u = gen[:, PMAX] / baseMVA qg_u = gen[:, QMAX] / baseMVA pd_u = bus[d, PD] / baseMVA _nv_second_stage = int(3 * nl + nb + 2 * ng + 2 * nmg + nd) 
self._nv_second_stage = _nv_second_stage # Number of decision variable within each time slot lb = concatenate([tile(concatenate([pij_l, qij_l, lij_l, vm_l, pg_l, qg_l, pmg_l, qmg_l, pd_l]), T)]) ub = concatenate([tile(concatenate([pij_u, qij_u, lij_u, vm_u, pg_u, qg_u, pmg_u, qmg_u, pd_u]), T)]) vtypes = ["c"] * _nv_second_stage * T nv_ds = _nv_second_stage * T # Number of total decision variables # Add system level constraints # 1) Active power balance Aeq_p = lil_matrix((nb * T, nv_ds)) beq_p = zeros(nb * T) for i in range(T): Aeq_p[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([Ct - Cf, zeros((nb, nl)), -diag(Ct * Branch_R) * Ct, zeros((nb, nb)), Cg, zeros((nb, ng)), -Cmg, zeros((nb, nmg)), Cd]) beq_p[i * nb:(i + 1) * nb] = profile[i * nb:(i + 1) * nb] / baseMVA # 2) Reactive power balance Aeq_q = lil_matrix((nb * T, nv_ds)) beq_q = zeros(nb * T) for i in range(T): Aeq_q[i * nb:(i + 1) * nb, i * _nv_second_stage: (i + 1) * _nv_second_stage] = \ hstack([zeros((nb, nl)), Ct - Cf, -diag(Ct * Branch_X) * Ct, zeros((nb, nb)), zeros((nb, ng)), Cg, zeros((nb, nmg)), -Cmg, Cd.dot(diag(bus[d,QD]/bus[d,PD]))]) for j in range(nb): if bus[j, PD] > 0: beq_q[i * nb:(i + 1) * nb] = profile[i * nb + j] / bus[j, PD] * bus[j, QD] / baseMVA # 3) KVL equation Aeq_kvl = lil_matrix((nl * T, nv_ds)) beq_kvl = zeros(nl * T) for i in range(T): Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage: i * _nv_second_stage + nl] = -2 * diag(Branch_R) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + nl: i * _nv_second_stage + 2 * nl] = -2 * diag(Branch_X) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 2 * nl: i * _nv_second_stage + 3 * nl] = diag( Branch_R ** 2) + diag(Branch_X ** 2) Aeq_kvl[i * nl:(i + 1) * nl, i * _nv_second_stage + 3 * nl:i * _nv_second_stage + 3 * nl + nb] = ( Cf.T - Ct.T).toarray() Aeq = vstack([Aeq_p, Aeq_q, Aeq_kvl]) beq = concatenate([beq_p, beq_q, beq_kvl]) c = zeros(nv_ds) q = zeros(nv_ds) c0 = 0 for t in range(T): for i in range(ng): c[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 5] * baseMVA q[t * _nv_second_stage + i + 3 * nl + nb] = gencost[i, 4] * baseMVA * baseMVA c0 += gencost[i, 6] for i in range(nd): c[t * _nv_second_stage + i + 3 * nl + nb + 2 * ng + 2 * nmg] = Voll * baseMVA# The load shedding cost # Coupling constraints between the distribution systems and micro_grids Ax2y = lil_matrix((2 * nmg * T, nv_ds)) # connection matrix with the microgrids for i in range(T): for j in range(nmg): # Active power Ax2y[i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + j] = 1000 * baseMVA # Reactive power Ax2y[nmg * T + i * nmg + j, i * _nv_second_stage + 3 * nl + nb + 2 * ng + nmg + j] = 1000 * baseMVA # II) Formulate the problem for microgrids model_microgrids = {} for i in range(nmg): model_microgrids[i] = self.problem_formulation_microgrid(mg=mgs[i], mess=mess) # II.A) Combine the distribution system operation problem and microgrid systems if Aeq is not None: neq_ds = Aeq.shape[0] else: neq_ds = 0 nVariables = int(nv_ds) neq = int(neq_ds) nv_index = zeros(nmg + 1).astype(int) neq_index = zeros(nmg + 1).astype(int) nv_index[0] = nv_ds neq_index[0] = int(neq_ds) for i in range(nmg): nv_index[i + 1] = nv_index[i] + len(model_microgrids[i]["c"]) neq_index[i + 1] = neq_index[i] + model_microgrids[i]["Aeq"].shape[0] nVariables += len(model_microgrids[i]["c"]) neq += int(model_microgrids[i]["Aeq"].shape[0]) Aeq_full = lil_matrix((int(neq_index[-1]), int(nv_index[-1]))) Aeq_full[0:neq_ds, 0:nv_ds] = Aeq for i in range(nmg): lb = 
concatenate([lb, model_microgrids[i]["lb"]]) ub = concatenate([ub, model_microgrids[i]["ub"]]) c = concatenate([c, model_microgrids[i]["c"]]) q = concatenate([q, model_microgrids[i]["q"]]) vtypes += model_microgrids[i]["vtypes"] beq = concatenate([beq, model_microgrids[i]["beq"]]) Aeq_full[neq_index[i]:neq_index[i + 1], nv_index[i]:nv_index[i + 1]] = model_microgrids[i]["Aeq"] # Add coupling constraints, between the microgrids and distribution networks Ay2x = lil_matrix((2 * nmg * T, nv_index[-1] - nv_index[0])) for i in range(T): for j in range(nmg): Ay2x[i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + PUG] = -1
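In the all_code above, main() assembles the deterministic equivalent by placing the first-stage variables first, appending each scenario's variables after them, and tracking block positions through nv_index / nineq_index, while Ts couples a scenario's rows back to the first-stage columns and Ws acts on that scenario's own columns. The toy sketch below reproduces only this bookkeeping; Ts, Ws, hs and every size are placeholders, not the real problem data.

import numpy as np
from scipy.sparse import lil_matrix

nv_first, nv_scen, ns = 2, 3, 2          # first-stage / per-scenario variable counts, number of scenarios
Ts = np.ones((1, nv_first))              # coupling rows acting on first-stage variables
Ws = np.ones((1, nv_scen))               # recourse matrix of one scenario
hs = np.array([1.0])                     # right-hand side of the coupling rows

nv_index = np.zeros(ns + 1, dtype=int)
nineq_index = np.zeros(ns + 1, dtype=int)
nv_index[0] = nv_first                   # first-stage block occupies the leading columns
for i in range(ns):
    nv_index[i + 1] = nv_index[i] + nv_scen
    nineq_index[i + 1] = nineq_index[i] + Ts.shape[0]

A_full = lil_matrix((nineq_index[-1], nv_index[-1]))
b = np.zeros(0)
for i in range(ns):
    A_full[nineq_index[i]:nineq_index[i + 1], 0:nv_index[0]] = Ts                      # link to first stage
    A_full[nineq_index[i]:nineq_index[i + 1], nv_index[i]:nv_index[i + 1]] = Ws        # scenario's own block
    b = np.concatenate([b, hs])
print(A_full.toarray())
print(b)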
Ay2x[nmg * T + i * nmg + j, int(nv_index[j] - nv_index[0]) + i * NX_MG + QUG] = -1
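The next_line field above is the reactive-power twin of the last statement in cropped_code: inside the same double loop it fills the second half of the Ay2x coupling rows using the QUG offset instead of PUG. A self-contained toy version of the two rows is sketched below; the sizes and the PUG/QUG offsets (0 and 1) are assumptions, and equal-sized microgrid sub-problems are assumed so that nv_index[j] - nv_index[0] reduces to j * nv_scen.

import numpy as np

T, nmg, NX_MG = 2, 2, 4        # toy sizes
PUG, QUG = 0, 1                # placeholder offsets inside one microgrid/time block
nv_scen = T * NX_MG            # variables per microgrid sub-problem (assumed equal for all microgrids)
Ay2x = np.zeros((2 * nmg * T, nmg * nv_scen))
for i in range(T):
    for j in range(nmg):
        Ay2x[i * nmg + j, j * nv_scen + i * NX_MG + PUG] = -1            # active-power coupling row
        Ay2x[nmg * T + i * nmg + j, j * nv_scen + i * NX_MG + QUG] = -1  # reactive-power row (the next_line)
print(np.nonzero(Ay2x))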
12
2023-11-27 15:57:53+00:00
24k
girgle/DouZero_For_New_HLDDZ
main.py
[ { "identifier": "GameHelper", "path": "GameHelper.py", "snippet": "class GameHelper:\n def __init__(self):\n self.ScreenZoomRate = None\n self.counter = QTime()\n self.Pics = {}\n self.PicsCV = {}\n st = time.time()\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n self.Interrupt = False\n self.RealRate = (1440, 810)\n self.GetZoomRate()\n for file in os.listdir(\"./pics\"):\n info = file.split(\".\")\n if info[1] == \"png\":\n tmpImage = Image.open(\"./pics/\" + file)\n imgCv = cv2.imread(\"./pics/\" + file)\n self.Pics.update({info[0]: tmpImage})\n self.PicsCV.update({info[0]: imgCv})\n\n def sleep(self, ms):\n self.counter.restart()\n while self.counter.elapsed() < ms:\n QtWidgets.QApplication.processEvents(QEventLoop.AllEvents, 50)\n\n def Screenshot(self, region=None): # -> (im, (left, top))\n try_count = 3\n success = False\n while try_count > 0 and not success:\n try:\n try_count -= 1\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n hwnd = self.Handle\n left, top, right, bot = win32gui.GetWindowRect(hwnd)\n width = right - left\n height = bot - top\n self.RealRate = (width, height)\n width = int(width)\n height = int(height)\n hwndDC = win32gui.GetWindowDC(hwnd)\n mfcDC = win32ui.CreateDCFromHandle(hwndDC)\n saveDC = mfcDC.CreateCompatibleDC()\n saveBitMap = win32ui.CreateBitmap()\n saveBitMap.CreateCompatibleBitmap(mfcDC, width, height)\n saveDC.SelectObject(saveBitMap)\n result = windll.user32.PrintWindow(hwnd, saveDC.GetSafeHdc(), 3)\n bmpinfo = saveBitMap.GetInfo()\n bmpstr = saveBitMap.GetBitmapBits(True)\n im = Image.frombuffer(\n \"RGB\",\n (bmpinfo['bmWidth'], bmpinfo['bmHeight']),\n bmpstr, 'raw', 'BGRX', 0, 1)\n win32gui.DeleteObject(saveBitMap.GetHandle())\n saveDC.DeleteDC()\n mfcDC.DeleteDC()\n win32gui.ReleaseDC(hwnd, hwndDC)\n im = im.resize((1440, 810))\n if region is not None:\n im = im.crop((region[0], region[1], region[0] + region[2], region[1] + region[3]))\n if result:\n success = True\n return im, (left, top)\n except Exception as e:\n print(\"截图时出现错误:\", repr(e))\n self.sleep(200)\n return None, (0, 0)\n\n def GetZoomRate(self):\n self.ScreenZoomRate = ctypes.windll.shcore.GetScaleFactorForDevice(0) / 100\n\n def LocateOnScreen(self, templateName, region, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n return LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n def ClickOnImage(self, templateName, region=None, confidence=0.8, img=None):\n if img is not None:\n image = img\n else:\n image, _ = self.Screenshot()\n imgcv = cv2.cvtColor(np.asarray(image), cv2.COLOR_RGB2BGR)\n result = LocateOnImage(imgcv, self.PicsCV[templateName], region=region, confidence=confidence)\n\n if result is not None:\n self.LeftClick(result)\n print(result)\n\n def LeftClick(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')\n time.sleep(0.1)\n pyautogui.moveTo(int(left + 1000), int(top + 550))\n\n '''win32gui.SetActiveWindow(self.Handle)\n lParam = win32api.MAKELONG(x, y)\n\n win32gui.PostMessage(self.Handle, WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, 
WM_ACTIVATE, WA_ACTIVE, lParam)\n win32gui.PostMessage(self.Handle, WM_MOUSEMOVE, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONDOWN, MK_LBUTTON, lParam)\n win32gui.PostMessage(self.Handle, WM_LBUTTONUP, MK_LBUTTON, lParam)'''\n\n def LeftClick2(self, pos):\n x, y = pos\n x = (x / 1440) * self.RealRate[0]\n y = (y / 810) * self.RealRate[1]\n x = int(x)\n y = int(y)\n self.Handle = win32gui.FindWindow(\"UnityWndClass\", None)\n left, top, _, _ = win32gui.GetWindowRect(self.Handle)\n x, y = int(left + x), int(top + y)\n\n pyautogui.mouseDown(x, y, button='left')\n time.sleep(0.1)\n pyautogui.mouseUp(x, y, button='left')" }, { "identifier": "get_move_type", "path": "douzero/env/move_detector.py", "snippet": "def get_move_type(move):\n move_size = len(move)\n move_dict = collections.Counter(move)\n\n if move_size == 0:\n return {'type': TYPE_0_PASS}\n\n if move_size == 1:\n return {'type': TYPE_1_SINGLE, 'rank': move[0]}\n\n if move_size == 2:\n if move[0] == move[1]:\n return {'type': TYPE_2_PAIR, 'rank': move[0]}\n elif move == [20, 30]: # Kings\n return {'type': TYPE_5_KING_BOMB}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 3:\n if len(move_dict) == 1:\n return {'type': TYPE_3_TRIPLE, 'rank': move[0]}\n else:\n return {'type': TYPE_15_WRONG}\n\n if move_size == 4:\n if len(move_dict) == 1:\n return {'type': TYPE_4_BOMB, 'rank': move[0]}\n elif len(move_dict) == 2:\n if move[0] == move[1] == move[2] or move[1] == move[2] == move[3]:\n return {'type': TYPE_6_3_1, 'rank': move[1]}\n else:\n return {'type': TYPE_15_WRONG}\n else:\n return {'type': TYPE_15_WRONG}\n\n if is_continuous_seq(move):\n return {'type': TYPE_8_SERIAL_SINGLE, 'rank': move[0], 'len': len(move)}\n\n if move_size == 5:\n if len(move_dict) == 2:\n return {'type': TYPE_7_3_2, 'rank': move[2]}\n else:\n return {'type': TYPE_15_WRONG}\n\n count_dict = collections.defaultdict(int)\n for c, n in move_dict.items():\n count_dict[n] += 1\n\n if move_size == 6:\n if (len(move_dict) == 2 or len(move_dict) == 3) and count_dict.get(4) == 1 and \\\n (count_dict.get(2) == 1 or count_dict.get(1) == 2):\n return {'type': TYPE_13_4_2, 'rank': move[2]}\n\n if move_size == 8 and (((len(move_dict) == 3 or len(move_dict) == 2) and\n (count_dict.get(4) == 1 and count_dict.get(2) == 2)) or count_dict.get(4) == 2):\n return {'type': TYPE_14_4_22, 'rank': max([c for c, n in move_dict.items() if n == 4])}\n\n mdkeys = sorted(move_dict.keys())\n if len(move_dict) == count_dict.get(2) and is_continuous_seq(mdkeys):\n return {'type': TYPE_9_SERIAL_PAIR, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n if len(move_dict) == count_dict.get(3) and is_continuous_seq(mdkeys):\n return {'type': TYPE_10_SERIAL_TRIPLE, 'rank': mdkeys[0], 'len': len(mdkeys)}\n\n # Check Type 11 (serial 3+1) and Type 12 (serial 3+2)\n if count_dict.get(3, 0) >= MIN_TRIPLES:\n serial_3 = list()\n single = list()\n pair = list()\n\n for k, v in move_dict.items():\n if v == 3:\n serial_3.append(k)\n elif v == 1:\n single.append(k)\n elif v == 2:\n pair.append(k)\n else: # no other possibilities\n return {'type': TYPE_15_WRONG}\n\n serial_3.sort()\n if is_continuous_seq(serial_3):\n if len(serial_3) == len(single)+len(pair)*2:\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3)}\n if len(serial_3) == len(pair) and len(move_dict) == len(serial_3) * 2:\n return {'type': TYPE_12_SERIAL_3_2, 'rank': serial_3[0], 'len': len(serial_3)}\n\n if len(serial_3) == 4:\n if is_continuous_seq(serial_3[1:]):\n return {'type': 
TYPE_11_SERIAL_3_1, 'rank': serial_3[1], 'len': len(serial_3) - 1}\n if is_continuous_seq(serial_3[:-1]):\n return {'type': TYPE_11_SERIAL_3_1, 'rank': serial_3[0], 'len': len(serial_3) - 1}\n\n return {'type': TYPE_15_WRONG}" }, { "identifier": "Ui_Form", "path": "MainWindow.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n Form.resize(677, 450)\n font = QtGui.QFont()\n font.setFamily(\"Arial\")\n font.setPointSize(9)\n font.setBold(True)\n font.setItalic(False)\n font.setWeight(75)\n Form.setFont(font)\n Form.setWindowOpacity(0.8)\n self.WinRate = QtWidgets.QLabel(Form)\n self.WinRate.setGeometry(QtCore.QRect(320, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.WinRate.setFont(font)\n self.WinRate.setAlignment(QtCore.Qt.AlignCenter)\n self.WinRate.setObjectName(\"WinRate\")\n self.UserHandCards = QtWidgets.QLabel(Form)\n self.UserHandCards.setGeometry(QtCore.QRect(30, 330, 351, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.UserHandCards.setFont(font)\n self.UserHandCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.UserHandCards.setObjectName(\"UserHandCards\")\n self.ThreeLandlordCards = QtWidgets.QLabel(Form)\n self.ThreeLandlordCards.setGeometry(QtCore.QRect(30, 120, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.ThreeLandlordCards.setFont(font)\n self.ThreeLandlordCards.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)\n self.ThreeLandlordCards.setObjectName(\"ThreeLandlordCards\")\n self.BidWinrate = QtWidgets.QLabel(Form)\n self.BidWinrate.setGeometry(QtCore.QRect(30, 220, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.BidWinrate.setFont(font)\n self.BidWinrate.setObjectName(\"BidWinrate\")\n self.PreWinrate = QtWidgets.QLabel(Form)\n self.PreWinrate.setGeometry(QtCore.QRect(30, 280, 161, 31))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PreWinrate.setFont(font)\n self.PreWinrate.setObjectName(\"PreWinrate\")\n self.label = QtWidgets.QLabel(Form)\n self.label.setGeometry(QtCore.QRect(490, 320, 101, 41))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.label.setFont(font)\n self.label.setAlignment(QtCore.Qt.AlignCenter)\n self.label.setObjectName(\"label\")\n self.LPlayedCard = QtWidgets.QLabel(Form)\n self.LPlayedCard.setGeometry(QtCore.QRect(170, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LPlayedCard.setFont(font)\n self.LPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.LPlayedCard.setObjectName(\"LPlayedCard\")\n self.splitter_2 = QtWidgets.QSplitter(Form)\n self.splitter_2.setGeometry(QtCore.QRect(20, 380, 621, 41))\n self.splitter_2.setOrientation(QtCore.Qt.Horizontal)\n self.splitter_2.setObjectName(\"splitter_2\")\n self.SingleButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n 
font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.SingleButton.setFont(font)\n self.SingleButton.setObjectName(\"SingleButton\")\n self.LoopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.LoopButton.setFont(font)\n self.LoopButton.setObjectName(\"LoopButton\")\n self.StopButton = QtWidgets.QPushButton(self.splitter_2)\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.StopButton.setFont(font)\n self.StopButton.setObjectName(\"StopButton\")\n self.tableWidget = QtWidgets.QTableWidget(Form)\n self.tableWidget.setGeometry(QtCore.QRect(20, 10, 611, 75))\n self.tableWidget.setMaximumSize(QtCore.QSize(16777215, 75))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(12)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.tableWidget.setFont(font)\n self.tableWidget.setLayoutDirection(QtCore.Qt.LeftToRight)\n self.tableWidget.setStyleSheet(\"QTableWidget{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:#444444;\\n\"\n\"border:1px solid #242424;\\n\"\n\"alternate-background-color:#525252;\\n\"\n\"gridline-color:#242424;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:selected{\\n\"\n\"color:#DCDCDC;\\n\"\n\"background:qlineargradient(spread:pad,x1:0,y1:0,x2:0,y2:1,stop:0 #484848,stop:1 #383838);\\n\"\n\"}\\n\"\n\" \\n\"\n\"QTableWidget::item:hover{\\n\"\n\"background:#5B5B5B;\\n\"\n\"}\\n\"\n\"QHeaderView::section{\\n\"\n\"text-align:center;\\n\"\n\"background:#5E5E5E;\\n\"\n\"padding:3px;\\n\"\n\"margin:0px;\\n\"\n\"color:#DCDCDC;\\n\"\n\"border:1px solid #242424;\\n\"\n\"border-left-width:0;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar:vertical{\\n\"\n\"background:#484848;\\n\"\n\"padding:0px;\\n\"\n\"border-radius:6px;\\n\"\n\"max-width:12px;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::handle:vertical{\\n\"\n\"background:#CCCCCC;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::handle:hover:vertical,QScrollBar::handle:pressed:vertical{\\n\"\n\"background:#A7A7A7;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-page:vertical{\\n\"\n\"background:444444;\\n\"\n\"}\\n\"\n\" \\n\"\n\" \\n\"\n\"QScrollBar::add-page:vertical{\\n\"\n\"background:5B5B5B;\\n\"\n\"}\\n\"\n\" \\n\"\n\"QScrollBar::add-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\\n\"\n\"QScrollBar::sub-line:vertical{\\n\"\n\"background:none;\\n\"\n\"}\")\n self.tableWidget.setFrameShadow(QtWidgets.QFrame.Sunken)\n self.tableWidget.setMidLineWidth(-1)\n self.tableWidget.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)\n self.tableWidget.setAutoScroll(False)\n self.tableWidget.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tableWidget.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)\n self.tableWidget.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tableWidget.setTextElideMode(QtCore.Qt.ElideNone)\n self.tableWidget.setObjectName(\"tableWidget\")\n self.tableWidget.setColumnCount(15)\n self.tableWidget.setRowCount(1)\n item = QtWidgets.QTableWidgetItem()\n self.tableWidget.setVerticalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n 
self.tableWidget.setHorizontalHeaderItem(0, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(1, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(2, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(3, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(4, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(5, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(6, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(7, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(8, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(9, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(10, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(11, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(12, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(13, item)\n item = QtWidgets.QTableWidgetItem()\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(True)\n font.setWeight(75)\n item.setFont(font)\n self.tableWidget.setHorizontalHeaderItem(14, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 0, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 1, item)\n item = QtWidgets.QTableWidgetItem()\n 
item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 2, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 3, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 4, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 5, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 6, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 7, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 8, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 9, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 10, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 11, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 12, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 13, item)\n item = QtWidgets.QTableWidgetItem()\n item.setTextAlignment(QtCore.Qt.AlignCenter)\n self.tableWidget.setItem(0, 14, item)\n self.tableWidget.horizontalHeader().setVisible(True)\n self.tableWidget.horizontalHeader().setCascadingSectionResizes(True)\n self.tableWidget.horizontalHeader().setDefaultSectionSize(41)\n self.tableWidget.horizontalHeader().setStretchLastSection(True)\n self.tableWidget.verticalHeader().setVisible(False)\n self.tableWidget.verticalHeader().setCascadingSectionResizes(False)\n self.tableWidget.verticalHeader().setDefaultSectionSize(40)\n self.tableWidget.verticalHeader().setHighlightSections(True)\n self.tableWidget.verticalHeader().setMinimumSectionSize(40)\n self.tableWidget.verticalHeader().setSortIndicatorShown(False)\n self.RPlayedCard = QtWidgets.QLabel(Form)\n self.RPlayedCard.setGeometry(QtCore.QRect(490, 120, 102, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.RPlayedCard.setFont(font)\n self.RPlayedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.RPlayedCard.setObjectName(\"RPlayedCard\")\n self.PredictedCard = QtWidgets.QLabel(Form)\n self.PredictedCard.setGeometry(QtCore.QRect(320, 190, 121, 51))\n font = QtGui.QFont()\n font.setFamily(\"微软雅黑\")\n font.setPointSize(10)\n font.setBold(False)\n font.setItalic(False)\n font.setWeight(50)\n self.PredictedCard.setFont(font)\n self.PredictedCard.setStyleSheet(\"\")\n self.PredictedCard.setFrameShape(QtWidgets.QFrame.Panel)\n self.PredictedCard.setLineWidth(1)\n self.PredictedCard.setAlignment(QtCore.Qt.AlignCenter)\n self.PredictedCard.setObjectName(\"PredictedCard\")\n\n self.retranslateUi(Form)\n QtCore.QMetaObject.connectSlotsByName(Form)\n\n def retranslateUi(self, Form):\n _translate = QtCore.QCoreApplication.translate\n Form.setWindowTitle(_translate(\"Form\", \"Hi\"))\n self.WinRate.setText(_translate(\"Form\", \"评分\"))\n self.UserHandCards.setText(_translate(\"Form\", \"手牌\"))\n self.ThreeLandlordCards.setText(_translate(\"Form\", \"地主牌\"))\n 
self.BidWinrate.setText(_translate(\"Form\", \"叫牌胜率:\"))\n self.PreWinrate.setText(_translate(\"Form\", \"局前胜率:\"))\n self.label.setText(_translate(\"Form\", \"游戏状态\"))\n self.LPlayedCard.setText(_translate(\"Form\", \"上家出牌区域\"))\n self.SingleButton.setText(_translate(\"Form\", \"单局\"))\n self.LoopButton.setText(_translate(\"Form\", \" 连续\"))\n self.StopButton.setText(_translate(\"Form\", \"停止\"))\n item = self.tableWidget.horizontalHeaderItem(0)\n item.setText(_translate(\"Form\", \"大\"))\n item = self.tableWidget.horizontalHeaderItem(1)\n item.setText(_translate(\"Form\", \"小\"))\n item = self.tableWidget.horizontalHeaderItem(2)\n item.setText(_translate(\"Form\", \"2\"))\n item = self.tableWidget.horizontalHeaderItem(3)\n item.setText(_translate(\"Form\", \"A\"))\n item = self.tableWidget.horizontalHeaderItem(4)\n item.setText(_translate(\"Form\", \"K\"))\n item = self.tableWidget.horizontalHeaderItem(5)\n item.setText(_translate(\"Form\", \"Q\"))\n item = self.tableWidget.horizontalHeaderItem(6)\n item.setText(_translate(\"Form\", \"J\"))\n item = self.tableWidget.horizontalHeaderItem(7)\n item.setText(_translate(\"Form\", \"10\"))\n item = self.tableWidget.horizontalHeaderItem(8)\n item.setText(_translate(\"Form\", \"9\"))\n item = self.tableWidget.horizontalHeaderItem(9)\n item.setText(_translate(\"Form\", \"8\"))\n item = self.tableWidget.horizontalHeaderItem(10)\n item.setText(_translate(\"Form\", \"7\"))\n item = self.tableWidget.horizontalHeaderItem(11)\n item.setText(_translate(\"Form\", \"6\"))\n item = self.tableWidget.horizontalHeaderItem(12)\n item.setText(_translate(\"Form\", \"5\"))\n item = self.tableWidget.horizontalHeaderItem(13)\n item.setText(_translate(\"Form\", \"4\"))\n item = self.tableWidget.horizontalHeaderItem(14)\n item.setText(_translate(\"Form\", \"3\"))\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n item = self.tableWidget.item(0, 0)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 1)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 2)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 3)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 4)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 5)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 6)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 7)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 8)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 9)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 10)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 11)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 12)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 13)\n item.setText(_translate(\"Form\", \"0\"))\n item = self.tableWidget.item(0, 14)\n item.setText(_translate(\"Form\", \"0\"))\n self.tableWidget.setSortingEnabled(__sortingEnabled)\n self.RPlayedCard.setText(_translate(\"Form\", \"下家出牌区域\"))\n self.PredictedCard.setText(_translate(\"Form\", \"AI出牌区域\"))" }, { "identifier": "GameEnv", "path": "douzero/env/game.py", "snippet": "class GameEnv(object):\n\n def __init__(self, players):\n\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n 
self.acting_player_position = None\n self.player_utility_dict = None\n\n self.players = players\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.num_wins = {'landlord': 0,\n 'farmer': 0}\n\n self.num_scores = {'landlord': 0,\n 'farmer': 0}\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 1,\n 'landlord_up': 1,\n 'landlord_down': 1}\n self.step_count = 0\n\n\n def card_play_init(self, card_play_data):\n self.info_sets['landlord'].player_hand_cards = \\\n card_play_data['landlord']\n self.info_sets['landlord_up'].player_hand_cards = \\\n card_play_data['landlord_up']\n self.info_sets['landlord_down'].player_hand_cards = \\\n card_play_data['landlord_down']\n self.three_landlord_cards = card_play_data['three_landlord_cards']\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n\n\n def game_done(self):\n if len(self.info_sets['landlord'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_up'].player_hand_cards) == 0 or \\\n len(self.info_sets['landlord_down'].player_hand_cards) == 0:\n # if one of the three players discards his hand,\n # then game is over.\n self.compute_player_utility()\n self.update_num_wins_scores()\n\n self.game_over = True\n\n def compute_player_utility(self):\n\n if len(self.info_sets['landlord'].player_hand_cards) == 0:\n self.player_utility_dict = {'landlord': 2,\n 'farmer': -1}\n else:\n self.player_utility_dict = {'landlord': -2,\n 'farmer': 1}\n\n def update_num_wins_scores(self):\n for pos, utility in self.player_utility_dict.items():\n base_score = 2 if pos == 'landlord' else 1\n if utility > 0:\n self.num_wins[pos] += 1\n self.winner = pos\n self.num_scores[pos] += base_score * (2 ** self.bomb_num)\n else:\n self.num_scores[pos] -= base_score * (2 ** self.bomb_num)\n\n def get_winner(self):\n return self.winner\n\n def get_bomb_num(self):\n return self.bomb_num\n\n def step(self, position, action=[]):\n win_rate = 0\n if self.acting_player_position == position:\n action, actions_confidence = self.players[1].act(self.game_infoset)\n # 计算胜率\n win_rate = actions_confidence\n # win_rate = max(actions_confidence, -1)\n # win_rate = min(win_rate, 1)\n # win_rate = str(round(float((win_rate + 1) / 2), 4))\n\n if len(action) > 0:\n self.last_pid = self.acting_player_position\n\n if action in bombs:\n self.bomb_num += 1\n\n self.last_move_dict[\n self.acting_player_position] = action.copy()\n\n self.card_play_action_seq.append((position, action))\n self.update_acting_player_hand_cards(action)\n\n self.played_cards[self.acting_player_position] += action\n\n if self.acting_player_position == 'landlord' and \\\n len(action) > 0 and \\\n len(self.three_landlord_cards) > 0:\n for card in action:\n if len(self.three_landlord_cards) > 0:\n if card in self.three_landlord_cards:\n self.three_landlord_cards.remove(card)\n else:\n break\n self.game_done()\n if not self.game_over:\n self.get_acting_player_position()\n self.game_infoset = self.get_infoset()\n # 返回动作和胜率,只有玩家角色会接受返回值\n action_message = {\"action\": str(''.join([EnvCard2RealCard[c] for c in action])),\n \"win_rate\": 
str(round(float(win_rate), 4))}\n return action_message\n\n def get_last_move(self):\n last_move = []\n if len(self.card_play_action_seq) != 0:\n if len(self.card_play_action_seq[-1][1]) == 0:\n last_move = self.card_play_action_seq[-2][1]\n else:\n last_move = self.card_play_action_seq[-1][1]\n\n return last_move\n\n def get_last_two_moves(self):\n last_two_moves = [[], []]\n for card in self.card_play_action_seq[-2:]:\n last_two_moves.insert(0, card[1])\n last_two_moves = last_two_moves[:2]\n return last_two_moves\n\n def get_acting_player_position(self):\n if self.acting_player_position is None:\n self.acting_player_position = 'landlord'\n\n else:\n if self.acting_player_position == 'landlord':\n self.acting_player_position = 'landlord_down'\n\n elif self.acting_player_position == 'landlord_down':\n self.acting_player_position = 'landlord_up'\n\n else:\n self.acting_player_position = 'landlord'\n\n return self.acting_player_position\n\n def update_acting_player_hand_cards(self, action):\n if action != []:\n # 更新玩家手牌,删除对应的牌\n if self.acting_player_position == self.players[0]:\n for card in action:\n self.info_sets[self.acting_player_position].player_hand_cards.remove(card)\n # 更新另外两个玩家手牌,删除相同数量的牌\n else:\n del self.info_sets[self.acting_player_position].player_hand_cards[0:len(action)]\n self.info_sets[self.acting_player_position].player_hand_cards.sort()\n\n def get_legal_card_play_actions(self):\n mg = MovesGener(\n self.info_sets[self.acting_player_position].player_hand_cards)\n\n action_sequence = self.card_play_action_seq\n\n rival_move = []\n if len(action_sequence) != 0:\n if len(action_sequence[-1][1]) == 0:\n rival_move = action_sequence[-2][1]\n else:\n rival_move = action_sequence[-1][1]\n\n rival_type = md.get_move_type(rival_move)\n rival_move_type = rival_type['type']\n rival_move_len = rival_type.get('len', 1)\n moves = list()\n\n if rival_move_type == md.TYPE_0_PASS:\n moves = mg.gen_moves()\n\n elif rival_move_type == md.TYPE_1_SINGLE:\n all_moves = mg.gen_type_1_single()\n moves = ms.filter_type_1_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_2_PAIR:\n all_moves = mg.gen_type_2_pair()\n moves = ms.filter_type_2_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_3_TRIPLE:\n all_moves = mg.gen_type_3_triple()\n moves = ms.filter_type_3_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_4_BOMB:\n all_moves = mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n moves = ms.filter_type_4_bomb(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_5_KING_BOMB:\n moves = []\n\n elif rival_move_type == md.TYPE_6_3_1:\n all_moves = mg.gen_type_6_3_1()\n moves = ms.filter_type_6_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_7_3_2:\n all_moves = mg.gen_type_7_3_2()\n moves = ms.filter_type_7_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_8_SERIAL_SINGLE:\n all_moves = mg.gen_type_8_serial_single(repeat_num=rival_move_len)\n moves = ms.filter_type_8_serial_single(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_9_SERIAL_PAIR:\n all_moves = mg.gen_type_9_serial_pair(repeat_num=rival_move_len)\n moves = ms.filter_type_9_serial_pair(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_10_SERIAL_TRIPLE:\n all_moves = mg.gen_type_10_serial_triple(repeat_num=rival_move_len)\n moves = ms.filter_type_10_serial_triple(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_11_SERIAL_3_1:\n all_moves = mg.gen_type_11_serial_3_1(repeat_num=rival_move_len)\n moves = 
ms.filter_type_11_serial_3_1(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_12_SERIAL_3_2:\n all_moves = mg.gen_type_12_serial_3_2(repeat_num=rival_move_len)\n moves = ms.filter_type_12_serial_3_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_13_4_2:\n all_moves = mg.gen_type_13_4_2()\n moves = ms.filter_type_13_4_2(all_moves, rival_move)\n\n elif rival_move_type == md.TYPE_14_4_22:\n all_moves = mg.gen_type_14_4_22()\n moves = ms.filter_type_14_4_22(all_moves, rival_move)\n\n if rival_move_type not in [md.TYPE_0_PASS,\n md.TYPE_4_BOMB, md.TYPE_5_KING_BOMB]:\n moves = moves + mg.gen_type_4_bomb() + mg.gen_type_5_king_bomb()\n\n if len(rival_move) != 0: # rival_move is not 'pass'\n moves = moves + [[]]\n\n for m in moves:\n m.sort()\n\n return moves\n\n def reset(self):\n self.card_play_action_seq = []\n\n self.three_landlord_cards = None\n self.game_over = False\n\n self.acting_player_position = None\n self.player_utility_dict = None\n\n self.last_move_dict = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.played_cards = {'landlord': [],\n 'landlord_up': [],\n 'landlord_down': []}\n\n self.last_move = []\n self.last_two_moves = []\n\n self.info_sets = {'landlord': InfoSet('landlord'),\n 'landlord_up': InfoSet('landlord_up'),\n 'landlord_down': InfoSet('landlord_down')}\n\n self.bomb_num = 0\n self.last_pid = 'landlord'\n self.bid_info = [[1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1]]\n self.bid_count = 0\n self.multiply_count = {'landlord': 0,\n 'landlord_up': 0,\n 'landlord_down': 0}\n self.step_count = 0\n\n def get_infoset(self):\n self.info_sets[\n self.acting_player_position].last_pid = self.last_pid\n\n self.info_sets[\n self.acting_player_position].legal_actions = \\\n self.get_legal_card_play_actions()\n\n self.info_sets[\n self.acting_player_position].bomb_num = self.bomb_num\n\n self.info_sets[\n self.acting_player_position].last_move = self.get_last_move()\n\n self.info_sets[\n self.acting_player_position].last_two_moves = self.get_last_two_moves()\n\n self.info_sets[\n self.acting_player_position].last_move_dict = self.last_move_dict\n\n self.info_sets[self.acting_player_position].num_cards_left_dict = \\\n {pos: len(self.info_sets[pos].player_hand_cards)\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n self.info_sets[self.acting_player_position].other_hand_cards = []\n\n '''\n 调整计算其他人手牌的方法,整副牌减去玩家手牌与出过的牌\n for pos in ['landlord', 'landlord_up', 'landlord_down']:\n if pos != self.acting_player_position:\n self.info_sets[\n self.acting_player_position].other_hand_cards += \\\n self.info_sets[pos].player_hand_cards\n '''\n # 把出过的牌中三个子列表合成一个列表\n played_cards_tmp = []\n for i in list(self.played_cards.values()):\n played_cards_tmp.extend(i)\n # 出过的牌和玩家手上的牌\n played_and_hand_cards = played_cards_tmp + self.info_sets[self.acting_player_position].player_hand_cards\n # 整副牌减去出过的牌和玩家手上的牌,就是其他人的手牌\n for i in set(AllEnvCard):\n self.info_sets[\n self.acting_player_position].other_hand_cards.extend([i] * (AllEnvCard.count(i) - played_and_hand_cards.count(i)))\n\n self.info_sets[self.acting_player_position].played_cards = \\\n self.played_cards\n self.info_sets[self.acting_player_position].three_landlord_cards = \\\n self.three_landlord_cards\n self.info_sets[self.acting_player_position].card_play_action_seq = \\\n self.card_play_action_seq\n\n self.info_sets[\n self.acting_player_position].all_handcards = \\\n {pos: self.info_sets[pos].player_hand_cards\n for pos in ['landlord', 'landlord_up', 'landlord_down']}\n\n # Custom bid 
info\n self.info_sets[self.acting_player_position].bid_info = bid_infos[self.acting_player_position]\n\n return deepcopy(self.info_sets[self.acting_player_position])" }, { "identifier": "DeepAgent", "path": "douzero/evaluation/deep_agent.py", "snippet": "class DeepAgent:\n\n def __init__(self, position, model_path):\n self.model_type = \"old\"\n if \"general\" in model_path:\n self.model_type = \"general\"\n elif \"resnet\" in model_path:\n self.model_type = \"resnet\"\n self.model = _load_model(position, model_path, self.model_type)\n\n def act(self, infoset):\n obs = get_obs(infoset, model_type=self.model_type)\n z_batch = torch.from_numpy(obs['z_batch']).float()\n x_batch = torch.from_numpy(obs['x_batch']).float()\n if torch.cuda.is_available():\n z_batch, x_batch = z_batch.cuda(), x_batch.cuda()\n y_pred = self.model.forward(z_batch, x_batch, return_value=True)['values']\n y_pred = y_pred.detach().cpu().numpy()\n\n best_action_index = np.argmax(y_pred, axis=0)[0]\n best_action = infoset.legal_actions[best_action_index]\n best_action_confidence = y_pred[best_action_index]\n return best_action, best_action_confidence" } ]
import GameHelper as gh
import os
import sys
import time
import threading
import pyautogui
import win32gui
import multiprocessing as mp
import DetermineColor as DC
import cv2
import numpy as np
import traceback
import BidModel
import LandlordModel
import FarmerModel
from GameHelper import GameHelper
from PIL import Image
from skimage.metrics import structural_similarity as ssim
from collections import defaultdict
from douzero.env.move_detector import get_move_type
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtWidgets import QTableWidgetItem, QInputDialog, QMessageBox
from PyQt5.QtGui import QPixmap, QIcon
from PyQt5.QtCore import QTime, QEventLoop, Qt
from MainWindow import Ui_Form
from douzero.env.game import GameEnv
from douzero.evaluation.deep_agent import DeepAgent
15212
except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str = str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 
'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position
# -*- coding: utf-8 -*- # Created by: Raf # Modify by: Vincentzyx EnvCard2RealCard = {3: '3', 4: '4', 5: '5', 6: '6', 7: '7', 8: '8', 9: '9', 10: 'T', 11: 'J', 12: 'Q', 13: 'K', 14: 'A', 17: '2', 20: 'X', 30: 'D'} RealCard2EnvCard = {'3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, 'T': 10, 'J': 11, 'Q': 12, 'K': 13, 'A': 14, '2': 17, 'X': 20, 'D': 30} AllEnvCard = [3, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 8, 9, 9, 9, 9, 10, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 12, 13, 13, 13, 13, 14, 14, 14, 14, 17, 17, 17, 17, 20, 30] AllCards = ['D', 'X', '2', 'A', 'K', 'Q', 'J', 'T', '9', '8', '7', '6', '5', '4', '3'] helper = GameHelper() class MyPyQT_Form(QtWidgets.QWidget, Ui_Form): def __init__(self): super(MyPyQT_Form, self).__init__() self.other_hands_cards_str = None self.stop_sign = None self.loop_sign = None self.env = None self.three_landlord_cards_env = None self.three_landlord_cards_real = None self.user_hand_cards_env = None self.user_hand_cards_real = None self.play_order = None self.card_play_data_list = None self.other_hand_cards = None self.other_played_cards_env = None self.other_played_cards_real = None self.user_position = None self.user_position_code = None self.setupUi(self) self.setWindowFlags(QtCore.Qt.WindowMinimizeButtonHint | # 使能最小化按钮 QtCore.Qt.WindowStaysOnTopHint | # 窗体总在最前端 QtCore.Qt.WindowCloseButtonHint) self.setWindowIcon(QIcon(':/pics/favicon.ico')) self.setWindowTitle("DouZero欢乐斗地主v2.0") self.setFixedSize(self.width(), self.height()) # 固定窗体大小 self.move(50, 50) # self.setWindowIcon(QIcon('pics/favicon.ico')) window_pale = QtGui.QPalette() # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(QtGui.QPixmap("pics/bg.png"))) self.setPalette(window_pale) self.SingleButton.clicked.connect(self.game_single) self.LoopButton.clicked.connect(self.game_loop) self.StopButton.clicked.connect(self.stop) # self.Players = [self.RPlayer, self.Player, self.LPlayer] self.Players = [self.RPlayedCard, self.PredictedCard, self.LPlayedCard] self.counter = QTime() # 参数 self.MyConfidence = 0.8 # 我的牌的置信度 self.OtherConfidence = 0.8 # 别人的牌的置信度 self.WhiteConfidence = 0.85 # 检测白块的置信度 self.LandlordFlagConfidence = 0.8 # 检测地主标志的置信度 self.ThreeLandlordCardsConfidence = 0.8 # 检测地主底牌的置信度 self.PassConfidence = 0.7 self.PassConfidence = 0.8 self.WaitTime = 1 # 等待状态稳定延时 self.MyFilter = 40 # 我的牌检测结果过滤参数 self.OtherFilter = 25 # 别人的牌检测结果过滤参数 self.SleepTime = 0.1 # 循环中睡眠时间 self.RunGame = False self.AutoPlay = False self.BidThreshold1 = 65 # 叫地主阈值 self.BidThreshold2 = 72 # 抢地主阈值 self.JiabeiThreshold = ( (85, 72), # 叫地主 超级加倍 加倍 阈值 (85, 75) # 叫地主 超级加倍 加倍 阈值 (在地主是抢来的情况下) ) self.MingpaiThreshold = 92 # 坐标 self.MyHandCardsPos = (180, 560, 1050, 90) # 我的截图区域 self.LPlayedCardsPos = (320, 280, 500, 120) # 左边出牌截图区域 self.RPlayedCardsPos = (600, 280, 500, 120) # 右边出牌截图区域 self.LandlordCardsPos = (600, 33, 220, 103) # 地主底牌截图区域 self.LPassPos = (360, 360, 120, 80) # 左边不出截图区域 self.RPassPos = (940, 360, 120, 80) # 右边不出截图区域 self.PassBtnPos = (200, 450, 1000, 120) # 要不起截图区域 self.GeneralBtnPos = (200, 450, 1000, 120) # 叫地主、抢地主、加倍按钮截图区域 self.LandlordFlagPos = [(1247, 245, 48, 52), (12, 661, 51, 53), (123, 243, 52, 54)] # 地主标志截图区域(右-我-左) self.card_play_model_path_dict = { 'landlord': "baselines/resnet/resnet_landlord.ckpt", 'landlord_up': "baselines/resnet/resnet_landlord_up.ckpt", 'landlord_down': "baselines/resnet/resnet_landlord_down.ckpt" } def game_single(self): self.loop_sign = 0 self.stop_sign = 0 self.detect_start_btn() self.before_start() self.init_cards() def game_loop(self): 
self.loop_sign = 1 self.stop_sign = 0 while True: if self.stop_sign == 1: break self.detect_start_btn() self.before_start() self.init_cards() self.sleep(5000) def stop(self): self.stop_sign = 1 print("按下停止键") try: self.RunGame = False self.loop_sign = 0 self.env.game_over = True self.env.reset() self.init_display() self.PreWinrate.setText("局前胜率: ") self.BidWinrate.setText("叫牌胜率: ") except AttributeError as e: traceback.print_exc() def init_display(self): self.WinRate.setText("评分") self.label.setText("游戏状态") self.label.setStyleSheet('background-color: rgba(255, 0, 0, 0);') self.UserHandCards.setText("手牌") # self.LBrowser.clear() # self.RBrowser.clear() self.LPlayedCard.setText("上家出牌区域") self.RPlayedCard.setText("下家出牌区域") self.PredictedCard.setText("AI出牌区域") self.ThreeLandlordCards.setText("地主牌") self.recorder2zero() for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') def init_cards(self): self.RunGame = True GameHelper.Interrupt = False self.user_hand_cards_real = "" self.user_hand_cards_env = [] # 其他玩家出牌 self.other_played_cards_real = "" self.other_played_cards_env = [] # 其他玩家手牌(整副牌减去玩家手牌,后续再减掉历史出牌) self.other_hand_cards = [] # 三张底牌 self.three_landlord_cards_real = "" self.three_landlord_cards_env = [] # 玩家角色代码:0-地主上家, 1-地主, 2-地主下家 self.user_position_code = None self.user_position = "" # 开局时三个玩家的手牌 self.card_play_data_list = {} # 识别玩家手牌 self.user_hand_cards_real = self.find_my_cards() while len(self.user_hand_cards_real) != 17 and len(self.user_hand_cards_real) != 20: self.detect_start_btn() if not self.RunGame: break self.sleep(200) self.user_hand_cards_real = self.find_my_cards() self.user_hand_cards_env = [RealCard2EnvCard[c] for c in list(self.user_hand_cards_real)] # 识别三张底牌 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] while len(self.three_landlord_cards_env) != 3: self.detect_start_btn() if not self.RunGame: break if len(self.three_landlord_cards_env) > 3: self.ThreeLandlordCardsConfidence += 0.05 elif len(self.three_landlord_cards_env) < 3: self.ThreeLandlordCardsConfidence -= 0.05 self.three_landlord_cards_real = self.find_landlord_cards() self.ThreeLandlordCards.setText("底牌:" + self.three_landlord_cards_real) self.three_landlord_cards_env = [RealCard2EnvCard[c] for c in list(self.three_landlord_cards_real)] # 识别玩家的角色 self.sleep(500) self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) while self.user_position_code is None: self.detect_start_btn() if not self.RunGame: break self.user_position_code = self.find_landlord(self.LandlordFlagPos) self.sleep(200) print("正在出牌人的代码: ", self.user_position_code) if self.user_position_code is None: items = ("地主上家", "地主", "地主下家") item, okPressed = QInputDialog.getItem(self, "选择角色", "未识别到地主,请手动选择角色:", items, 0, False) if okPressed and item: self.user_position_code = items.index(item) else: return self.user_position = ['landlord_up', 'landlord', 'landlord_down'][self.user_position_code] print("我现在在地主的方向:", self.user_position) for player in self.Players: player.setStyleSheet('background-color: rgba(0, 255, 0, 0);') self.Players[self.user_position_code].setStyleSheet('background-color: rgba(0, 255, 0, 0.5);') # 整副牌减去玩家手上的牌,就是其他人的手牌,再分配给另外两个角色(如何分配对AI判断没有影响) for i in set(AllEnvCard): self.other_hand_cards.extend([i] * (AllEnvCard.count(i) - self.user_hand_cards_env.count(i))) self.other_hands_cards_str 
= str(''.join([EnvCard2RealCard[c] for c in self.other_hand_cards]))[::-1] self.cards_recorder(self.other_hands_cards_str) self.card_play_data_list.update({ 'three_landlord_cards': self.three_landlord_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 0) % 3]: self.user_hand_cards_env, ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 1) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 != 1 else self.other_hand_cards[17:], ['landlord_up', 'landlord', 'landlord_down'][(self.user_position_code + 2) % 3]: self.other_hand_cards[0:17] if (self.user_position_code + 1) % 3 == 1 else self.other_hand_cards[17:] }) print("开始对局") print("手牌:", self.user_hand_cards_real) print("地主牌:", self.three_landlord_cards_real) # 生成手牌结束,校验手牌数量 if len(self.card_play_data_list["three_landlord_cards"]) != 3: QMessageBox.critical(self, "底牌识别出错", "底牌必须是3张!", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return if len(self.card_play_data_list["landlord_up"]) != 17 or \ len(self.card_play_data_list["landlord_down"]) != 17 or \ len(self.card_play_data_list["landlord"]) != 20: QMessageBox.critical(self, "手牌识别出错", "初始手牌数目有误", QMessageBox.Yes, QMessageBox.Yes) self.init_display() return # 出牌顺序:0-玩家出牌, 1-玩家下家出牌, 2-玩家上家出牌 self.play_order = 0 if self.user_position == "landlord" else 1 if self.user_position == "landlord_up" else 2 # 创建一个代表玩家的AI ai_players = [0, 0] ai_players[0] = self.user_position
ai_players[1] = DeepAgent(self.user_position, self.card_play_model_path_dict[self.user_position])
4
2023-12-01 04:04:30+00:00
24k
super1207/satoricq
satori.py
[ { "identifier": "AdapterKook", "path": "kook_adapter.py", "snippet": "class AdapterKook:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._access_token = config[\"access_token\"]\n self._http_url = \"https://www.kookapp.cn/api/v3\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = 0\n self._self_id = None\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n \n\n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_url = (await self._api_call(\"/gateway/index?compress=0\"))[\"url\"]\n async with connect(ws_url) as websocket:\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = time.time()\n if now_time - tm > 30:\n tm = now_time\n await websocket.send(json.dumps({\"s\": 2,\"sn\": self._sn}))\n continue\n js = json.loads(reply)\n s = js[\"s\"]\n if s == 5:raise Exception(\"recv reset ws\")\n elif s == 3:pass # heartbeat\n elif s == 1:\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n print(\"kook:ws连接成功\")\n elif s == 0:\n self._sn = js[\"sn\"]\n asyncio.create_task(self._event_deal(js[\"d\"]))\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n print(traceback.format_exc())\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n asyncio.create_task(self._ws_server())\n\n def _kook_msg_to_satori(self,msg_type:int,message:str)->str:\n ret = \"\"\n if msg_type == 2: #图片\n ret += \"<img src={}/>\".format(json.dumps(message))\n else:\n def kook_msg_f(msg):\n ret = \"\"\n is_f = False\n for ch in msg:\n if is_f:\n is_f = False\n ret += ch\n elif ch == \"\\\\\":\n is_f = True\n else:\n ret += ch\n return ret\n \n index = 0\n msg_list = message.split(\"(met)\")\n for it in msg_list:\n if index % 2 == 0:\n ret += satori_to_plain(kook_msg_f(it))\n else:\n if it == \"all\":\n ret += \"<at type=\\\"all\\\"/>\"\n else:\n ret += \"<at id=\\\"{}\\\"/>\".format(it)\n index += 1\n return ret\n\n\n async def _deal_group_message_event(self,data,user_id:str):\n group_id = data[\"target_id\"]\n kook_msg = data[\"content\"]\n extra = data[\"extra\"]\n author = extra[\"author\"]\n msg_type = data[\"type\"]\n\n if msg_type == 10:#卡牌\n return\n satori_msg = self._kook_msg_to_satori(msg_type,kook_msg)\n\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=data[\"msg_timestamp\"],\n platform=\"kook\",\n channel=SatoriChannel(\n id=\"GROUP_\"+group_id,\n type=SatoriChannel.ChannelType.TEXT,\n 
name=extra[\"channel_name\"]\n ),\n message=SatoriMessage(\n id=data[\"msg_id\"],\n content=satori_msg,\n created_at=data[\"msg_timestamp\"]\n ),\n user=SatoriUser(\n id=author[\"id\"],\n name=author[\"username\"],\n avatar=author[\"avatar\"],\n is_bot=author[\"bot\"]\n ),\n member=SatoriGuildMember(\n nick=author[\"nickname\"],\n avatar=author[\"avatar\"]\n ),\n guild=SatoriGuild(\n id=extra[\"guild_id\"]\n ),\n role=SatoriGuildRole(\n id=json.dumps(sorted(author[\"roles\"]))\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_private_message_event(self,data,user_id:str):\n\n kook_msg = data[\"content\"]\n extra = data[\"extra\"]\n author = extra[\"author\"]\n msg_type = data[\"type\"]\n\n if msg_type == 10:#卡牌\n return\n satori_msg = self._kook_msg_to_satori(msg_type,kook_msg)\n\n satori_evt = SatoriPrivateMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=data[\"msg_timestamp\"],\n channel=SatoriChannel(\n id=user_id,\n type=SatoriChannel.ChannelType.TEXT,\n name=author[\"username\"]\n ),\n message=SatoriMessage(\n id=data[\"msg_id\"],\n content=satori_msg,\n created_at=data[\"msg_timestamp\"]\n ),\n user=SatoriUser(\n id=user_id,\n name=author[\"username\"],\n avatar=author[\"avatar\"],\n is_bot=author[\"bot\"]\n ),\n platform=\"kook\"\n ).to_dict()\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n async def _deal_group_increase_event(self,data):\n extra = data[\"extra\"]\n satori_evt = {\n \"id\":self._id,\n \"type\":\"guild-member-added\",\n \"platform\":\"kook\",\n \"self_id\":self._self_id,\n \"timestamp\":data[\"msg_timestamp\"],\n \"guild\":SatoriGuild(id=data[\"target_id\"]).to_dict(),\n \"member\":SatoriGuildMember(joined_at=extra[\"body\"][\"joined_at\"]).to_dict(),\n \"user\":SatoriUser(id=extra[\"body\"][\"user_id\"]).to_dict()\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n\n\n async def _deal_group_evt(self,data):\n user_id:str = data[\"author_id\"]\n if user_id == \"1\": # system message\n tp = data[\"type\"]\n if tp != 255:\n return\n sub_type = data[\"extra\"][\"type\"]\n if sub_type == \"joined_guild\":\n await self._deal_group_increase_event(data)\n else:\n if self._self_id:\n if user_id != self._self_id:\n await self._deal_group_message_event(data,user_id)\n\n\n async def _deal_person_evt(self,data):\n user_id:str = data[\"author_id\"]\n if user_id != 1: # 不是系统消息\n if self._self_id:\n if user_id != self._self_id:\n await self._deal_private_message_event(data,user_id)\n\n\n async def _event_deal(self,data:dict):\n try:\n tp = data[\"channel_type\"]\n if tp == \"GROUP\":\n await self._deal_group_evt(data)\n else:\n await self._deal_person_evt(data)\n except:\n print(traceback.format_exc())\n \n async def _api_call(self,path,data = None) -> dict:\n url:str = self._http_url + path\n headers = {\"Authorization\":\"Bot {}\".format(self._access_token)}\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()[\"data\"]\n else:\n async with httpx.AsyncClient() as client:\n return (await client.post(url,headers=headers,data=data)).json()[\"data\"]\n\n def _make_kook_text(self,text):\n ret = \"\"\n for ch in text:\n if ch in [\"\\\\\",\"*\",\"~\",\"[\",\"(\",\")\",\"]\",\"-\",\">\",\"`\"]:\n ret += \"\\\\\"\n ret += ch\n return ret\n \n async def _satori_to_kook(self,satori_obj) -> [dict]:\n to_send_data = []\n last_type = 1\n for node in satori_obj:\n if isinstance(node,str):\n text = self._make_kook_text(node)\n if last_type == 1 and 
len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"content\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"content\":text\n })\n last_type = 1\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n text = \"(met)all(met)\"\n elif id != None:\n text = \"(met){}(met)\".format(self._make_kook_text(id))\n if last_type == 1 and len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"content\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"content\":text\n })\n last_type = 1\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n kook_img_url = \"\"\n if img_url.startswith(\"https://img.kookapp.cn\"):\n kook_img_url = img_url\n else:\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n else:\n async with httpx.AsyncClient() as client:\n img_content = (await client.get(img_url)).content\n files = {\n 'file':('test',img_content)\n }\n headers = {\"Authorization\":\"Bot {}\".format(self._access_token)}\n async with httpx.AsyncClient() as client:\n ret = (await client.post(self._http_url + \"/asset/create\",files=files,headers=headers)).json()\n kook_img_url = ret[\"data\"][\"url\"]\n to_send_data.append({\n \"type\":2,\n \"content\":kook_img_url\n })\n last_type = 2\n return to_send_data\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_kook(satori_obj)\n if channel_id.startswith(\"GROUP_\"):\n channel_id = int(channel_id[6:])\n to_ret = []\n for it in to_sends:\n ret = await self._api_call(\"/message/create\",{\"content\":it[\"content\"],\"type\":it[\"type\"],\"target_id\":channel_id})\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n else:\n to_ret = []\n for it in to_sends:\n ret = await self._api_call(\"/direct-message/create\",{\"content\":it[\"content\"],\"type\":it[\"type\"],\"target_id\":channel_id})\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n obret = (await self._api_call(\"/user/me\"))\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=get_json_or(obret,\"avatar\",None),\n is_bot=True\n ),\n self_id=obret[\"id\"],\n platform=\"kook\"\n ).to_dict()\n self._self_id = obret[\"id\"]\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n url = \"/user/view?user_id={}&guild_id={}\".format(user_id,guild_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"id\"],\n name=get_json_or(obret,\"username\",None),\n avatar=get_json_or(obret,\"avatar\",None),\n is_bot=get_json_or(obret,\"bot\",None)\n ),\n nick=get_json_or(obret,\"nickname\",None),\n avatar=get_json_or(obret,\"avatar\",None),\n joined_at=get_json_or(obret,\"join_time\",None)\n ).to_dict()\n return satori_ret\n \n async def get_user(self,platform:Optional[str],self_id:Optional[str],user_id:str) -> 
[dict]:\n '''获取用户信息'''\n url = \"/user/view?user_id={}\".format(user_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=obret[\"avatar\"],\n is_bot=obret[\"bot\"],\n ).to_dict()\n return satori_ret\n \n async def get_channel_list(self,platform:Optional[str],self_id:Optional[str],guild_id:str) -> [dict]:\n '''获取频道列表'''\n url = \"/channel/list?guild_id={}\".format(guild_id)\n obret = (await self._api_call(url))\n ret_list = []\n items = get_json_or(obret,\"items\",None)\n for it in items:\n channel_type = it[\"type\"]\n channel_id = \"GROUP_\" + it[\"id\"]\n channel_name = it[\"name\"]\n channel_parent = it[\"parent_id\"]\n if channel_type == 1:\n ret_list.append(SatoriChannel(\n id=channel_id,\n name=channel_name,\n type=SatoriChannel.ChannelType.TEXT,\n parent_id=channel_parent\n ).to_dict())\n page_total = get_json_or(obret,\"data\",1)\n if page_total > 1:\n for i in range(2,page_total + 1):\n url = \"/channel/list?guild_id={}&page={}\".format(guild_id,i)\n obret = (await self._api_call(url))\n items = get_json_or(obret,\"items\",None)\n for it in items:\n channel_type = it[\"type\"]\n channel_id = \"GROUP_\" + it[\"id\"]\n channel_name = it[\"name\"]\n channel_parent = it[\"parent_id\"]\n if channel_type == 1:\n ret_list.append(SatoriChannel(\n id=channel_id,\n name=channel_name,\n type=SatoriChannel.ChannelType.TEXT,\n parent=channel_parent\n ).to_dict())\n return {\"data\":ret_list}" }, { "identifier": "AdapterMihoyo", "path": "mihoyo_adapter.py", "snippet": "class AdapterMihoyo:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._http_url = \"https://bbs-api.miyoushe.com\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = 1\n self._self_id = config[\"bot_id\"]\n self._secret = config[\"secret\"]\n self._villa_id = config[\"villa_id\"]\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n\n async def _send_ws_pack(self,ws,ws_dat,biztype):\n magic = 0xBABEFACE.to_bytes(length=4, byteorder='little', signed=False)\n if biztype == 7:\n pb_pack = bytes(PLogin(\n uid=int(ws_dat[\"uid\"]),\n token=self._villa_id + \".\" + self._secret + \".\" + self._self_id,\n platform=ws_dat[\"platform\"],\n app_id=ws_dat[\"app_id\"],\n device_id=ws_dat[\"device_id\"]\n ))\n elif biztype == 6:\n pb_pack = bytes(PHeartBeat(\n client_timestamp=str(int(round(time.time() * 1000)))\n ))\n else:\n raise Exception(\"unkonw biztype:{}\".format(biztype))\n \n wid = self._sn\n self._sn += 1\n\n flag = 1\n appid = 104\n headerlen = 24\n datalen = headerlen + len(pb_pack)\n\n to_send = magic\n to_send += datalen.to_bytes(length=4, byteorder='little', signed=False)\n to_send += headerlen.to_bytes(length=4, byteorder='little', signed=False)\n to_send += wid.to_bytes(length=8, byteorder='little', signed=False)\n to_send += flag.to_bytes(length=4, byteorder='little', 
signed=False)\n to_send += biztype.to_bytes(length=4, byteorder='little', signed=False)\n to_send += appid.to_bytes(length=4, byteorder='little', signed=True)\n to_send += pb_pack\n\n await ws.send(to_send)\n \n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_dat = (await self._api_call(\"/vila/api/bot/platform/getWebsocketInfo\"))\n # print(ws_dat)\n ws_url = ws_dat[\"websocket_url\"]\n async with connect(ws_url) as websocket:\n await self._send_ws_pack(websocket,ws_dat,biztype=7)\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = time.time()\n if now_time - tm > 30:\n tm = now_time\n await self._send_ws_pack(websocket,ws_dat,biztype=6)\n continue\n biztype = int.from_bytes(reply[24:28],byteorder='little',signed=False)\n if biztype == 7: # 登录返回\n login_reply = PLoginReply().parse(reply[32:])\n if login_reply.code == 0:\n print(\"mihoyo:ws连接成功\")\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n continue\n else:\n print(\"mihoyo:ws连接失败\",login_reply.to_json())\n break\n elif biztype == 53:\n print(\"mihoyo:ws被踢下线\")\n pkoff = PKickOff().parse(reply[32:])\n print(\"mihoyo:\" + pkoff.reason)\n break\n elif biztype == 52:\n print(\"mihoyo:ws服务关机\")\n break\n elif biztype == 6:\n heart_reply = PHeartBeatReply().parse(reply[32:])\n if heart_reply.code != 0:\n print(\"mihoyo:ws心跳失败\")\n break\n elif biztype == 30001: # 正常处理\n evt = RobotEvent().parse(reply[32:]).to_dict()\n asyncio.create_task(self._event_deal(evt))\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n traceback.print_exc()\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def init_after(self) -> None:\n asyncio.create_task(self._ws_server())\n\n def _mihoyo_msg_to_satori(self,content_obj)->str:\n ret = \"\"\n entities = content_obj[\"content\"][\"entities\"]\n text = content_obj[\"content\"][\"text\"]\n l = len(text)\n i = 0\n while i < l:\n for en in entities:\n if en[\"offset\"] == i:\n print(en)\n i += en[\"length\"]\n if en[\"entity\"][\"type\"] == \"mention_all\": # 实际上收不到\n ret += \"<at type=\\\"all\\\"/>\"\n elif en[\"entity\"][\"type\"] == \"mentioned_robot\":\n ret += \"<at id=\\\"{}\\\"/>\".format(en[\"entity\"][\"bot_id\"])\n elif en[\"entity\"][\"type\"] == \"mentioned_user\":\n ret += \"<at id=\\\"{}\\\"/>\".format(en[\"entity\"][\"user_id\"])\n break\n else:\n ret += satori_to_plain(text[i])\n i += 1\n return ret\n async def _deal_group_message_event(self,data):\n extendData = data[\"extendData\"]\n\n sendMessage = extendData[\"sendMessage\"]\n user_id = sendMessage[\"fromUserId\"]\n villaId = sendMessage[\"villaId\"]\n roomId = sendMessage[\"roomId\"]\n\n villaRoomId = villaId + \"_\" + roomId\n\n content_obj = json.loads(sendMessage[\"content\"])\n\n extra_obj = json.loads(content_obj[\"user\"][\"extra\"])\n\n satori_msg = self._mihoyo_msg_to_satori(content_obj) # todo\n\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=int(data[\"sendAt\"]) * 1000,\n platform=\"mihoyo\",\n channel=SatoriChannel(\n id=villaRoomId,\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n 
created_at=int(sendMessage[\"sendAt\"])\n ),\n user=SatoriUser(\n id=user_id,\n name=sendMessage[\"nickname\"],\n avatar=content_obj[\"user\"][\"portraitUri\"]\n ),\n member=SatoriGuildMember(\n nick=sendMessage[\"nickname\"],\n avatar=content_obj[\"user\"][\"portraitUri\"]\n ),\n guild=SatoriGuild(\n id=villaId\n ),\n role=SatoriGuildRole(\n id=extra_obj[\"member_roles\"][\"name\"],\n name=extra_obj[\"member_roles\"][\"name\"]\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _event_deal(self,data:dict):\n try:\n event_type = data[\"type\"]\n if event_type == \"SendMessage\":\n await self._deal_group_message_event(data)\n except:\n print(traceback.format_exc())\n\n \n async def _api_call(self,path,data = None,villa_id = 0) -> dict:\n url:str = self._http_url + path\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret}\n if villa_id == 0:\n headers[\"x-rpc-bot_villa_id\"] = self._villa_id\n else:\n headers[\"x-rpc-bot_villa_id\"] = villa_id\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()[\"data\"]\n else:\n headers[\"Content-Type\"] = \"application/json\"\n async with httpx.AsyncClient() as client:\n ret = (await client.post(url,headers=headers,data=data)).json()\n if ret[\"retcode\"] != 0:\n print(\"mihoyo:\",ret)\n return ret[\"data\"]\n\n \n async def _satori_to_mihoyo(self,satori_obj,villa_id) -> [dict]:\n to_send_data = []\n last_type = 1\n for node in satori_obj:\n if isinstance(node,str):\n text = node\n if last_type == 1 and len(to_send_data) != 0:\n l = len(to_send_data)\n to_send_data[l - 1][\"text\"] += text\n else:\n to_send_data.append({\n \"type\":1,\n \"text\":text,\n \"entities\":[]\n })\n last_type = 1\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n text = \"@全体成员\"\n elif id != None:\n text = \"@\" + id\n else:\n continue\n\n if last_type != 1 or len(to_send_data) == 0:\n to_send_data.append({\n \"type\":1,\n \"text\":\"\",\n \"entities\":[]\n })\n last_type = 1\n\n l = len(to_send_data)\n ll = len(to_send_data[l - 1][\"text\"])\n to_send_data[l - 1][\"text\"] += text\n if type == \"all\":\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mention_all\"\n },\n \"length\":5,\n \"offset\":ll\n })\n else:\n if id.startswith(\"bot_\"):\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mentioned_robot\",\n \"bot_id\": id\n },\n \"length\":len(id) + 1,\n \"offset\":ll\n })\n else:\n to_send_data[l - 1][\"entities\"].append({\n \"entity\": {\n \"type\": \"mentioned_user\",\n \"user_id\": id\n },\n \"length\":len(id) + 1,\n \"offset\":ll\n })\n\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n mihoyo_img_url = \"\"\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n else:\n async with httpx.AsyncClient() as client:\n img_content = (await client.get(img_url)).content\n ext = imghdr.what(file = \"\",h=img_content)\n m = hashlib.md5()\n m.update(img_content)\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret,\"x-rpc-bot_villa_id\":villa_id}\n upload_info_url = self._http_url + \"/vila/api/bot/platform/getUploadImageParams\"\n async with httpx.AsyncClient() as client:\n req = client.build_request(\"GET\",upload_info_url,json={\n 
\"md5\":m.hexdigest(),\n \"ext\":ext\n },headers=headers)\n file_params = (await client.send(req)).json()[\"data\"][\"params\"]\n files = {\n \"x:extra\":file_params[\"callback_var\"][\"x:extra\"],\n \"OSSAccessKeyId\":file_params[\"accessid\"],\n \"signature\":file_params[\"signature\"],\n \"success_action_status\":file_params[\"success_action_status\"],\n \"name\":file_params[\"name\"],\n \"callback\":file_params[\"callback\"],\n \"x-oss-content-type\":file_params[\"x_oss_content_type\"],\n \"key\":file_params[\"key\"],\n \"policy\":file_params[\"policy\"],\n \"Content-Disposition\":file_params[\"content_disposition\"],\n 'file':('test',img_content)\n }\n async with httpx.AsyncClient() as client:\n ret = (await client.post(file_params[\"host\"],files=files)).json()\n mihoyo_img_url = ret[\"data\"][\"url\"]\n to_send_data.append({\n \"type\":2,\n \"url\":mihoyo_img_url,\n })\n last_type = 2\n to_send_data2 = []\n for it in to_send_data:\n type = it[\"type\"]\n if type == 1:\n to_send_data2.append({\n \"object_name\":\"MHY:Text\",\n \"msg_content\":json.dumps({\n \"content\":{\n \"text\":it[\"text\"],\n \"entities\":it[\"entities\"]\n }\n })})\n elif type == 2:\n to_send_data2.append({\n \"object_name\":\"MHY:Image\",\n \"msg_content\":json.dumps({\n \"content\":{\n \"url\":it[\"url\"]\n }\n \n })})\n \n return to_send_data2\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n villa_id = channel_id.split(\"_\")[0]\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_mihoyo(satori_obj,villa_id)\n to_ret = []\n # print(to_sends)\n for it in to_sends:\n it[\"room_id\"] = channel_id.split(\"_\")[1]\n ret = await self._api_call(\"/vila/api/bot/platform/sendMessage\",json.dumps(it),villa_id=villa_id)\n to_ret.append(SatoriMessage(id=ret[\"bot_msg_id\"],content=\"\").to_dict())\n return to_ret\n \n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._self_id,\n is_bot=True\n ),\n self_id=self._self_id,\n platform=\"mihoyo\"\n ).to_dict()\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n\n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n url = self._http_url + \"/vila/api/bot/platform/getMember\"\n headers = {\"x-rpc-bot_id\":self._self_id,\"x-rpc-bot_secret\":self._secret,\"x-rpc-bot_villa_id\":guild_id}\n async with httpx.AsyncClient() as client:\n req = client.build_request(\"GET\",url,json={\n \"uid\":user_id\n },headers=headers)\n obret = (await client.send(req)).json()[\"data\"][\"member\"]\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"basic\"][\"uid\"],\n name=obret[\"basic\"][\"nickname\"],\n avatar=obret[\"basic\"][\"avatar_url\"],\n is_bot=False\n ),\n nick=obret[\"basic\"][\"nickname\"],\n avatar=obret[\"basic\"][\"avatar_url\"],\n joined_at=int(obret[\"joined_at\"] + \"000\")\n ).to_dict()\n return satori_ret" }, { "identifier": "AdapterOnebot", "path": "onebot_adapter.py", "snippet": "class AdapterOnebot:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._http_url = config[\"http_url\"]\n self._ws_url = config[\"ws_url\"]\n if \"access_token\" in config:\n self._access_token = config[\"access_token\"]\n else:\n self._access_token = 
None\n self._is_stop = False\n self._login_status = 3 # DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n\n def _cqarr_to_satori(self,cqarr):\n ret = \"\"\n for node in cqarr:\n if node[\"type\"] == \"text\":\n ret += satori_to_plain(node[\"data\"][\"text\"])\n elif node[\"type\"] == \"at\":\n qq = node[\"data\"][\"qq\"]\n if qq == \"all\":\n ret += \"<at type=\\\"all\\\"/>\"\n else:\n ret += \"<at id={}/>\".format(json.dumps(qq))\n elif node[\"type\"] == \"image\":\n url = node[\"data\"][\"url\"]\n ret += \"<img src={}/>\".format(json.dumps(url))\n return ret\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n async def _ws_server(self:AdapterOnebot) -> None:\n while not self._is_stop:\n try:\n self._login_status = 2 # CONNECT\n async with connect(self._ws_url) as websocket:\n print(\"onebot:ws已经连接\")\n self._login_status = 1 # ONLINE\n try:\n while True:\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n await self._event_deal(json.loads(reply))\n except asyncio.TimeoutError:\n if self._is_stop:\n await websocket.close()\n except asyncio.QueueFull:\n print(\"队列满\")\n except Exception as e:\n print(e) \n except Exception as e:\n print(e)\n print(\"onebot:ws连接已经断开\")\n self._login_status = 3 # DISCONNECT\n asyncio.create_task(_ws_server(self))\n \n async def _event_deal(self,evt:dict):\n '''自己定义的事件转化函数'''\n post_type = evt[\"post_type\"]\n if post_type == \"message\":\n message_type = evt[\"message_type\"]\n sender = evt[\"sender\"]\n if message_type == \"group\":\n channel_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"type\":0,\n \"name\":None,\n \"parent_id\":None\n }\n guild_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"name\":None,\n \"avatar\":None\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":get_json_or(sender,\"nickname\",None),\n \"nick\":get_json_or(sender,\"nickname\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"is_bot\":None\n }\n joined_at = get_json_or(sender,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n member_obj = {\n \"nick\":get_json_or(sender,\"card\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"joined_at\":joined_at\n }\n message_obj = {\n \"id\":str(evt[\"message_id\"]),\n \"content\":self._cqarr_to_satori(_cqmsg_to_arr(evt[\"message\"])),\n \"created_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n role_obj = {\n \"id\":get_json_or(sender, \"role\",\"member\"),\n \"name\":get_json_or(sender,\"role\",\"member\")\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"message-created\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"channel\":channel_obj,\n \"guild\":guild_obj,\n \"member\":member_obj,\n \"message\":message_obj,\n \"role\":role_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n elif message_type == \"private\":\n channel_obj = {\n 
\"id\":str(evt[\"user_id\"]),\n \"type\":1,\n \"name\":None,\n \"parent_id\":None\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":get_json_or(sender,\"nickname\",None),\n \"nick\":get_json_or(sender,\"nickname\",None),\n \"avatar\":get_json_or(sender,\"avatar\",None),\n \"is_bot\":None\n }\n joined_at = get_json_or(sender,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n message_obj = {\n \"id\":str(evt[\"message_id\"]),\n \"content\":self._cqarr_to_satori(_cqmsg_to_arr(evt[\"message\"])),\n \"created_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"message-created\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"channel\":channel_obj,\n \"message\":message_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n elif post_type == \"notice\":\n notice_type = evt[\"notice_type\"]\n if notice_type == \"group_increase\":\n guild_obj = {\n \"id\":\"GROUP_\"+str(evt[\"group_id\"]),\n \"name\":None,\n \"avatar\":None\n }\n member_obj = {\n \"nick\":None,\n \"avatar\":get_json_or(evt,\"avatar\",None),\n \"joined_at\":int(str(evt[\"time\"] ) + \"000\")\n }\n user_obj = {\n \"id\":str(evt[\"user_id\"]),\n \"name\":None,\n \"nick\":None,\n \"avatar\":None,\n \"is_bot\":None\n }\n satori_evt = {\n \"id\":self._id,\n \"type\":\"guild-member-added\",\n \"platform\":\"onebot\",\n \"self_id\":str(evt[\"self_id\"]),\n \"timestamp\":int(str(evt[\"time\"] ) + \"000\"),\n \"guild\":guild_obj,\n \"member\":member_obj,\n \"user\":user_obj\n }\n self._id += 1\n self._queue.put_nowait(satori_evt)\n\n async def _api_call(self,path,data) -> dict:\n url:str = self._http_url + path\n if self._access_token:\n headers = {\"Authorization\":\"Bearer {}\".format(self._access_token)}\n else:\n headers = {}\n async with httpx.AsyncClient() as client:\n # headers[\"Content-Type\"] = \"application/json\"\n return (await client.post(url,headers=headers,data=data)).json()\n \n async def _satori_to_cq(self,satori_obj) -> str:\n ret = \"\"\n for node in satori_obj:\n if isinstance(node,str):\n ret += _cq_text_encode(node)\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n ret += \"[CQ:at,qq=all]\"\n elif id != None:\n ret += \"[CQ:at,qq={}]\".format(_cq_params_encode(id))\n elif node[\"type\"] == \"img\":\n img_url = node[\"attrs\"][\"src\"]\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_url = \"base64://\" + img_url[base64_start + 7:]\n ret += \"[CQ:image,file={}]\".format(_cq_params_encode(img_url)) \n\n return ret\n\n\n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n satori_obj = parse_satori_html(content)\n to_send = await self._satori_to_cq(satori_obj)\n if channel_id.startswith(\"GROUP_\"):\n group_id = int(channel_id[6:])\n ret = await self._api_call(\"/send_group_msg\",{\"group_id\":group_id,\"message\":to_send})\n return [{\"id\":str(ret[\"data\"][\"message_id\"]),\"content\":\"\"}]\n else:\n user_id = int(channel_id)\n ret = await self._api_call(\"/send_private_msg\",{\"user_id\":user_id,\"message\":to_send})\n return [{\"id\":str(ret[\"data\"][\"message_id\"]),\"content\":\"\"}]\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n obret = (await 
self._api_call(\"/get_login_info\",{}))[\"data\"]\n satori_ret = {\n \"user\":{\n \"id\":str(obret[\"user_id\"]),\n \"name\":obret[\"nickname\"],\n \"nick\":obret[\"nickname\"],\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"is_bot\":None\n },\n \"self_id\":str(obret[\"user_id\"]),\n \"platform\":\"onebot\",\n \"status\":self._login_status,\n }\n if platform == None and self_id == None:\n return [satori_ret]\n else:\n return satori_ret\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n obret = (await self._api_call(\"/get_group_member_info\",{\n \"group_id\":int(guild_id[6:]),\n \"user_id\":int(user_id)\n }))[\"data\"]\n joined_at = get_json_or(obret,\"join_time\",None)\n if joined_at:\n joined_at = int(str(joined_at) + \"000\")\n satori_ret = {\n \"user\":{\n \"id\":str(obret[\"user_id\"]),\n \"name\":get_json_or(obret,\"nickname\",None),\n \"nick\":get_json_or(obret,\"card\",None),\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"is_bot\":None\n },\n \"nick\":get_json_or(obret,\"card\",None),\n \"avatar\":get_json_or(obret,\"avatar\",None),\n \"joined_at\":joined_at,\n }\n return satori_ret" }, { "identifier": "Config", "path": "config.py", "snippet": "class Config:\n def __init__(self) -> None:\n self.botlist:list = []\n self.web_port:int = 8080\n self.web_host:str = \"127.0.0.1\"\n self.access_token:str = \"\"\n \n async def read_config(self):\n async with aiofiles.open('config.json', mode='r') as f:\n json_dat = json5.loads(await f.read())\n self.botlist = json_dat[\"botlist\"]\n self.web_port = json_dat[\"web_port\"]\n self.web_host = json_dat[\"web_host\"]\n self.access_token = json_dat[\"access_token\"]" }, { "identifier": "AdapterQQ", "path": "qq_adapter.py", "snippet": "class AdapterQQ:\n def __init__(self,config = {}) -> None:\n '''用于初始化一些配置信息,尽量不要在这里阻塞,因为此处不具备异步环境,如果你需要读写配置文件,请在init_after中进行'''\n self._botqq = config[\"botqq\"]\n self._appid = config[\"appid\"]\n self._token = config[\"token\"]\n if \"withgroup\" in config:\n self._withgroup = config[\"withgroup\"]\n else:\n self._withgroup = None\n self._appsecret = config[\"appsecret\"]\n self._http_url = \"https://api.sgroup.qq.com\"\n self._is_stop = False\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n self._queue = Queue(maxsize=100)\n self._id = 0\n self._sn = None\n self._self_id = None\n self._access_token = None\n self._expires_in = 0\n self.msgid_map = dict()\n # self._self_name = None\n\n\n async def enable(self) -> None:\n '''适配器启用的时候会调用,可以不理,也可以没这个函数\n 配合下面的停用函数,适配器可以得到自己在整个系统中的状态,进而进行一些优化\n 如,如果适配器处于停用状态,适配器可以自行选择关闭网络连接,以节省资源,当然,也可以不理会\n '''\n pass\n\n async def disable(self) -> None:\n '''适配器停用的时候会调用,可以不理,也可以没这个函数'''\n pass\n \n async def release(self) -> None:\n '''适配器释放的时候会调用一次,应该在这里停用ws连接\n 一般认为,适配器会和真正的协议端建立连接,所以,这个函数大多数时候是需要写的\n 但是,这个函数允许资源延迟释放,只要能释放就行\n 你可以在这个函数里面进行数据保存之类的,这种用途下,请阻塞这个函数,直到保存完成\n '''\n self._is_stop = True\n\n async def get_msg(self) -> dict:\n '''阻塞并等待消息返回,如果你的适配器不具备接收消息的能力,请不要写这个函数'''\n return await self._queue.get()\n \n\n async def _ws_recv(self,websocket):\n try:\n reply = await asyncio.wait_for(websocket.recv(),0.1)\n return reply\n except asyncio.TimeoutError:\n return None\n\n async def _ws_connect(self):\n self._login_status = SatoriLogin.LoginStatus.CONNECT\n ws_url = (await self._api_call(\"/gateway\"))[\"url\"]\n async with connect(ws_url) as websocket:\n tm = time.time()\n while not self._is_stop:\n reply = await self._ws_recv(websocket)\n if not reply:\n now_time = 
time.time()\n if now_time - tm > 30:\n tm = now_time\n await websocket.send(json.dumps({\"op\": 1,\"d\": self._sn}))\n continue\n js = json.loads(reply)\n op = js[\"op\"]\n if op == 0: # 事件\n self._sn = js[\"s\"]\n t = js[\"t\"]\n if t == \"READY\":\n print(\"qq:ws连接成功\")\n print(json.dumps(js))\n self._login_status = SatoriLogin.LoginStatus.ONLINE\n else:\n print(json.dumps(js))\n asyncio.create_task(self._deal_event(js))\n elif op == 1: # 心跳\n await websocket.send(json.dumps({\"op\":11}))\n elif op == 7: # 重连\n print(\"qq:服务端要求重连\")\n break\n elif op == 9: # 参数错误\n print(\"qq:参数错误:\",json.dumps(js))\n break\n elif op == 10: # ws建立成功\n if self._withgroup:\n await websocket.send(json.dumps({\n \"op\":2,\n \"d\":{\n \"token\":\"QQBot {}\".format(self._access_token),\n \"intents\":0 | (1 << 0) | (1 << 1) | (1 << 30) | (1 << 25),\n \"shard\":[0, 1],\n }\n }))\n else:\n await websocket.send(json.dumps({\n \"op\":2,\n \"d\":{\n \"token\":\"QQBot {}\".format(self._access_token),\n \"intents\":0 | (1 << 0) | (1 << 1) | (1 << 30),\n \"shard\":[0, 1],\n }\n }))\n elif op == 11: # HTTP Callback ACK\n pass\n\n async def _ws_server(self) -> None:\n while not self._is_stop:\n try:\n await self._ws_connect()\n except:\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n print(traceback.format_exc())\n await asyncio.sleep(3)\n self._login_status = SatoriLogin.LoginStatus.DISCONNECT\n\n async def _token_refresh(self):\n async with httpx.AsyncClient() as client:\n if not self._expires_in or int(self._expires_in) < 60 * 5:\n url = \"https://bots.qq.com/app/getAppAccessToken\"\n ret = (await client.post(url,json={\n \"appId\":self._appid,\n \"clientSecret\":self._appsecret\n })).json()\n self._access_token = ret[\"access_token\"]\n self._expires_in = ret[\"expires_in\"]\n # print(ret)\n\n async def _qqarr_to_satori(self,qqmsg_arr):\n ret = \"\"\n for it in qqmsg_arr:\n if it[\"type\"] == \"text\":\n ret += satori_to_plain(it[\"data\"])\n else:\n if it[\"data\"].startswith(\"<@!\"):\n user_id = it[\"data\"][3:len(it[\"data\"]) - 1]\n ret += \"<at id=\\\"{}\\\">\".format(satori_to_plain(user_id))\n elif it[\"data\"].startswith(\"<@\"):\n user_id = it[\"data\"][2:len(it[\"data\"]) - 1]\n ret += \"<at id=\\\"{}\\\">\".format(satori_to_plain(user_id))\n return ret\n \n async def _deal_channel_event(self,data):\n qqmsg_arr = _qqmsg_to_arr(data[\"content\"])\n # print(\"qqmsg_arr\",qqmsg_arr)\n satori_msg = await self._qqarr_to_satori(qqmsg_arr)\n self.msgid_map[\"CHANNEL_\"+data[\"channel_id\"]] = data[\"id\"]\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._self_id,\n timestamp=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000,\n platform=\"qq_guild\",\n channel=SatoriChannel(\n id=\"CHANNEL_\"+data[\"channel_id\"],\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n created_at=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n user=SatoriUser(\n id=data[\"author\"][\"id\"],\n name=data[\"author\"][\"username\"],\n avatar=data[\"author\"][\"avatar\"],\n is_bot=data[\"author\"][\"bot\"]\n ),\n member=SatoriGuildMember(\n nick=data[\"member\"][\"nick\"],\n avatar=data[\"author\"][\"avatar\"],\n joined_at=int(time.mktime(time.strptime(data[\"member\"][\"joined_at\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n guild=SatoriGuild(\n id=data[\"guild_id\"]\n ),\n role=SatoriGuildRole(\n id=json.dumps(sorted(data[\"member\"][\"roles\"]))\n )\n )\n 
self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_group_event(self,data):\n qqmsg_arr = _qqmsg_to_arr(data[\"content\"])\n # print(\"qqmsg_arr\",qqmsg_arr)\n satori_msg = await self._qqarr_to_satori(qqmsg_arr)\n self.msgid_map[\"GROUP_\"+data[\"group_id\"]] = data[\"id\"]\n satori_evt = SatoriGroupMessageCreatedEvent(\n id=self._id,\n self_id=self._botqq,\n timestamp=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000,\n platform=\"qq_group\",\n channel=SatoriChannel(\n id=\"GROUP_\"+data[\"group_id\"],\n type=SatoriChannel.ChannelType.TEXT,\n ),\n message=SatoriMessage(\n id=data[\"id\"],\n content=satori_msg,\n created_at=int(time.mktime(time.strptime(data[\"timestamp\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ),\n user=SatoriUser(\n id=data[\"author\"][\"id\"]\n ),\n member=SatoriGuildMember(\n ),\n guild=SatoriGuild(\n id=\"GROUP_\"+data[\"group_id\"]\n ),\n role=SatoriGuildRole(\n id=\"unkonw\",\n name=\"unkonw\"\n )\n )\n self._id += 1\n self._queue.put_nowait(satori_evt.to_dict())\n\n async def _deal_event(self,event):\n try:\n type = event[\"t\"]\n if type == \"AT_MESSAGE_CREATE\":\n d = event[\"d\"]\n if (\"channel_id\" in d) and d[\"channel_id\"]:\n await self._deal_channel_event(d)\n else:\n if type == \"GROUP_AT_MESSAGE_CREATE\":\n d = event[\"d\"]\n if (\"group_id\" in d) and d[\"group_id\"]:\n await self._deal_group_event(d)\n except:\n print(traceback.format_exc())\n\n async def _token_refresh_task(self):\n while True:\n try:\n await self._token_refresh()\n index = 0\n while index < 60: # 每60秒检测一次token是否过期\n await asyncio.sleep(1)\n if self._is_stop:\n break\n index += 1\n if self._is_stop:break\n except:\n print(traceback.format_exc())\n\n async def init_after(self) -> None:\n '''适配器创建之后会调用一次,应该在这里进行ws连接等操作,如果不需要,可以不写'''\n try:\n await self._token_refresh()\n except:\n print(traceback.format_exc())\n asyncio.create_task(self._token_refresh_task())\n asyncio.create_task(self._ws_server())\n\n async def _api_call(self,path,data = None) -> dict:\n url:str = self._http_url + path\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid}\n if data == None:\n async with httpx.AsyncClient() as client:\n return (await client.get(url,headers=headers)).json()\n else:\n async with httpx.AsyncClient() as client:\n ret = (await client.post(url,headers=headers,json=data))\n # print(ret.content)\n return ret.json()\n\n def _make_qq_text(self,text:str):\n ret = text\n ret = ret.replace(\"&\",\"&amp;\")\n ret = ret.replace(\"<\",\"&lt;\")\n ret = ret.replace(\">\",\"&gt;\")\n return ret\n \n async def _satori_to_qq(self,satori_obj,platform = \"qq_guild\") -> [dict]:\n to_reply_id = None\n ret_text = \"\"\n ret_img = []\n for node in satori_obj:\n if isinstance(node,str):\n text = self._make_qq_text(node)\n ret_text += text\n else:\n if node[\"type\"] == \"at\":\n type = get_json_or(node[\"attrs\"],\"type\",None)\n id = get_json_or(node[\"attrs\"],\"id\",None)\n if type == \"all\":\n # 注意,机器人不支持at all,不能发,也不能收,这里假装at all了\n ret_text += \"@全体成员\"\n # text = \"<@everyone>\"\n elif id != None:\n ret_text += \"<@{}>\".format(self._make_qq_text(id))\n elif node[\"type\"] == \"img\":\n img_url:str = node[\"attrs\"][\"src\"]\n if img_url.startswith(\"data:image/\"):\n base64_start = img_url.find(\"base64,\")\n img_content = base64.b64decode(img_url[base64_start + 7:])\n ret_img.append(img_content)\n else:\n if platform == \"qq_guild\":\n async with httpx.AsyncClient() as client:\n img_content = 
(await client.get(img_url)).content\n ret_img.append(img_content)\n else:\n ret_img.append(img_url)\n elif node[\"type\"] == \"passive\":\n to_reply_id = node[\"attrs\"][\"id\"]\n \n ret_vec = []\n ret_vec.append({\n \"content\":ret_text,\n \"file_image\":None,\n \"to_reply_id\":to_reply_id\n })\n if len(ret_img) != 0:\n ret_vec[0][\"file_image\"] = ret_img[0]\n for img in ret_img[1:]:\n ret_vec.append({\n \"content\":\"\",\n \"file_image\":img,\n \"to_reply_id\":to_reply_id\n })\n return ret_vec\n \n async def create_message(self,platform:str,self_id:str,channel_id:str,content:str):\n '''发送消息'''\n to_reply_id = self.msgid_map[channel_id]\n satori_obj = parse_satori_html(content)\n to_sends = await self._satori_to_qq(satori_obj,platform)\n # print(to_sends)\n if channel_id.startswith(\"CHANNEL_\") and platform == \"qq_guild\":\n channel_id = channel_id[8:]\n to_ret = []\n for it in to_sends:\n if it[\"to_reply_id\"]:to_reply_id = it[\"to_reply_id\"]\n async with httpx.AsyncClient() as client:\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid,\"Accept\":\"application/json\"}\n url:str = self._http_url + \"/channels/{}/messages\".format(channel_id)\n data = {\n \"msg_id\":to_reply_id,\n \"content\":it[\"content\"]\n }\n if it[\"file_image\"]:\n ret = (await client.post(url,headers=headers,data=data,files={\"file_image\":it[\"file_image\"]})).json()\n else:\n ret = (await client.post(url,headers=headers,json=data)).json()\n # print(ret)\n to_ret.append(SatoriMessage(id=ret[\"id\"],content=\"\").to_dict())\n return to_ret\n elif channel_id.startswith(\"GROUP_\") and platform == \"qq_group\":\n channel_id = channel_id[6:]\n to_ret = []\n msg_seq = 1\n for it in to_sends:\n if it[\"to_reply_id\"]:to_reply_id = it[\"to_reply_id\"]\n async with httpx.AsyncClient() as client:\n headers = {\"Authorization\":\"QQBot {}\".format(self._access_token),\"X-Union-Appid\":self._appid,\"Accept\":\"application/json\"}\n url:str = self._http_url + \"/v2/groups/{}/messages\".format(channel_id)\n data = {\n \"msg_id\":to_reply_id,\n \"content\":it[\"content\"],\n \"msg_type\":0,\n \"msg_seq\":msg_seq,\n # \"image\": 目前暂不支持\n }\n msg_seq += 1\n ret = (await client.post(url,headers=headers,json=data)).json()\n # print(ret)\n to_ret.append(SatoriMessage(id=ret[\"msg_id\"],content=\"\").to_dict())\n return to_ret\n \n async def get_login(self,platform:Optional[str],self_id:Optional[str]) -> [dict]:\n '''获取登录信息,如果platform和self_id为空,那么应该返回一个列表'''\n\n if platform == \"qq_group\":\n return SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._botqq,\n is_bot=True\n ),\n self_id=self._botqq,\n platform=\"qq_group\"\n ).to_dict()\n else: \n obret = (await self._api_call(\"/users/@me\"))\n satori_ret = SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=obret[\"id\"],\n name=obret[\"username\"],\n avatar=obret[\"avatar\"],\n is_bot=True\n ),\n self_id=obret[\"id\"],\n platform=\"qq_guild\"\n ).to_dict()\n self._self_id = obret[\"id\"]\n if platform == \"qq_guild\":\n return satori_ret\n elif platform == None:\n if not self._withgroup:\n return [satori_ret]\n else:\n return [satori_ret,SatoriLogin(\n status=self._login_status,\n user=SatoriUser(\n id=self._botqq,\n is_bot=True\n ),\n self_id=self._botqq,\n platform=\"qq_group\"\n ).to_dict()]\n \n async def get_guild_member(self,platform:Optional[str],self_id:Optional[str],guild_id:str,user_id:str) -> [dict]:\n '''获取群组成员信息'''\n if platform == \"qq_guild\":\n url = 
\"/guilds/{}/members/{}\".format(guild_id,user_id)\n obret = (await self._api_call(url))\n satori_ret = SatoriGuildMember(\n user=SatoriUser(\n id=obret[\"user\"][\"id\"],\n name=obret[\"user\"][\"username\"],\n avatar=obret[\"user\"][\"avatar\"],\n is_bot=obret[\"user\"][\"bot\"]\n ),\n nick=get_json_or(obret,\"nick\",None),\n avatar=obret[\"user\"][\"avatar\"],\n joined_at=int(time.mktime(time.strptime(obret[\"joined_at\"], \"%Y-%m-%dT%H:%M:%S%z\"))) * 1000\n ).to_dict()\n return satori_ret" }, { "identifier": "remove_json_null", "path": "tool.py", "snippet": "def remove_json_null(js) -> dict:\n '''将json中的None字段删除'''\n if isinstance(js,dict):\n st = {}\n for key in js:\n if js[key] != None:\n st[key] = remove_json_null(js[key])\n return st\n elif isinstance(js,list):\n lst = []\n for it in js:\n lst.append(remove_json_null(it))\n return lst\n else:\n return js" } ]
import asyncio import aiohttp import json import uuid from kook_adapter import AdapterKook from mihoyo_adapter import AdapterMihoyo from onebot_adapter import AdapterOnebot from config import Config from aiohttp import web from qq_adapter import AdapterQQ from tool import remove_json_null
17,871
if adapter == None: return web.Response(text="bot not found") if method == "/v1/login.get": ret = await adapter.get_login(platform,self_id) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''在这里处理管理api调用''' # 鉴权 if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''在这里处理其余任何api调用''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''在这里处理websocket''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise "token err" self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) 
self._evt_id += 1 # 读取配置文件 await self._config.read_config() # 创建 adapter for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook":
class Satori: def __init__(self) -> None: self._config:Config = Config() self.adapterlist = [] self.wsmap = {} self._evt_id = 100 async def _get_adapter(self,platform,self_id): ''' 用于获取适配器 ''' for adapter in self.adapterlist: info = adapter["info"] for bot in info: if self_id == bot["self_id"] and bot["platform"] == platform: return adapter["adapter"] return None async def ws_send_json(ws,js) -> None: js = remove_json_null(js) print("--------ws_send_json",json.dumps(js)) await ws.send_json(js) async def _handle_http_normal(self,request:web.Request): print("----http normal",request) '''在这里处理普通api调用''' # 鉴权 if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path platform = request.headers.get("X-Platform") self_id = request.headers.get("X-Self-ID") adapter:AdapterOnebot = await self._get_adapter(platform,self_id) if adapter == None: return web.Response(text="bot not found") if method == "/v1/login.get": ret = await adapter.get_login(platform,self_id) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/guild.member.get": body = await request.json() ret = await adapter.get_guild_member(platform,self_id,body["guild_id"],body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/message.create": body = await request.json() ret = await adapter.create_message(platform,self_id,body["channel_id"],body["content"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/channel.list": body = await request.json() ret = await adapter.get_channel_list(platform,self_id,body["guild_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) elif method == "/v1/user.get": body = await request.json() ret = await adapter.get_user(platform,self_id,body["user_id"]) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_admin(self,request:web.Request): print("----http admin",request) '''在这里处理管理api调用''' # 鉴权 if self._config.access_token != "": if request.headers.get("Authorization") != "Bearer " + self._config.access_token: print("token err") return web.Response(text="token err") method = request.url.path if method == "/v1/admin/login.list": ret = [] for adapter in self.adapterlist: ret += await adapter["adapter"].get_login(None,None) return web.Response(text=json.dumps(remove_json_null(ret)),headers={ "Content-Type":"application/json; charset=utf-8" }) return web.Response(text="method not found") async def _handle_http_foo(self,request:web.Request): '''在这里处理其余任何api调用''' print("--------http other",request) return web.Response(text="method not found") async def _handle_events_ws(self,request:web.Request): '''在这里处理websocket''' ws_id = str(uuid.uuid4()) ws = web.WebSocketResponse() ws.can_prepare(request) await ws.prepare(request) self.wsmap[ws_id] = { "ws":ws, "is_access":False } print("--------http ws",request,ws_id) try: async for msg in ws: if msg.type == aiohttp.WSMsgType.TEXT: data_json = json.loads(msg.data) print("--------recv_ws",json.dumps(msg.data)) op = data_json["op"] if op == 3: if 
self._config.access_token != "": if data_json["body"]["token"] != self._config.access_token: raise "token err" self.wsmap[ws_id]["is_access"] = True async def get_logins(self,ws): logins = [] for adapter in self.adapterlist: logins += await adapter["adapter"].get_login(None,None) await Satori.ws_send_json(ws,{ "op":4, "body":{ "logins":logins } }) asyncio.create_task(get_logins(self,ws)) elif op == 1: async def send_pong(ws): await Satori.ws_send_json(ws,{ "op":2 }) asyncio.create_task(send_pong(ws)) elif msg.type == aiohttp.WSMsgType.ERROR: print('ws connection closed with exception %s' % ws.exception()) finally: del self.wsmap[ws_id] print("--------http ws close",ws_id) return ws async def init_after(self): async def event_loop(self:Satori,adapter:AdapterOnebot): while True: msg = await adapter.get_msg() for wsid in self.wsmap: ws = self.wsmap[wsid] if ws["is_access"]: msg["id"] = self._evt_id asyncio.create_task(Satori.ws_send_json(ws["ws"],{"op":0,"body":msg})) self._evt_id += 1 # 读取配置文件 await self._config.read_config() # 创建 adapter for botcfg in self._config.botlist: if botcfg["platform"] == "onebot": adapter = AdapterOnebot(botcfg) elif botcfg["platform"] == "kook":
adapter = AdapterKook(botcfg)
0
2023-12-03 13:53:47+00:00
24k
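The Satori entry above (its all_code field) sketches a small gateway: a client authenticates over WebSocket with an op-3 identify frame carrying the access token, the server answers with an op-4 ready frame listing the aggregated logins, events are pushed as op-0 frames, and op-1/op-2 serve as ping/pong. Below is a minimal client sketch based on that flow; the WebSocket path and host are illustrative assumptions, since the route registration is not part of this excerpt:

import asyncio
import json

import aiohttp

# Hypothetical endpoint and token: adjust to your deployment's web_host/web_port
# and the access_token from config.json.
WS_URL = "ws://127.0.0.1:8080/v1/events"
ACCESS_TOKEN = "your_access_token"

async def listen_events():
    async with aiohttp.ClientSession() as session:
        async with session.ws_connect(WS_URL) as ws:
            # op 3: identify with the access token (validated by _handle_events_ws)
            await ws.send_json({"op": 3, "body": {"token": ACCESS_TOKEN}})
            async for msg in ws:
                if msg.type != aiohttp.WSMsgType.TEXT:
                    break
                payload = json.loads(msg.data)
                op = payload.get("op")
                if op == 4:    # ready frame with the aggregated login list
                    print("logins:", payload["body"]["logins"])
                elif op == 0:  # event pushed by the server's event_loop
                    print("event:", payload["body"])
                elif op == 2:  # pong for a previously sent {"op": 1} ping
                    pass

# asyncio.run(listen_events())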
aliyun/pai-python-sdk
pai/model.py
[ { "identifier": "git_utils", "path": "pai/common/git_utils.py", "snippet": "def git_clone_repo(git_config: Dict[str, str], source_dir: Optional[str] = None):\ndef _validate_git_config(git_config):\ndef _build_and_run_clone_command(git_config, dest_dir):\ndef _clone_command_for_codeup(git_config, dest_dir):\ndef _clone_command_for_github(git_config, dest_dir):\ndef _clone_command_for_ssh(git_config, dest_dir):\ndef _clone_command_for_github_https(git_config, dest_dir):\ndef _clone_command_for_codeup_https(git_config, dest_dir):\ndef _clone_command(repo_url, dest_dir, branch=None):\ndef _update_url_with_token(repo_url, token):\ndef _update_url_with_username_and_password(repo_url, username, password):\ndef _checkout_commit(git_config, dest_dir):" }, { "identifier": "INSTANCE_TYPE_LOCAL_GPU", "path": "pai/common/consts.py", "snippet": "INSTANCE_TYPE_LOCAL_GPU = \"local_gpu\"" }, { "identifier": "ModelFormat", "path": "pai/common/consts.py", "snippet": "class ModelFormat(object):\n SavedModel = \"SavedModel\"\n FrozenPb = \"FrozenPb\"\n KerasH5 = \"KerasH5\"\n CaffePrototxt = \"Caffe\"\n ONNX = \"ONNX\"\n BladeModel = \"BladeModel\"\n PMML = \"PMML\"\n TorchScript = \"TorchScript\"\n TFLite = \"TFLite\"" }, { "identifier": "ContainerRun", "path": "pai/common/docker_utils.py", "snippet": "class ContainerRun(object):\n \"\"\"A class represent a container run in local.\"\"\"\n\n CONTAINER_STATUS_RUNNING = \"running\"\n CONTAINER_STATUS_EXITED = \"exited\"\n CONTAINER_STATUS_PAUSED = \"paused\"\n\n def __init__(self, container, port: Optional[int] = None):\n \"\"\"Initialize a container run.\n\n Args:\n container: A docker container object.\n port (int): The host port that container is exposed to.\n\n \"\"\"\n self.container = container\n self.port = port\n\n @property\n def status(self):\n self.container.reload()\n return self.container.status\n\n def is_running(self):\n \"\"\"Return True if container is running, otherwise False.\"\"\"\n return self.status == self.CONTAINER_STATUS_RUNNING\n\n def is_terminated(self):\n \"\"\"Return True if container is terminated, otherwise False.\"\"\"\n return self.status in [\n self.CONTAINER_STATUS_EXITED,\n self.CONTAINER_STATUS_PAUSED,\n ]\n\n def is_succeeded(self):\n \"\"\"Return True if container is succeeded, otherwise False.\"\"\"\n return (\n self.status == \"exited\" and self.container.attrs[\"State\"][\"ExitCode\"] == 0\n )\n\n def wait_for_ready(self, interval=5):\n \"\"\"Wait until container enter running state or terminated state.\"\"\"\n while True:\n status = self.status\n if status == self.CONTAINER_STATUS_RUNNING:\n break\n elif status in [self.CONTAINER_STATUS_EXITED, self.CONTAINER_STATUS_PAUSED]:\n raise RuntimeError(\n \"Container is terminated : id={} status={}\".format(\n self.container.id, self.container.status\n )\n )\n time.sleep(interval)\n\n def stop(self):\n if self.is_running():\n self.container.stop()\n\n def start(self):\n if not self.is_running():\n self.container.start()\n\n def delete(self):\n if self.is_running():\n self.container.stop()\n self.container.remove()\n\n def watch(self, show_logs: bool = True):\n \"\"\"Watch container log and wait for container to exit.\"\"\"\n if not show_logs:\n self.container.wait()\n else:\n log_iter = self.container.logs(\n stream=True,\n follow=True,\n )\n for log in log_iter:\n print(log.decode())\n\n self.container.reload()\n exit_code = self.container.attrs[\"State\"][\"ExitCode\"]\n if exit_code != 0:\n raise RuntimeError(\n \"Container run exited failed: 
exit_code={}\".format(exit_code)\n )" }, { "identifier": "run_container", "path": "pai/common/docker_utils.py", "snippet": "def run_container(\n image_uri: str,\n container_name: Optional[str] = None,\n port: Optional[int] = None,\n environment_variables: Optional[Dict[str, str]] = None,\n command: Optional[Union[List[str], str]] = None,\n entry_point: Optional[Union[List[str], str]] = None,\n volumes: Optional[Dict[str, Any]] = None,\n working_dir: Optional[str] = None,\n gpu_count: Optional[int] = None,\n gpu_device_ids: Optional[List[str]] = None,\n gpu_capabilities: Optional[List[List[str]]] = None,\n) -> ContainerRun:\n \"\"\"Run a container in local.\n\n Args:\n image_uri (str): A docker image uri.\n container_name (str, optional): Name of the container.\n port (int, optional): The port to expose.\n environment_variables (Dict[str, str], optional): Environment variables to set\n in the container.\n command (Union[List[str], str], optional): Command to run the container.\n entry_point (Union[List[str], str], optional): Entry point to run the container.\n volumes (Dict[str, Any], optional): Volumes to mount in the container.\n working_dir (str, optional): Working directory in the container.\n gpu_count (int, optional): Number of GPU devices to request. Set to -1 to\n request all available devices.\n To use GPU, set either ``gpu_count`` or ``gpu_device_ids``.\n gpu_device_ids (List[str], optional): List of strings for GPU device IDs,\n corresponding to `NVIDIA_VISIBLE_DEVICES` in the NVIDIA Runtime.\n To use GPU, set either ``gpu_count`` or ``gpu_device_ids``.\n gpu_capabilities (List[List[str]], optional): This parameter corresponds to\n `NVIDIA_DRIVER_CAPABILITIES` in the NVIDIA Runtime. The default value is\n ``[[\"compute\", \"utility\"]]`` if ``gpu_device_ids`` or ``gpu_count`` is set.\n Available capabilities for the NVIDIA driver can be found in\n https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/user-guide.html#driver-capabilities.\n\n Returns:\n ContainerRun: A ContainerRun object.\n\n \"\"\"\n try:\n import docker\n except ImportError:\n raise ImportError(\"Please install docker first: pip install docker\")\n\n client = docker.from_env()\n # use a random host port.\n host_port = randint(49152, 65535)\n\n if gpu_count or gpu_device_ids or gpu_capabilities:\n if not gpu_capabilities:\n gpu_capabilities = [[\"compute\", \"utility\"]]\n device_requests = [\n docker.types.DeviceRequest(\n count=gpu_count,\n device_ids=gpu_device_ids,\n capabilities=gpu_capabilities,\n )\n ]\n else:\n device_requests = []\n\n container = client.containers.run(\n name=container_name,\n entrypoint=entry_point,\n image=image_uri,\n command=command,\n environment=environment_variables,\n ports={port: host_port} if port else None,\n volumes=volumes,\n working_dir=working_dir,\n detach=True,\n device_requests=device_requests,\n )\n container_run = ContainerRun(\n container=container,\n port=host_port,\n )\n return container_run" }, { "identifier": "OssUriObj", "path": "pai/common/oss_utils.py", "snippet": "class OssUriObj(object):\n \"\"\"A class that represents an OSS URI and provides some convenient methods.\"\"\"\n\n def __init__(self, uri: str):\n \"\"\"Constructor for class OssUriObj.\n\n Args:\n uri (str): A string in OSS URI schema: oss://<bucket_name>[.endpoint]/<path/to/file>,\n endpoint in uri is optional.\n \"\"\"\n if not uri.startswith(\"oss://\"):\n raise ValueError(\n \"Invalid OSS URI schema, please provide a string starts with 'oss://'\"\n )\n bucket_name, object_key, 
endpoint, role_arn = self.parse(uri)\n self.bucket_name = bucket_name\n self.object_key = object_key\n self.endpoint = endpoint\n self.role_arn = role_arn\n\n @classmethod\n def from_bucket_key_endpoint(\n cls, bucket_name: str, object_key: str, endpoint: Optional[str] = None\n ) -> \"OssUriObj\":\n \"\"\"Initialize an OSSUri object from bucket_name, object_key and endpoint.\n\n Args:\n bucket_name (str): The name of the OSS bucket.\n object_key (str): OSS object key/path.\n endpoint (str, optional): Endpoint for the OSS bucket.\n\n Returns:\n OssUriObj: An OssUriObj instance represents the specified OSS object.\n\n \"\"\"\n # OSS object key could not contain leading slashes.\n # Document: https://help.aliyun.com/document_detail/273129.html\n if object_key.startswith(\"/\"):\n logger.warning(\n \"OSS object key should not contain leading slashes, the leading\"\n \" slashes will be removed.\"\n )\n object_key = object_key.lstrip(\"/\")\n\n if endpoint:\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n\n uri = f\"oss://{bucket_name}.{endpoint}/{object_key}\"\n else:\n uri = f\"oss://{bucket_name}/{object_key}\"\n return OssUriObj(uri=uri)\n\n @classmethod\n def parse(cls, oss_uri: str) -> Tuple[str, str, str, str]:\n \"\"\"Parse OSS uri string and returns a tuple of (bucket_name, object_key,\n endpoint, role_arn).\n\n Args:\n oss_uri (str): A string in OSS Uri schema: oss://{bucket_name}.{endpoint}/{object_key}.\n\n Returns:\n Tuple: An tuple of [bucket_name, object_key, endpoint, role_arn].\n\n \"\"\"\n parsed_result = urlparse(oss_uri)\n if parsed_result.scheme != \"oss\":\n raise ValueError(\n \"require OSS uri('oss://[bucket_name]/[object_key]') but \"\n \"given '{}'\".format(oss_uri)\n )\n object_key = parsed_result.path\n if object_key.startswith(\"/\"):\n object_key = object_key[1:]\n\n query = parse_qs(parsed_result.query)\n if \".\" in parsed_result.hostname:\n bucket_name, endpoint = parsed_result.hostname.split(\".\", 1)\n else:\n bucket_name = parsed_result.hostname\n # try to get OSS endpoint from url query.\n if \"endpoint\" in query:\n endpoint = query.get(\"endpoint\")[0]\n elif \"host\" in query:\n endpoint = query.get(\"host\")[0]\n else:\n endpoint = None\n role_arn = query.get(\"role_arn\")[0] if \"role_arn\" in query else None\n\n return bucket_name, object_key, endpoint, role_arn\n\n def get_uri_with_endpoint(self, endpoint: str = None) -> str:\n \"\"\"Get an OSS uri string contains endpoint.\n\n Args:\n endpoint (str): Endpoint of the OSS bucket.\n\n Returns:\n str: An string in OSS uri schema contains endpoint.\n\n \"\"\"\n if not endpoint and not self.endpoint:\n raise ValueError(\"Unknown endpoint for the OSS bucket.\")\n\n return \"oss://{bucket_name}.{endpoint}/{object_key}\".format(\n bucket_name=self.bucket_name,\n endpoint=endpoint or self.endpoint,\n object_key=self.object_key,\n )\n\n def get_dir_uri(self):\n \"\"\"Returns directory in OSS uri string format of the original object.\"\"\"\n _, dirname, _ = self.parse_object_key()\n dir_uri = f\"oss://{self.bucket_name}{dirname}\"\n return dir_uri\n\n @property\n def uri(self) -> str:\n \"\"\"Returns OSS uri in string format.\"\"\"\n return \"oss://{bucket_name}/{object_key}\".format(\n bucket_name=self.bucket_name,\n object_key=self.object_key,\n )\n\n def parse_object_key(self) -> Tuple[bool, str, str]:\n \"\"\"Parse the OSS URI object key, returns a tuple of (is_dir, dir_path, file_name).\n\n 
Returns:\n namedtuple: An tuple of is_dir, dir_path, file_name.\n \"\"\"\n object_key = self.object_key.strip()\n if object_key.endswith(\"/\"):\n is_dir, dir_path, file_name = True, os.path.join(\"/\", object_key), None\n else:\n idx = object_key.rfind(\"/\")\n if idx < 0:\n is_dir, dir_path, file_name = False, \"/\", object_key\n else:\n is_dir, dir_path, file_name = (\n False,\n os.path.join(\"/\", object_key[: idx + 1]),\n object_key[idx + 1 :],\n )\n return is_dir, dir_path, file_name" }, { "identifier": "download", "path": "pai/common/oss_utils.py", "snippet": "def download(\n oss_path: Union[str, OssUriObj],\n local_path: str,\n bucket: Optional[oss2.Bucket] = None,\n un_tar=False,\n):\n \"\"\"Download OSS objects to local path.\n\n Args:\n oss_path (str): Source OSS path, could be a single OSS object or a OSS\n directory.\n local_path (str): Local path used to store the data from OSS.\n bucket (oss2.Bucket, optional): OSS bucket used to store the upload data. If it\n is not provided, OSS bucket of the default session will be used.\n un_tar (bool, optional): Whether to decompress the downloaded data. It is only\n work for `oss_path` point to a single file that has a suffix \"tar.gz\".\n\n Returns:\n str: A local file path for the downloaded data.\n\n \"\"\"\n\n bucket, oss_path = _get_bucket_and_path(bucket, oss_path)\n\n if not bucket.object_exists(oss_path) or oss_path.endswith(\"/\"):\n # The `oss_path` represents a \"directory\" in the OSS bucket, download the\n # objects which object key is prefixed with `oss_path`.\n # Note: `un_tar` is not work while `oss_path` is a directory.\n\n oss_path += \"/\" if not oss_path.endswith(\"/\") else \"\"\n iterator = oss2.ObjectIteratorV2(\n bucket=bucket,\n prefix=oss_path,\n )\n keys = [obj.key for obj in iterator if not obj.key.endswith(\"/\")]\n for key in tqdm(keys, desc=f\"Downloading: {oss_path}\"):\n rel_path = os.path.relpath(key, oss_path)\n dest = os.path.join(local_path, rel_path)\n os.makedirs(os.path.dirname(dest), exist_ok=True)\n _download_with_progress(\n dest,\n object_key=key,\n oss_bucket=bucket,\n )\n return local_path\n else:\n # The `oss_path` represents a single file in OSS bucket.\n if oss_path.endswith(\".tar.gz\") and un_tar:\n # currently, only tar.gz format is supported for un_tar after downloading.\n with tempfile.TemporaryDirectory() as temp_dir:\n target_path = os.path.join(temp_dir, os.path.basename(oss_path))\n _download_with_progress(\n target_path,\n object_key=oss_path,\n oss_bucket=bucket,\n )\n with tarfile.open(name=target_path, mode=\"r\") as t:\n t.extractall(path=local_path)\n\n return local_path\n else:\n os.makedirs(local_path, exist_ok=True)\n dest = os.path.join(local_path, os.path.basename(oss_path))\n _download_with_progress(\n dest,\n object_key=oss_path,\n oss_bucket=bucket,\n )\n\n return dest" }, { "identifier": "is_oss_uri", "path": "pai/common/oss_utils.py", "snippet": "def is_oss_uri(uri: Union[str, bytes]) -> bool:\n \"\"\"Determines whether the given uri is an OSS uri.\n\n Args:\n uri (Union[str, bytes]): A string in OSS URI schema:\n oss://<bucket_name>[.endpoint]/<path/to/file>,\n\n\n Returns:\n bool: True if the given uri is an OSS uri, else False.\n\n \"\"\"\n return bool(uri and isinstance(uri, (str, bytes)) and str(uri).startswith(\"oss://\"))" }, { "identifier": "upload", "path": "pai/common/oss_utils.py", "snippet": "def upload(\n source_path: str,\n oss_path: Union[str, OssUriObj],\n bucket: Optional[oss2.Bucket] = None,\n is_tar: Optional[bool] = False,\n) -> str:\n 
\"\"\"Upload local source file/directory to OSS.\n\n Examples::\n\n # compress and upload local directory `./src/` to OSS\n >>> upload(source_path=\"./src/\", oss_path=\"path/to/file\",\n ... bucket=session.oss_bucket, is_tar=True)\n\n\n Args:\n source_path (str): Source file local path which needs to be uploaded, can be\n a single file or a directory.\n oss_path (Union[str, OssUriObj]): Destination OSS path.\n bucket (oss2.Bucket): OSS bucket used to store the upload data. If it is not\n provided, OSS bucket of the default session will be used.\n is_tar (bool): Whether to compress the file before uploading (default: False).\n\n Returns:\n str: A string in OSS URI format. If the source_path is directory, return the\n OSS URI representing the directory for uploaded data, else then\n returns the OSS URI points to the uploaded file.\n \"\"\"\n\n bucket, oss_path = _get_bucket_and_path(bucket, oss_path)\n\n source_path_obj = pathlib.Path(source_path)\n if not source_path_obj.exists():\n raise RuntimeError(\"Source path is not exist: {}\".format(source_path))\n\n if is_tar:\n # compress the local data and upload the compressed source data.\n with tempfile.TemporaryDirectory() as dir_name:\n temp_tar_path = _tar_file(\n source_path, os.path.join(dir_name, \"source.tar.gz\")\n )\n dest_path = (\n os.path.join(oss_path, os.path.basename(temp_tar_path))\n if oss_path.endswith(\"/\")\n else oss_path\n )\n _upload_with_progress(\n filename=temp_tar_path, object_key=dest_path, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, dest_path)\n elif not source_path_obj.is_dir():\n # if source path is a file, just invoke bucket.put_object.\n\n # if the oss_path is endswith slash, the file will be uploaded to\n # \"{oss_path}{filename}\", else the file will be uploaded to \"{oss_path}\".\n dest_path = (\n os.path.join(oss_path, os.path.basename(source_path))\n if oss_path.endswith(\"/\")\n else oss_path\n )\n _upload_with_progress(\n filename=source_path, object_key=dest_path, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, dest_path)\n else:\n # if the source path is a directory, upload all the file under the directory.\n source_files = glob.glob(\n pathname=str(source_path_obj / \"**\"),\n recursive=True,\n )\n if not oss_path.endswith(\"/\"):\n oss_path += \"/\"\n\n files = [f for f in source_files if not os.path.isdir(f)]\n for file_path in files:\n file_path_obj = pathlib.Path(file_path)\n file_relative_path = file_path_obj.relative_to(source_path_obj).as_posix()\n object_key = oss_path + file_relative_path\n _upload_with_progress(\n filename=file_path, object_key=object_key, oss_bucket=bucket\n )\n return \"oss://{}/{}\".format(bucket.bucket_name, oss_path)" }, { "identifier": "generate_repr", "path": "pai/common/utils.py", "snippet": "def generate_repr(repr_obj, *attr_names: str, **kwargs) -> str:\n \"\"\"Generate a string representation of the given object.\n\n Args:\n repr_obj: The object used to generate the string representation.\n attr_names: A list of attribute names to include in the string representation.\n\n Returns:\n str: A string representation of the object.\n\n \"\"\"\n attrs = {name: getattr(repr_obj, name) for name in attr_names}\n attrs.update(kwargs)\n attr_repr = \", \".join([\"{}={}\".format(k, v) for k, v in attrs.items()])\n cls_name = repr_obj.__class__.__name__\n\n return f\"{cls_name}({attr_repr})\"" }, { "identifier": "is_local_run_instance_type", "path": "pai/common/utils.py", "snippet": "def 
is_local_run_instance_type(instance_type: str) -> bool:\n \"\"\"Return True if instance_type is local run instance type.\"\"\"\n return instance_type and instance_type.strip() in [\n INSTANCE_TYPE_LOCAL_GPU,\n INSTANCE_TYPE_LOCAL,\n ]" }, { "identifier": "random_str", "path": "pai/common/utils.py", "snippet": "def random_str(n):\n \"\"\"Random string generation with lower case letters and digits.\n\n Args:\n n: Size of generated random string.\n\n Returns:\n str: generated random string.\n\n \"\"\"\n return \"\".join(\n random.choice(string.ascii_lowercase + string.digits) for _ in range(n)\n )" }, { "identifier": "to_plain_text", "path": "pai/common/utils.py", "snippet": "def to_plain_text(\n input_str: str, allowed_characters=DEFAULT_PLAIN_TEXT_ALLOW_CHARACTERS, repl_ch=\"_\"\n):\n \"\"\"Replace characters in input_str if it is not in allowed_characters.\"\"\"\n return \"\".join([c if c in allowed_characters else repl_ch for c in input_str])" }, { "identifier": "DuplicatedMountException", "path": "pai/exception.py", "snippet": "class DuplicatedMountException(PAIException):\n \"\"\"Raised if a OSS path is mounted twice.\"\"\"" }, { "identifier": "MountPathIsOccupiedException", "path": "pai/exception.py", "snippet": "class MountPathIsOccupiedException(PAIException):\n \"\"\"Raised if target mount path is already used.\"\"\"" }, { "identifier": "ImageInfo", "path": "pai/image.py", "snippet": "class ImageInfo(object):\n \"\"\"This class represents information for an image provided by PAI.\n\n Args:\n image_name (str): The name of the image.\n image_uri (str): The URI of the image.\n framework_name (str): The name of the framework installed in the image.\n framework_version (str, optional): The version of the framework (Default None).\n image_scope (str): The scope of the image, could be 'training', 'inference' or\n 'develop'.\n accelerator_type (str, optional): The type of accelerator. Defaults to None.\n python_version (str, optional): The version of Python. 
Defaults to None.\n \"\"\"\n\n def __repr__(self):\n return (\n \"{}(framework_name={}: framework_version={}: image_scope={}: \"\n \"accelerator_type={}: py_version={})\".format(\n self.__class__.__name__,\n self.framework_name,\n self.framework_version,\n self.image_scope,\n self.accelerator_type,\n self.python_version,\n )\n )\n\n def __init__(\n self,\n image_name: str,\n image_uri: str,\n framework_name: str,\n image_scope: str,\n framework_version: str = None,\n accelerator_type: Optional[str] = None,\n python_version: Optional[str] = None,\n ):\n self.image_name = image_name\n self.image_uri = image_uri\n self.framework_name = framework_name\n self.framework_version = framework_version\n self.accelerator_type = accelerator_type\n self.python_version = python_version\n self.image_scope = image_scope" }, { "identifier": "AsyncPredictor", "path": "pai/predictor.py", "snippet": "class AsyncPredictor(PredictorBase, _ServicePredictorMixin):\n \"\"\"A class that facilitates making predictions to asynchronous prediction service.\n\n Examples::\n\n # Initialize an AsyncPredictor object using the name of a running service.\n async_predictor = AsyncPredictor(service_name=\"example_service\")\n\n # Make a prediction with the service and get the prediction result.\n resp = async_predictor.predict(data=\"YourPredictionData\")\n result = resp.wait()\n\n # Make a prediction with async API.\n import asyncio\n result = asyncio.run(async_predictor.predict_async(data=\"YourPredictionData\"))\n\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n max_workers: Optional[int] = None,\n endpoint_type: str = EndpointType.INTERNET,\n serializer: Optional[SerializerBase] = None,\n session: Optional[Session] = None,\n ):\n \"\"\"Construct a `AsyncPredictor` object using an existing async prediction service.\n\n Args:\n service_name (str): Name of the existing prediction service.\n max_workers (int): The maximum number of threads that can be used to\n execute the given prediction calls.\n endpoint_type (str): Selects the endpoint used by the predictor, which\n should be one of `INTERNET` or `INTRANET`. 
The `INTERNET` endpoint type\n means that the predictor calls the service over a public endpoint, while\n the `INTRANET` endpoint type is over a VPC endpoint.\n serializer (SerializerBase, optional): A serializer object that transforms\n the input Python object for data transmission and deserialize the\n response data to Python object.\n session (Session, optional): A PAI session object used for communicating\n with PAI service.\n \"\"\"\n\n super(AsyncPredictor, self).__init__(\n service_name=service_name,\n session=session or get_default_session(),\n endpoint_type=endpoint_type,\n serializer=serializer,\n )\n self._max_workers = max_workers\n self.executor = ThreadPoolExecutor(max_workers=self._max_workers)\n self._check()\n\n @property\n def max_workers(self):\n return self._max_workers\n\n @max_workers.setter\n def max_workers(self, n: int):\n if hasattr(self, \"executor\"):\n logger.info(\"Waiting for all submitted tasks in the queue to complete...\")\n self.executor.shutdown()\n self._max_workers = n\n self.executor = ThreadPoolExecutor(max_workers=self._max_workers)\n\n def __del__(self):\n \"\"\"wait for all pending tasks to complete before exit.\"\"\"\n if hasattr(self, \"executor\"):\n logger.info(\"Waiting for all pending tasks to complete...\")\n self.executor.shutdown()\n super(AsyncPredictor, self).__del__()\n\n def _check(self):\n config = json.loads(self._service_api_object[\"ServiceConfig\"])\n if config.get(\"metadata\", {}).get(\"type\") != ServiceType.Async:\n logger.warning(\n \"AsyncPredictor is not recommended to make prediction to a standard \"\n \" prediction service.\"\n )\n\n def _get_result(\n self, request_id: str\n ) -> Optional[Tuple[int, Dict[str, str], bytes]]:\n resp = self._send_request(\n method=\"GET\",\n path=_QUEUE_SERVICE_SINK_PATH,\n params={\n \"requestId\": request_id,\n # _raw_ is false because we want to get the encapsulated prediction\n # result in response body.\n \"_raw_\": \"false\",\n },\n )\n logger.debug(\n \"Poll prediction result: request_id=%s status_code=%s, content=%s\",\n request_id,\n resp.status_code,\n resp.content,\n )\n if resp.status_code == 204:\n # Status code 204 means could not find prediction response for the specific\n # request id.\n return\n\n # Raise exception if status code is not 2xx.\n if resp.status_code // 100 != 2:\n raise RuntimeError(\n \"Pulling prediction result failed: status_code={} content={}\".format(\n resp.status_code, resp.content.decode(\"utf-8\")\n )\n )\n return self._parse_encapsulated_response(resp.json()[0])\n\n def _parse_encapsulated_response(self, data) -> Tuple[int, Dict[str, str], bytes]:\n tags = data[\"tags\"]\n # If the status code from prediction service is not 200, a tag with\n # key 'lastCode' will be added to the tags in response.\n status_code = int(tags.get(\"lastCode\", 200))\n data = base64.b64decode(data[\"data\"])\n # currently, headers are not supported in async prediction service.\n headers = dict()\n return status_code, headers, data\n\n async def _get_result_async(\n self, request_id: str\n ) -> Optional[Tuple[int, Dict[str, str], bytes]]:\n resp = await self._send_request_async(\n method=\"GET\",\n path=_QUEUE_SERVICE_SINK_PATH,\n params={\n \"requestId\": request_id,\n # _raw_ is false because we want to get the encapsulated prediction\n # result in response body.\n \"_raw_\": \"false\",\n },\n )\n status_code = resp.status\n content = await resp.read()\n logger.debug(\n \"Get prediction result: request_id=%s status_code=%s, content=%s\",\n request_id,\n status_code,\n 
content,\n )\n if status_code == 204:\n # Status code 204 means could not find prediction response for the specific\n # request id.\n return\n if status_code // 100 != 2:\n raise RuntimeError(\n \"Pulling prediction result failed: status_code={} content={}\".format(\n status_code, content.decode(\"utf-8\")\n )\n )\n data = (await resp.json())[0]\n return self._parse_encapsulated_response(data)\n\n def _poll_result(\n self, request_id: str, wait_config: WaitConfig\n ) -> Tuple[int, Dict[str, str], bytes]:\n # if max_attempts is negative or zero, then wait forever\n attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts\n while attempts != 0:\n attempts -= 1\n result = self._get_result(request_id=request_id)\n if not result:\n time.sleep(wait_config.interval)\n continue\n status_code, headers, content = result\n # check real prediction response\n if status_code // 100 != 2:\n raise PredictionException(\n code=status_code,\n message=f\"Prediction failed: status_code={status_code}\"\n f\" content={content.decode()}\",\n )\n return status_code, headers, content\n\n # Polling prediction result timeout.\n raise RuntimeError(\n f\"Polling prediction result timeout: request_id={request_id}, \"\n f\"total_time={wait_config.max_attempts * wait_config.interval}\"\n )\n\n async def _poll_result_async(\n self, request_id, wait_config: WaitConfig\n ) -> Tuple[int, Dict[str, str], bytes]:\n # if max_attempts is negative or zero, then wait forever\n attempts = -1 if wait_config.max_attempts <= 0 else wait_config.max_attempts\n while attempts != 0:\n attempts -= 1\n result = await self._get_result_async(request_id)\n if not result:\n await asyncio.sleep(wait_config.interval)\n continue\n status_code, headers, content = result\n # check real prediction response\n if status_code // 100 != 2:\n raise PredictionException(\n f\"Prediction failed: status_code={status_code} content={content.decode()}\"\n )\n return status_code, headers, content\n\n # Polling prediction result timeout.\n raise RuntimeError(\n f\"Polling prediction result timeout: request_id={request_id}, \"\n f\"total_time={wait_config.max_attempts * wait_config.interval}\"\n )\n\n def _get_request_id(self, resp: requests.models.Response) -> str:\n if resp.status_code // 100 != 2:\n raise RuntimeError(\n f\"Send prediction request failed. status_code={resp.status_code} \"\n f\"message={resp.text}\"\n )\n\n if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:\n logger.error(\n f\"Send prediction request failed. Missing request id.\"\n f\" status_code={resp.status_code} content={resp.text}\"\n )\n raise RuntimeError(\"Missing request id in response header.\")\n\n request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]\n logger.debug(\n f\"Send prediction request successfully. request_id={request_id}\"\n f\" status_code={resp.status_code}\",\n )\n return request_id\n\n async def _get_request_id_async(self, resp: aiohttp.ClientResponse) -> str:\n content = await resp.read()\n if resp.status != 200:\n raise RuntimeError(\n \"Send request to async prediction service failed: status_code={} \"\n \"content={}\".format(resp.status, content.decode(\"utf-8\"))\n )\n\n if _QUEUE_SERVICE_REQUEST_ID_HEADER not in resp.headers:\n logger.error(\n f\"Send prediction request failed. 
Missing request id.\"\n f\" status_code={resp.status} content={content.decode()}\"\n )\n raise RuntimeError(\"Missing request id in response header.\")\n request_id = resp.headers[_QUEUE_SERVICE_REQUEST_ID_HEADER]\n logger.debug(\n f\"Send prediction request successfully. request_id={request_id}\"\n f\" status_code={resp.status}\",\n )\n return request_id\n\n def _predict_fn(\n self,\n data,\n ):\n \"\"\"Make a prediction with the async prediction service.\"\"\"\n # serialize input data\n data = self._handle_input(data)\n resp = self._send_request(data=data)\n request_id = self._get_request_id(resp)\n logger.debug(\"Async prediction RequestId: \", request_id)\n # poll prediction result\n status, headers, content = self._poll_result(\n request_id=request_id, wait_config=WaitConfig()\n )\n\n return self._handle_output(content)\n\n def _wrap_callback_fn(self, cb: Callable):\n \"\"\"Wrap the callback function to handle the prediction result.\"\"\"\n\n @functools.wraps(cb)\n def _(future: Future):\n return cb(future.result())\n\n return _\n\n def predict(\n self,\n data,\n callback: Optional[Union[Callable, List[Callable]]] = None,\n ):\n \"\"\"Make a prediction with the async prediction service.\n\n The input data is serialized using the `serializer.serialize` method before it\n is sent, and the response body is deserialized using the\n `serializer.deserialize` method the prediction result returns.\n\n Args:\n data: The input data for the prediction. It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n callback (Union[Callable, List[Callable]], optional): A Callback function,\n or a list of callback functions used to process the prediction result.\n\n Returns:\n AsyncTask: The task object that can be used to retrieve the prediction\n result.\n \"\"\"\n self._post_init_serializer()\n future = self.executor.submit(self._predict_fn, data)\n\n if isinstance(callback, Callable):\n callback = [callback]\n\n if callback:\n for cb in callback:\n future.add_done_callback(self._wrap_callback_fn(cb))\n\n return AsyncTask(future=future)\n\n async def predict_async(self, data, wait_config: WaitConfig = WaitConfig()):\n \"\"\"Make a prediction with the async prediction service.\n\n The serializer object for the predictor is responsible for data transformation\n when the 'predict' method is invoked. The input data is serialized using the\n `serializer.serialize` method before it is sent, and the response is\n deserialized using the `serializer.deserialize` method before the prediction\n result returns.\n\n Args:\n data: The input data for the prediction. 
It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n wait_config (WaitConfig): A config object that controls the behavior of\n polling the prediction result.\n\n Returns:\n Prediction result.\n\n \"\"\"\n self._post_init_serializer()\n data = self._handle_input(data)\n resp = await self._send_request_async(data=data)\n request_id = await self._get_request_id_async(resp)\n\n status_code, headers, content = await self._poll_result_async(\n request_id=request_id, wait_config=wait_config\n )\n return self._handle_output(content)\n\n def _raw_predict_fn(self, data, method, path, headers, **kwargs):\n json_data, data = self._handle_raw_input(data)\n resp = self._send_request(\n path=path,\n json=json_data,\n data=data,\n headers=self._build_headers(headers),\n method=method,\n **kwargs,\n )\n request_id = self._get_request_id(resp)\n status, headers, content = self._poll_result(\n request_id, wait_config=WaitConfig()\n )\n return RawResponse(status, headers, content)\n\n def raw_predict(\n self,\n data: Any = None,\n callback: Optional[Union[Callable, List[Callable], None]] = None,\n method: str = \"POST\",\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n **kwargs,\n ) -> AsyncTask:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n callback (Union[Callable, List[Callable]], optional): A Callback function,\n or a list of callback functions used to process the prediction result.\n path (str, optional): Path for the request to be sent to. If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n **kwargs: Additional keyword arguments for the request.\n Returns:\n AsyncTask: The task object that can be used to retrieve the prediction\n result.\n\n Examples:\n\n from pai.predictor import AsyncPredictor, AsyncTask\n\n predictor = AsyncPredictor()\n task: AsyncTask = predictor.raw_predict(data=\"YourPredictionData\")\n print(task.result())\n\n \"\"\"\n\n future = self.executor.submit(\n self._raw_predict_fn, data, method, path, headers, **kwargs\n )\n cbs = [callback] if isinstance(callback, Callable) else callback\n if cbs:\n for cb in cbs:\n future.add_done_callback(self._wrap_callback_fn(cb))\n\n return AsyncTask(future=future)\n\n async def raw_predict_async(\n self,\n data,\n wait_config: WaitConfig = WaitConfig(),\n method: str = \"POST\",\n headers: Optional[Dict[str, str]] = None,\n path: Optional[str] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n wait_config (WaitConfig): A config object that controls the behavior of\n polling the prediction result.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n **kwargs: Additional keyword arguments for the request.\n Returns:\n RawResponse: Prediction result.\n\n \"\"\"\n if self.service_status not in ServiceStatus.completed_status():\n self.wait_for_ready()\n json_data, data = self._handle_raw_input(data)\n\n resp = await self._send_request_async(\n data=data,\n method=method,\n json=json_data,\n path=path,\n headers=headers,\n **kwargs,\n )\n request_id = await self._get_request_id_async(resp)\n # Polling the prediction result.\n status_code, headers, content = await self._poll_result_async(\n request_id=request_id, wait_config=wait_config\n )\n return self._handle_raw_output(status_code, headers, content)" }, { "identifier": "LocalPredictor", "path": "pai/predictor.py", "snippet": "class LocalPredictor(PredictorBase):\n \"\"\"Perform prediction to a local service running with docker.\"\"\"\n\n def __init__(\n self,\n port: int,\n container_id: Optional[str] = None,\n serializer: Optional[SerializerBase] = None,\n ):\n \"\"\"LocalPredictor initializer.\n\n Args:\n port (int): The port of the local service.\n container_id (str, optional): The container id of the local service.\n serializer (SerializerBase, optional): A serializer object that transforms.\n \"\"\"\n self.container_id = container_id\n self.port = port\n self.serializer = serializer or JsonSerializer()\n self._container_run = (\n self._build_container_run(container_id, port=port)\n if self.container_id\n else None\n )\n\n @classmethod\n def _build_container_run(cls, container_id, port):\n try:\n import docker\n except ImportError:\n raise ImportError(\"Please install docker first: pip install docker\")\n client = docker.from_env()\n container = client.containers.get(container_id)\n\n return ContainerRun(container=container, port=port)\n\n def predict(self, data) -> Any:\n \"\"\"Perform prediction with the given data.\n\n Args:\n data: The data to be predicted.\n \"\"\"\n request_data = self.serializer.serialize(data=data)\n response = requests.post(\n url=\"http://127.0.0.1:{port}/\".format(port=self._container_run.port),\n data=request_data,\n )\n\n if response.status_code // 100 != 2:\n raise PredictionException(\n code=response.status_code,\n message=response.content,\n )\n\n return self.serializer.deserialize(response.content)\n\n def _build_headers(\n self, headers: Optional[Dict[str, str]] = None\n ) -> Dict[str, str]:\n headers = headers or dict()\n headers[\"User-Agent\"] = http_user_agent(headers.get(\"User-Agent\"))\n return headers\n\n def _build_url(self, path: Optional[str] = None):\n url = \"http://127.0.0.1:{}\".format(self.port)\n if path:\n if path.startswith(\"/\"):\n path = path[1:]\n url = posixpath.join(url, path)\n return url\n\n def raw_predict(\n self,\n data: Any = None,\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n method: str = \"POST\",\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n timeout(float, tuple(float, float), optional): Timeout setting for the\n request (Default 10).\n Returns:\n RawResponse: Prediction response from the service.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n if isinstance(data, (IOBase, bytes, str)):\n # if data is a file-like object, bytes, or string, it will be sent as\n # request body\n json_data, data = None, data\n else:\n # otherwise, it will be treated as a JSON serializable object and sent as\n # JSON.\n json_data, data = data, None\n header = self._build_headers(headers=headers)\n url = self._build_url(path)\n resp = requests.request(\n url=url,\n json=json_data,\n data=data,\n headers=header,\n method=method,\n timeout=timeout,\n **kwargs,\n )\n resp = RawResponse(\n status_code=resp.status_code,\n content=resp.content,\n headers=dict(resp.headers),\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n return resp\n\n def delete_service(self):\n \"\"\"Delete the docker container that running the service.\"\"\"\n if self._container_run:\n self._container_run.stop()\n\n def wait_for_ready(self):\n self._container_run.wait_for_ready()\n # ensure the server is ready.\n self._wait_local_server_ready()\n time.sleep(5)\n\n def _wait_local_server_ready(\n self,\n interval: int = 5,\n ):\n \"\"\"Wait for the local model server to be ready.\"\"\"\n container_run = self._container_run\n while True:\n try:\n # Check whether the container is still running.\n if not container_run.is_running():\n raise RuntimeError(\n \"Container exited unexpectedly, status: {}\".format(\n container_run.status\n )\n )\n\n # Make a HEAD request to the server, just test for connection.\n requests.head(\n f\"http://127.0.0.1:{container_run.port}/\",\n )\n break\n except requests.ConnectionError:\n # ConnectionError means server is not ready.\n logging.debug(\"Waiting for the container to be ready...\")\n time.sleep(interval)\n continue" }, { "identifier": "Predictor", "path": "pai/predictor.py", "snippet": "class Predictor(PredictorBase, _ServicePredictorMixin):\n \"\"\"Predictor is responsible for making prediction to an online service.\n\n The `predictor.predict` method sends the input data to the online prediction service\n and returns the prediction result. The serializer object of the predictor is\n responsible for data transformation when the `predict` method is invoked. 
The input\n data is serialized using the `serializer.serialize` method before it is sent, and\n the response is deserialized using the `serializer.deserialize` method before the\n prediction result returns.\n\n Examples::\n\n # Initialize a predictor object from an existing service using PyTorch\n # processor.\n torch_predictor = Predictor(service_name=\"example_torch_service\")\n result = torch_predictor.predict(numpy.asarray([[22,33,44], [19,22,33]]))\n assert isinstance(result, numpy.ndarray)\n\n \"\"\"\n\n def __init__(\n self,\n service_name: str,\n endpoint_type: str = EndpointType.INTERNET,\n serializer: Optional[SerializerBase] = None,\n session: Optional[Session] = None,\n ):\n \"\"\"Construct a `Predictor` object using an existing prediction service.\n\n Args:\n service_name (str): Name of the existing prediction service.\n endpoint_type (str): Selects the endpoint used by the predictor, which\n should be one of `INTERNET` or `INTRANET`. The `INTERNET` endpoint type\n means that the predictor calls the service over a public endpoint, while\n the `INTRANET` endpoint type is over a VPC endpoint.\n serializer (SerializerBase, optional): A serializer object that transforms\n the input Python object for data transmission and deserialize the\n response data to Python object.\n session (Session, optional): A PAI session object used for communicating\n with PAI service.\n \"\"\"\n super(Predictor, self).__init__(\n service_name=service_name,\n session=session or get_default_session(),\n endpoint_type=endpoint_type,\n serializer=serializer,\n )\n self._check()\n\n def _check(self):\n config = json.loads(self._service_api_object[\"ServiceConfig\"])\n if config.get(\"metadata\", {}).get(\"type\") == ServiceType.Async:\n logger.warning(\n \"Predictor is not recommended to make prediction to a async\"\n \" prediction service.\"\n )\n\n def predict(self, data):\n \"\"\"Make a prediction with the online prediction service.\n\n The serializer object for the predictor is responsible for data transformation\n when the 'predict' method is invoked. The input data is serialized using the\n `serializer.serialize` method before it is sent, and the response is\n deserialized using the `serializer.deserialize` method before the prediction\n result returns.\n\n Args:\n data: The input data for the prediction. It will be serialized using the\n serializer of the predictor before transmitted to the prediction\n service.\n\n Returns:\n object: Prediction result.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n self._post_init_serializer()\n data = self._handle_input(data)\n resp = self._send_request(\n data,\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n return self._handle_output(\n resp.content,\n )\n\n def raw_predict(\n self,\n data: Any = None,\n path: Optional[str] = None,\n headers: Optional[Dict[str, str]] = None,\n method: str = \"POST\",\n timeout: Optional[Union[float, Tuple[float, float]]] = None,\n **kwargs,\n ) -> RawResponse:\n \"\"\"Make a prediction with the online prediction service.\n\n Args:\n data (Any): Input data to be sent to the prediction service. If it is a\n file-like object, bytes, or string, it will be sent as the request body.\n Otherwise, it will be treated as a JSON serializable object and sent as\n JSON.\n path (str, optional): Path for the request to be sent to. 
If it is provided,\n it will be appended to the endpoint URL (Default None).\n headers (dict, optional): Request headers.\n method (str, optional): Request method, default to 'POST'.\n timeout(float, tuple(float, float), optional): Timeout setting for the\n request (Default 10).\n **kwargs: Additional keyword arguments for the request.\n Returns:\n RawResponse: Prediction response from the service.\n\n Raises:\n PredictionException: Raise if status code of the prediction response does\n not equal 2xx.\n \"\"\"\n json_data, data = self._handle_raw_input(data)\n resp = self._send_request(\n data=data,\n json=json_data,\n method=method,\n path=path,\n headers=headers,\n timeout=timeout,\n **kwargs,\n )\n if resp.status_code // 100 != 2:\n raise PredictionException(resp.status_code, resp.content)\n\n resp = RawResponse(\n status_code=resp.status_code,\n content=resp.content,\n headers=dict(resp.headers),\n )\n return resp" }, { "identifier": "ServiceType", "path": "pai/predictor.py", "snippet": "class ServiceType(object):\n Standard = \"Standard\"\n Async = \"Async\"" }, { "identifier": "SerializerBase", "path": "pai/serializers.py", "snippet": "class SerializerBase(ABC):\n \"\"\"Abstract class for creating a Serializer class for predictor.\"\"\"\n\n @abstractmethod\n def serialize(self, data) -> bytes:\n \"\"\"Serialize the input data to bytes for transmitting.\"\"\"\n\n @abstractmethod\n def deserialize(self, data: bytes):\n \"\"\"Deserialize the data from raw bytes to Python object .\"\"\"\n\n def inspect_from_service(\n self, service_name: str, *, session: Optional[Session] = None\n ):\n \"\"\"Inspect the online prediction service to complete the serializer instance\n initialization.\n\n The implementation of the `inspect_from_service` method is optional. 
You only\n need to implement it if your serializer requires additional information from\n service metadata or if it needs to send a request to the service in order to\n be initialized.\n\n \"\"\"" }, { "identifier": "Session", "path": "pai/session.py", "snippet": "class Session(ResourceAPIsContainerMixin):\n \"\"\"A class responsible for communicating with PAI services.\"\"\"\n\n def __init__(\n self,\n region_id: str,\n workspace_id: Optional[str] = None,\n credential_config: Optional[CredentialConfig] = None,\n oss_bucket_name: Optional[str] = None,\n oss_endpoint: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"PAI Session Initializer.\n\n Args:\n credential_config (:class:`alibabacloud_credentials.models.Config`, optional):\n The credential config used to access the Alibaba Cloud.\n region_id (str): The ID of the Alibaba Cloud region where the service\n is located.\n workspace_id (str, optional): ID of the workspace used in the default\n session.\n oss_bucket_name (str, optional): The name of the OSS bucket used in the\n session.\n oss_endpoint (str, optional): The endpoint for the OSS bucket.\n \"\"\"\n\n if not region_id:\n raise ValueError(\"Region ID must be provided.\")\n\n self._credential_config = credential_config\n self._region_id = region_id\n self._workspace_id = workspace_id\n self._oss_bucket_name = oss_bucket_name\n self._oss_endpoint = oss_endpoint\n\n header = kwargs.pop(\"header\", None)\n super(Session, self).__init__(header=header)\n\n @property\n def region_id(self) -> str:\n return self._region_id\n\n @property\n def is_inner(self) -> bool:\n return self._region_id in INNER_REGION_IDS\n\n @property\n def oss_bucket_name(self) -> str:\n return self._oss_bucket_name\n\n @property\n def oss_endpoint(self) -> str:\n return self._oss_endpoint\n\n @property\n def credential_config(self) -> CredentialConfig:\n return self._credential_config\n\n @property\n def workspace_name(self):\n if hasattr(self, \"_workspace_name\") and self._workspace_name:\n return self._workspace_name\n\n if not self._workspace_id:\n raise ValueError(\"Workspace id is not set.\")\n workspace_api_obj = self.workspace_api.get(workspace_id=self._workspace_id)\n self._workspace_name = workspace_api_obj[\"WorkspaceName\"]\n return self._workspace_name\n\n @property\n def provider(self) -> str:\n caller_identity = self._acs_sts_client.get_caller_identity().body\n return caller_identity.account_id\n\n @property\n def workspace_id(self) -> str:\n \"\"\"ID of the workspace used by the session.\"\"\"\n return self._workspace_id\n\n @property\n def console_uri(self) -> str:\n \"\"\"The web console URI for PAI service.\"\"\"\n if self.is_inner:\n return \"https://pai-next.alibaba-inc.com\"\n else:\n return \"https://pai.console.aliyun.com/console\"\n\n def _init_oss_config(\n self,\n ):\n \"\"\"Initialize a OssConfig instance.\"\"\"\n if not self._oss_bucket_name:\n # If OSS bucket name is not provided, use the default OSS storage URI\n # that is configured for the workspace.\n default_oss_uri = self.workspace_api.get_default_storage_uri(\n self.workspace_id\n )\n if not default_oss_uri:\n raise RuntimeError(\n \"No default OSS URI is configured for the workspace.\"\n )\n oss_uri_obj = OssUriObj(default_oss_uri)\n self._oss_bucket_name = oss_uri_obj.bucket_name\n\n if not self._oss_endpoint:\n self._oss_endpoint = self._get_default_oss_endpoint()\n\n def _get_oss_auth(self):\n auth = oss2.ProviderAuth(\n credentials_provider=CredentialProviderWrapper(\n config=self._credential_config,\n )\n )\n return auth\n\n 
@property\n def oss_bucket(self):\n \"\"\"A OSS2 bucket instance used by the session.\"\"\"\n if not self._oss_bucket_name or not self._oss_endpoint:\n self._init_oss_config()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=self._oss_endpoint,\n bucket_name=self._oss_bucket_name,\n )\n return oss_bucket\n\n def save_config(self, config_path=None):\n \"\"\"Save the configuration of the session to a local file.\"\"\"\n attrs = {key.lstrip(\"_\"): value for key, value in vars(self).items()}\n config = {\n key: value\n for key, value in attrs.items()\n if key in _DEFAULT_CONFIG_KEYS and value is not None\n }\n\n config_path = config_path or DEFAULT_CONFIG_PATH\n os.makedirs(os.path.dirname(config_path), exist_ok=True)\n with open(config_path, \"w\") as f:\n f.write(json.dumps(config, indent=4))\n logger.info(\"Write PAI config succeed: config_path=%s\" % config_path)\n\n def patch_oss_endpoint(self, oss_uri: str):\n oss_uri_obj = OssUriObj(oss_uri)\n if oss_uri_obj.endpoint:\n return oss_uri\n\n # patch endpoint using current OSS bucket endpoint.\n endpoint = self.oss_bucket.endpoint\n if endpoint.startswith(\"http://\"):\n endpoint = endpoint.lstrip(\"http://\")\n elif endpoint.startswith(\"https://\"):\n endpoint = endpoint.lstrip(\"https://\")\n return \"oss://{bucket_name}.{endpoint}/{key}\".format(\n bucket_name=oss_uri_obj.bucket_name,\n endpoint=endpoint,\n key=oss_uri_obj.object_key,\n )\n\n def _get_default_oss_endpoint(self) -> str:\n \"\"\"Returns a default OSS endpoint.\"\"\"\n\n # OSS Endpoint document:\n # https://help.aliyun.com/document_detail/31837.html\n internet_endpoint = \"oss-{}.aliyuncs.com\".format(self.region_id)\n internal_endpoint = \"oss-{}-internal.aliyuncs.com\".format(self.region_id)\n\n return (\n internet_endpoint\n if is_domain_connectable(internal_endpoint)\n else internet_endpoint\n )\n\n def get_oss_bucket(self, bucket_name: str, endpoint: str = None) -> oss2.Bucket:\n \"\"\"Get a OSS bucket using the credentials of the session.\n\n Args:\n bucket_name (str): The name of the bucket.\n endpoint (str): Endpoint of the bucket.\n\n Returns:\n :class:`oss2.Bucket`: A OSS bucket instance.\n\n \"\"\"\n endpoint = endpoint or self._oss_endpoint or self._get_default_oss_endpoint()\n oss_bucket = oss2.Bucket(\n auth=self._get_oss_auth(),\n endpoint=endpoint,\n bucket_name=bucket_name,\n )\n return oss_bucket\n\n @classmethod\n def get_storage_path_by_category(\n cls, category: str, dir_name: Optional[str] = None\n ) -> str:\n \"\"\"Get an OSS storage path for the resource.\n\n Args:\n category (str): The category of the resource.\n dir_name (str, optional): The directory name of the resource.\n\n Returns:\n str: A OSS storage path.\n\n \"\"\"\n dir_name = dir_name or datetime.now().strftime(\"%Y%m%d_%H%M%S_%f\")\n storage_path = posixpath.join(\"pai\", category, dir_name).strip()\n\n if not storage_path.endswith(\"/\"):\n storage_path += \"/\"\n return storage_path\n\n def is_supported_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for training.\"\"\"\n instance_generator = make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n return bool(machine_spec)\n\n def is_gpu_training_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for training.\"\"\"\n instance_generator = 
make_list_resource_iterator(self.job_api.list_ecs_specs)\n machine_spec = next(\n (\n item\n for item in instance_generator\n if item[\"InstanceType\"] == instance_type\n ),\n None,\n )\n if not machine_spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for training job. \"\n \"Please provide a supported instance type.\"\n )\n return machine_spec[\"AcceleratorType\"] == \"GPU\"\n\n def is_supported_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is supported for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n return bool(spec)\n\n def is_gpu_inference_instance(self, instance_type: str) -> bool:\n \"\"\"Check if the instance type is GPU instance for inference.\"\"\"\n res = self.service_api.describe_machine()[\"InstanceMetas\"]\n spec = next(\n (item for item in res if item[\"InstanceType\"] == instance_type), None\n )\n\n if not spec:\n raise ValueError(\n f\"Instance type {instance_type} is not supported for deploying. \"\n \"Please provide a supported instance type.\"\n )\n return bool(spec[\"GPU\"])" }, { "identifier": "get_default_session", "path": "pai/session.py", "snippet": "def get_default_session() -> \"Session\":\n \"\"\"Get the default session used by the program.\n\n If the global default session is set, the function will try to initialize\n a session from config file.\n\n Returns:\n :class:`pai.session.Session`: The default session.\n\n \"\"\"\n global _default_session\n if not _default_session:\n config = load_default_config_file()\n if not config:\n return\n _default_session = Session(**config)\n return _default_session" } ]
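# A minimal usage sketch tying together the SerializerBase and Predictor snippets above.
# It is not part of the dataset record: the service name "example_service" is a placeholder,
# and only the serialize()/deserialize() hooks and the Predictor(service_name=..., serializer=...)
# constructor documented in those snippets are assumed.
import json

from pai.predictor import Predictor
from pai.serializers import SerializerBase


class JsonListSerializer(SerializerBase):
    """Serialize a Python object to JSON bytes and decode the JSON response body."""

    def serialize(self, data) -> bytes:
        return json.dumps(data).encode("utf-8")

    def deserialize(self, data: bytes):
        return json.loads(data)


predictor = Predictor(
    service_name="example_service",  # placeholder service name
    serializer=JsonListSerializer(),
)
result = predictor.predict([[1.0, 2.0, 3.0]])  # serialized, sent, then deserialized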
import copy import distutils.dir_util import json import logging import os.path import posixpath import shlex import shutil import tempfile import textwrap import time import requests from typing import Any, Dict, Iterator, List, Optional, Tuple, Union from addict import Dict as AttrDict from oss2 import ObjectIterator from .common import git_utils from .common.consts import INSTANCE_TYPE_LOCAL_GPU, ModelFormat from .common.docker_utils import ContainerRun, run_container from .common.oss_utils import OssUriObj, download, is_oss_uri, upload from .common.utils import ( generate_repr, is_local_run_instance_type, random_str, to_plain_text, ) from .exception import DuplicatedMountException, MountPathIsOccupiedException from .image import ImageInfo from .predictor import AsyncPredictor, LocalPredictor, Predictor, ServiceType from .serializers import SerializerBase from .session import Session, get_default_session from .estimator import AlgorithmEstimator
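# A small sketch (not part of the record) of the asynchronous client described in the
# AsyncPredictor snippet: predict() returns an AsyncTask backed by a thread pool, while
# predict_async() is a coroutine that polls the queue service for the result. The service
# name and request payloads below are placeholders.
import asyncio

from pai.predictor import AsyncPredictor, AsyncTask

predictor = AsyncPredictor(service_name="example_async_service")

# Thread-pool based API: submit the request and attach a callback for the result.
task: AsyncTask = predictor.predict(
    data={"prompt": "hello"},
    callback=lambda result: print("prediction:", result),
)
print(task.result())  # block until the polled prediction result is available


# Coroutine based API: await several predictions concurrently.
async def main():
    return await asyncio.gather(
        predictor.predict_async({"prompt": "foo"}),
        predictor.predict_async({"prompt": "bar"}),
    )


asyncio.run(main())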
17212
service_name=service_name, session=self.session, serializer=serializer, ) print( "View the service detail by accessing the console URI: \n{}".format( predictor.console_uri ) ) if wait: predictor.wait_for_ready() return predictor def _wait_service_visible(self, service_name, attempts=3, interval=2): """Wait for the service to be visible in DescribeService API. hack: https://aone.alibaba-inc.com/v2/project/1134421/bug#viewIdentifier=5dfb195e2e2b84f6b2f24718&openWorkitemIdentifier=50192431 """ while attempts > 0: obj = self.session.service_api.get(service_name) if "ServiceUid" in obj: return attempts -= 1 time.sleep(interval) logger.warning("DescribeService API failed to get the Service object.") def _build_service_config( self, service_name: str = None, instance_count: int = None, instance_type: str = None, resource_config: Union[ResourceConfig, Dict[str, Any]] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, ) -> Dict[str, Any]: """Build a service config dictionary used to create a PAI EAS service.""" self.model_data = self._upload_model_data() resource_config = ( ResourceConfig(**resource_config) if resource_config and isinstance(resource_config, dict) else None ) if resource_config and instance_type: raise ValueError( f"Only one of 'instance_type' and 'resource_config' " f"is required, but both have been provided: instance_type" f"={instance_type}, resource_config=" f"{resource_config}." ) inference_spec = InferenceSpec( self._get_inference_spec().to_dict() if self.inference_spec else dict() ) if self.model_data: if not inference_spec.is_container_serving(): # if model_data is an OSS URI with endpoint, truncate the endpoint. oss_uri_obj = OssUriObj(self.model_data) model_path_uri = "oss://{bucket_name}/{key}".format( bucket_name=oss_uri_obj.bucket_name, key=oss_uri_obj.object_key, ) inference_spec.add_option("model_path", model_path_uri) else: try: inference_spec.mount( self.model_data, mount_path=DefaultServiceConfig.model_path, ) except DuplicatedMountException as e: # ignore duplicated mount logger.info("Model is already mounted the container: %s", e) if service_type: inference_spec.add_option("metadata.type", service_type) if inference_spec.is_container_serving(): inference_spec.add_option("metadata.rpc.proxy_path", "/") if service_name: inference_spec.add_option("name", service_name) if instance_count: inference_spec.add_option("metadata.instance", instance_count) if instance_type: inference_spec.add_option("cloud.computing.instance_type", instance_type) elif resource_config: inference_spec.add_option("metadata.cpu", resource_config.cpu) inference_spec.add_option("metadata.memory", resource_config.memory) if resource_config.gpu: inference_spec.add_option("metadata.gpu", resource_config.gpu) if resource_config.gpu_memory: inference_spec.add_option( "metadata.gpu_memory", resource_config.gpu_memory ) if resource_config.gpu: logger.warning( "Parameters 'gpu' is set, the 'gpu_memory' parameter " "does not take effect." ) if resource_id: inference_spec.add_option("metadata.resource", resource_id) if options: inference_spec.merge_options(options=options) return inference_spec.to_dict() def _deploy_local( self, instance_type: str, serializer: SerializerBase = None, wait: bool = True,
# Copyright 2023 Alibaba, Inc. or its affiliates. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.getLogger(__name__) # Reserved ports for internal use, do not use them for service _RESERVED_PORTS = [8080, 9090] class DefaultServiceConfig(object): """Default configuration used in creating prediction service.""" # Listen Port listen_port = 8000 # Default model path in container model_path = "/eas/workspace/model/" # Default user code path in container code_path = "/ml/usercode/" class ResourceConfig(object): """A class that represents the resource used by a PAI prediction service instance.""" def __init__(self, cpu: int, memory: int, gpu: int = None, gpu_memory: int = None): """ResourceConfig initializer. The public resource group does not support requesting GPU resources with `ResourceConfig`. Use the 'gpu' and 'gpu_memory' parameter only for services deployed to dedicated resource groups that provide GPU machine instances. Args: cpu (int): The number of CPUs that each instance requires. memory (int): The amount of memory that each instance requires, must be an integer, Unit: MB. gpu (int): The number of GPUs that each instance requires. gpu_memory (int): The amount of GPU memory that each instance requires. The value must be an integer, Unit: GB. PAI allows memory resources of a GPU to be allocated to multiple instances. If you want multiple instances to share the memory resources of a GPU, set the gpu parameter to 0. If you set the ``gpu`` parameter to 1, each instance occupies a GPU and the gpu_memory parameter does not take effect. .. note:: **Important** PAI does not enable the strict isolation of GPU memory. To prevent out of memory (OOM) errors, make sure that the GPU memory used by each instance does not exceed the requested amount. """ self.cpu = cpu self.memory = memory self.gpu = gpu self.gpu_memory = gpu_memory def __repr__(self): return ( f"ResourceConfig(cpu={self.cpu}, memory={self.memory}MB, gpu={self.gpu or 0}," f" gpu_memory={self.gpu_memory or 0}GB)" ) def __str__(self): return self.__repr__() def to_dict(self): """Transform the ResourceConfig instance to a dictionary. Returns: dict: """ res = { "cpu": self.cpu, "gpu": self.gpu, "gpu_memory": self.gpu_memory, "memory": self.memory, } return {k: v for k, v in res.items() if v is not None} class InferenceSpec(object): """A class used to describe how to create a prediction service. InferenceSpec is using to describe how the model is serving in PAI. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. Example of how to config a InferneceSpec:: >>> # build an inference_spec that using XGBoost processor. 
>>> infer_spec = InferenceSpec(processor="xgboost") >>> infer_spec.metadata.rpc.keepalive = 1000 >>> infer_spec.warm_up_data_path = "oss://bucket-name/path/to/warmup-data" >>> infer_spec.add_option("metadata.rpc.max_batch_size", 8) >>> print(infer_spec.processor) xgboost >>> print(infer_spec.metadata.rpc.keepalive) 1000 >>> print(infer_spec.metadata.rpc.max_batch_size) 8 >>> print(infer_spec.to_dict()) {'processor': 'xgboost', 'metadata': {'rpc': {'keepalive': 1000, 'max_batch_size': 8}}, 'warm_up_data_path': 'oss://bucket-name/path/to/warmup-data'} """ def __init__(self, *args, **kwargs): """InferenceSpec initializer. Args: **kwargs: Parameters of the inference spec. """ properties = kwargs.pop("__properties", []) cfg_dict = copy.deepcopy(kwargs) cfg_dict = {k: v for k, v in cfg_dict.items() if not k.startswith("_")} if args: if len(args) > 1: raise TypeError() cfg_dict.update(args[0]) super(InferenceSpec, self).__setattr__( "_cfg_dict", self._transform_value(cfg_dict) ) super(InferenceSpec, self).__setattr__("__properties", properties) def __repr__(self): return json.dumps(self.to_dict(), sort_keys=True, indent=4) def _transform_value(self, value): if isinstance(value, (List, Tuple)): return [self._transform_value(item) for item in value] elif isinstance(value, (Dict, AttrDict)): return AttrDict( {key: self._transform_value(value) for key, value in value.items()} ) return value def __missing__(self, name): return self._cfg_dict.__missing__(name) def __setitem__(self, name, value): return self._cfg_dict.__setitem__(name, self._transform_value(value)) def __setattr__(self, name, value): if name in getattr(self, "__properties"): super(InferenceSpec, self).__setattr__(name, self._transform_value(value)) else: self._cfg_dict.__setattr__(name, self._transform_value(value)) def __getattr__(self, item): if item.startswith("_"): return getattr(self, item) return self._cfg_dict.__getitem__(item) def __contains__(self, item): return item in self._cfg_dict def to_dict(self) -> Dict: """Return a dictionary that represent the InferenceSpec.""" return self._cfg_dict.to_dict() def add_option(self, name: str, value): """Add an option to the inference_spec instance. Args: name (str): Name of the option to set, represented as the JSON path of the parameter for the InferenceSpec. To view the full supported parameters, please see the following hyperlink: `Parameters of model services <https://help.aliyun.com/document_detail/450525.htm>`_. value: Value for the option. Examples: >>> infer_spec = InferenceSpec(processor="tensorflow_gpu_1.12") >>> infer_spec.add_option("metadata.rpc.keepalive", 10000) >>> infer_spec.metadata.rpc.keepalive 10000 >>> infer_spec.to_dict() {'processor': 'tensorflow_gpu_1.12', 'metadata': {'rpc': {'keepalive': 10000}}} """ src = self._transform_value(value) for k in reversed(name.split(".")): src = {k: src} self._cfg_dict.update(AttrDict(src)) def merge_options(self, options: Dict[str, Any]): """Merge options from a dictionary.""" for key, value in options.items(): self.add_option(key, value) @classmethod def from_dict(cls, config: Dict[str, Any]) -> "InferenceSpec": """Initialize a InferenceSpec from a dictionary. You can use this method to initialize a InferenceSpec instance from a dictionary. Returns: :class:`pai.model.InferenceSpec`: A InferenceSpec instance. 
""" config = config or dict() return cls(**config) def is_container_serving(self): return "containers" in self._cfg_dict @classmethod def _upload_source_dir(cls, source_dir, session): """Upload source files to OSS bucket.""" if not os.path.exists(source_dir): raise ValueError(f"Input source code path does not exist: {source_dir}.") if not os.path.isdir(source_dir): raise ValueError( f"Input source code path should be a directory: {source_dir}." ) target_dir = session.get_storage_path_by_category(category="inference_src") # upload local script data to the OSS bucket. uploaded_source_code = upload( source_dir, target_dir, session.oss_bucket, ) logger.debug("Uploaded source code to OSS: %s", uploaded_source_code) return uploaded_source_code def mount( self, source: str, mount_path: str, session: Session = None, ) -> Dict[str, Any]: """Mount a source storage to the running container. .. note:: If source is a local path, it will be uploaded to the OSS bucket and mounted. If source is a OSS path, it will be mounted directly. Args: source (str): The source storage to be attached, currently only support OSS path in OSS URI format and local path. mount_path (str): The mount path in the container. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: Dict[str, Any]: The storage config. Raises: DuplicateMountException: If the mount path is already used or source OSS path is mounted to the container. Examples:: # Mount a OSS storage path to the running container. >>> inference_spec.mount("oss://<YourOssBucket>/path/to/directory/model.json", ... "/ml/model/") # 'Mount' a local path to the running container. >>> inference_spec.mount("/path/to/your/data/", "/ml/model/") """ session = session or get_default_session() # TODO: supports more storages, such as NAS, PAI Dataset, PAI CodeSource, etc. if not isinstance(source, str): raise ValueError( "Parameter should be a string which represents an OSS storage path" " or a local file path." ) if "storage" in self._cfg_dict: configs = self._cfg_dict.get("storage", []) else: configs = [] uris = set() for conf in configs: # check if target mount path is already used. if conf.get("mount_path") == mount_path: raise MountPathIsOccupiedException( f"The mount path '{mount_path}' has already been used." ) mount_uri = conf.get("oss", {}).get("path") uris.add(mount_uri) if is_oss_uri(source): oss_uri_obj = OssUriObj(source) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } elif os.path.exists(source): # if source is a local path, upload it to OSS bucket and use OSS URI # as storage source. oss_path = session.get_storage_path_by_category("model_data") oss_uri = upload( source_path=source, oss_path=oss_path, bucket=session.oss_bucket ) oss_uri_obj = OssUriObj(oss_uri) storage_config = { "mount_path": mount_path, "oss": {"path": oss_uri_obj.get_dir_uri()}, } else: raise ValueError( "Source path is not a valid OSS URI or a existing local path." ) # check if the source OSS Path is already mounted to the container. if oss_uri_obj.get_dir_uri() in uris: raise DuplicatedMountException( f"Source OSS path '{oss_uri_obj.get_dir_uri()}' is already " f"mounted to the container." 
) configs.append(storage_config) self.storage = configs return storage_config def container_serving_spec( command: str, image_uri: Union[str, ImageInfo], source_dir: Optional[str] = None, git_config: Optional[Dict[str, Any]] = None, port: Optional[int] = None, environment_variables: Optional[Dict[str, str]] = None, requirements: Optional[List[str]] = None, requirements_path: Optional[str] = None, health_check: Optional[Dict[str, Any]] = None, session: Optional[Session] = None, ) -> InferenceSpec: """A convenient function to create an InferenceSpec instance that serving the model with given container and script. Examples:: infer_spec: InferenceSpec = container_serving_spec( command="python run.py", source_dir="./model_server/", image_uri="<ServingImageUri>", ) m = Model( model_data="oss://<YourOssBucket>/path/to/your/model", inference_spec=infer_spec, ) m.deploy( instance_type="ecs.c6.xlarge" ) Args: command (str): The command used to launch the Model server. source_dir (str): A relative path or an absolute path to the source code directory used to load model and launch the HTTP server, it will be uploaded to the OSS bucket and mounted to the container. If there is a ``requirements.txt`` file under the directory, it will be installed before the prediction server started. If 'git_config' is provided, 'source_dir' should be a relative location to a directory in the Git repo. With the following GitHub repo directory structure: .. code:: |----- README.md |----- src |----- train.py |----- test.py if you need 'src' directory as the source code directory, you can assign source_dir='./src/'. git_config (Dict[str, str]): Git configuration used to clone the repo. Including ``repo``, ``branch``, ``commit``, ``username``, ``password`` and ``token``. The ``repo`` is required. All other fields are optional. ``repo`` specifies the Git repository. If you don't provide ``branch``, the default value 'master' is used. If you don't provide ``commit``, the latest commit in the specified branch is used. ``username``, ``password`` and ``token`` are for authentication purpose. For example, the following config: .. code:: python git_config = { 'repo': 'https://github.com/modelscope/modelscope.git', 'branch': 'master', 'commit': '9bfc4a9d83c4beaf8378d0a186261ffc1cd9f960' } results in cloning the repo specified in 'repo', then checking out the 'master' branch, and checking out the specified commit. image_uri (str): The Docker image used to run the prediction service. port (int): Expose port of the server in container, the prediction request will be forward to the port. The environment variable ``LISTENING_PORT`` in the container will be set to this value. Default to 8000. environment_variables (Dict[str, str], optional): Dictionary of environment variable key-value pairs to set on the running container. requirements (List[str], optional): A list of Python package dependency, it will be installed before the serving container run. requirements_path (str, optional): A absolute path to the requirements.txt in the container. health_check (Dict[str, Any], optional): The health check configuration. If it not set, A TCP readiness probe will be used to check the health of the HTTP server. session (Session, optional): A PAI session instance used for communicating with PAI service. Returns: :class:`pai.model.InferenceSpec`: An InferenceSpec instance. 
""" session = session or get_default_session() if git_config: updated_args = git_utils.git_clone_repo( git_config=git_config, source_dir=source_dir, ) source_dir = updated_args["source_dir"] if not port: port = DefaultServiceConfig.listen_port elif int(port) in _RESERVED_PORTS: raise ValueError( "Reserved port {} is not allowed to use as serving port.".format(port), ) if source_dir: if not os.path.exists(source_dir): raise ValueError("Source directory {} does not exist.".format(source_dir)) if not os.path.isdir(source_dir): raise ValueError( "Source directory {} is not a directory.".format(source_dir) ) code_mount_path = DefaultServiceConfig.code_path # build the command for serving container. command = textwrap.dedent( f"""\ # change working directory to code mount path. cd {code_mount_path} {command} """ ) if not requirements_path and os.path.exists( os.path.join(source_dir, "requirements.txt") ): requirements_path = posixpath.join(code_mount_path, "requirements.txt") else: code_mount_path = None requirements_path = None if isinstance(image_uri, ImageInfo): image_uri = image_uri.image_uri environment_variables = environment_variables or dict() container_spec = { "image": image_uri, "port": port, "script": command, "env": [ {"name": key, "value": str(value)} for key, value in environment_variables.items() ] if environment_variables else [], } if health_check: container_spec["health_check"] = health_check if requirements: container_spec["prepare"] = {"pythonRequirements": requirements} if requirements_path: logger.warning( "If the parameter 'requirements' is set, the requirements_path " "parameter will be ignored." ) elif requirements_path: container_spec["prepare"] = { "pythonRequirementsPath": requirements_path, } inference_spec = InferenceSpec(containers=[container_spec]) # mount the uploaded serving scripts to the serving container. if source_dir: inference_spec.mount( source_dir, code_mount_path, session=session, ) return inference_spec class _BuiltinProcessor(object): """Helper class uses for getting the builtin processor""" PMML = "pmml" XGBoost = "xgboost" SupportedFrameworkAcceleratorVersionConfig = { "tensorflow": { "cpu": [ "1.12", "1.14", "1.15", "2.3", ], "gpu": [ "1.12", "1.14", "1.15", ], }, "pytorch": { "cpu": [ "1.6", ], "gpu": [ "1.6", ], }, } # Hard code default processor for specific model format. 
ModelFormatDefaultProcessorMapping = { ModelFormat.PMML: "pmml", ModelFormat.SavedModel: "tensorflow_cpu_2.3", ModelFormat.TorchScript: "pytorch_cpu_1.6", ModelFormat.FrozenPb: "pytorch_cpu_1.6", ModelFormat.CaffePrototxt: "caffe_cpu", ModelFormat.ONNX: "onnx_cu100", } @classmethod def get_default_by_model_format(cls, model_format: str) -> str: """Get the default processor for a specific model format.""" if model_format in cls.ModelFormatDefaultProcessorMapping: return cls.ModelFormatDefaultProcessorMapping[model_format] @classmethod def from_framework_version( cls, framework_name, framework_version, accelerator=None ): accelerator = accelerator or "cpu" versions = cls.SupportedFrameworkAcceleratorVersionConfig.get( framework_name, dict() ).get(accelerator, []) if framework_version in versions: return "{}_{}_{}".format(framework_name, accelerator, framework_version) else: logger.warning( "Could not find the processor for the framework_version({} {}), use the" " latest processor".format(framework_name, framework_version) ) return "{}_{}_{}".format(framework_name, accelerator, versions[-1]) class ModelBase(object): """A class represent ModelBase.""" def __init__( self, model_data: str, inference_spec: Optional[InferenceSpec] = None, session: Session = None, ): self.model_data = model_data self.inference_spec = inference_spec self.session = session or get_default_session() def download(self, target_dir: str): """Download the model data from OSS to local directory. Args: target_dir (str): The target directory to download the model data. Returns: str: Local directory path stores the model data. """ if not self.model_data: raise ValueError("Could not find the model data for this model.") if not is_oss_uri(self.model_data): raise RuntimeError("Download method only support model data stored in OSS.") self._download_model_data(target_dir) return target_dir def _download_model_data(self, target_dir): if not self.model_data: return logger.info(f"Prepare model data to local directory: {target_dir}") if self.model_data.startswith("oss://"): oss_uri = OssUriObj(self.model_data) oss_bucket = self.session.get_oss_bucket(oss_uri.bucket_name) download( oss_path=oss_uri.object_key, local_path=target_dir, bucket=oss_bucket, un_tar=True, ) else: if not os.path.exists(self.model_data): raise ValueError(f"Model data path does not exist: {self.model_data}") os.makedirs(target_dir, exist_ok=True) if os.path.isfile(self.model_data): shutil.copy( self.model_data, os.path.join(target_dir, os.path.basename(self.model_data)), ) else: distutils.dir_util.copy_tree(self.model_data, target_dir) def _upload_model_data(self): """Upload the model artifact to OSS bucket if self.model_data is a local file path. """ if not self.model_data: return elif is_oss_uri(self.model_data): return self.model_data elif not os.path.exists(self.model_data): raise RuntimeError(f"Model data path does not exist: {self.model_data}") dest_oss_path = self.session.get_storage_path_by_category(category="model_data") upload_model_data = upload( source_path=self.model_data, oss_path=dest_oss_path, bucket=self.session.oss_bucket, ) return upload_model_data def list_model_files(self, uri_format: bool = False) -> Iterator[str]: """List model files under the model path. Args: uri_format (bool): If True, return the model file path in OSS URI format. Returns: Iterator[str]: Iterator of model files. 
""" if not self.model_data: raise ValueError("Model data path is not specified.") if not is_oss_uri(self.model_data): raise ValueError("Method only support model data stored in OSS.") oss_uri_obj = OssUriObj(self.model_data) bucket = self.session.get_oss_bucket( bucket_name=oss_uri_obj.bucket_name, ) def _get_relative_path(obj_key: str): # if the model_data is reference an object, return the object file # name. if oss_uri_obj.object_key == obj_key: return os.path.basename(obj_key) path = obj_key[len(oss_uri_obj.object_key) :] return path.lstrip("/") if path.startswith("/") else path obj_iter = ObjectIterator(bucket=bucket, prefix=oss_uri_obj.object_key) for obj_info in obj_iter: if uri_format: yield f"oss://{bucket.bucket_name}/{obj_info.key}" else: yield _get_relative_path(obj_info.key) def _get_inference_spec(self): return self.inference_spec def deploy( self, service_name: str, instance_count: Optional[int] = 1, instance_type: Optional[str] = None, resource_config: Optional[Union[Dict[str, int], ResourceConfig]] = None, resource_id: Optional[str] = None, options: Optional[Dict[str, Any]] = None, service_type: Optional[str] = None, wait: bool = True, serializer: Optional["SerializerBase"] = None, **kwargs, ): """Deploy a prediction service with the model.""" if is_local_run_instance_type(instance_type): return self._deploy_local( instance_type=instance_type, serializer=serializer, wait=wait, ) else: return self._deploy( service_name=service_name, instance_count=instance_count, instance_type=instance_type, resource_config=resource_config, resource_id=resource_id, service_type=service_type, options=options, wait=wait, serializer=serializer, ) def _generate_service_name(self): s = os.path.basename(self.model_data.rstrip("/")) + random_str(8) return to_plain_text(s) def _deploy( self, service_name: str = None, instance_count: int = 1, instance_type: str = None, resource_config: Union[Dict[str, int], ResourceConfig] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, wait: bool = True, serializer: "SerializerBase" = None, ): """Create a prediction service.""" if not service_name: service_name = self._generate_service_name() logger.info( "Service name is not specified, using a generated service" f" name to create the service: service_name={service_name}" ) config = self._build_service_config( service_name=service_name, instance_count=instance_count, instance_type=instance_type, service_type=service_type, resource_config=resource_config, resource_id=resource_id, options=options, ) service_name = self.session.service_api.create(config=config) self._wait_service_visible(service_name) if service_type == ServiceType.Async: predictor = AsyncPredictor( service_name=service_name, session=self.session, serializer=serializer, ) else: predictor = Predictor( service_name=service_name, session=self.session, serializer=serializer, ) print( "View the service detail by accessing the console URI: \n{}".format( predictor.console_uri ) ) if wait: predictor.wait_for_ready() return predictor def _wait_service_visible(self, service_name, attempts=3, interval=2): """Wait for the service to be visible in DescribeService API. 
hack: https://aone.alibaba-inc.com/v2/project/1134421/bug#viewIdentifier=5dfb195e2e2b84f6b2f24718&openWorkitemIdentifier=50192431 """ while attempts > 0: obj = self.session.service_api.get(service_name) if "ServiceUid" in obj: return attempts -= 1 time.sleep(interval) logger.warning("DescribeService API failed to get the Service object.") def _build_service_config( self, service_name: str = None, instance_count: int = None, instance_type: str = None, resource_config: Union[ResourceConfig, Dict[str, Any]] = None, resource_id: str = None, service_type: str = None, options: Dict[str, Any] = None, ) -> Dict[str, Any]: """Build a service config dictionary used to create a PAI EAS service.""" self.model_data = self._upload_model_data() resource_config = ( ResourceConfig(**resource_config) if resource_config and isinstance(resource_config, dict) else None ) if resource_config and instance_type: raise ValueError( f"Only one of 'instance_type' and 'resource_config' " f"is required, but both have been provided: instance_type" f"={instance_type}, resource_config=" f"{resource_config}." ) inference_spec = InferenceSpec( self._get_inference_spec().to_dict() if self.inference_spec else dict() ) if self.model_data: if not inference_spec.is_container_serving(): # if model_data is an OSS URI with endpoint, truncate the endpoint. oss_uri_obj = OssUriObj(self.model_data) model_path_uri = "oss://{bucket_name}/{key}".format( bucket_name=oss_uri_obj.bucket_name, key=oss_uri_obj.object_key, ) inference_spec.add_option("model_path", model_path_uri) else: try: inference_spec.mount( self.model_data, mount_path=DefaultServiceConfig.model_path, ) except DuplicatedMountException as e: # ignore duplicated mount logger.info("Model is already mounted the container: %s", e) if service_type: inference_spec.add_option("metadata.type", service_type) if inference_spec.is_container_serving(): inference_spec.add_option("metadata.rpc.proxy_path", "/") if service_name: inference_spec.add_option("name", service_name) if instance_count: inference_spec.add_option("metadata.instance", instance_count) if instance_type: inference_spec.add_option("cloud.computing.instance_type", instance_type) elif resource_config: inference_spec.add_option("metadata.cpu", resource_config.cpu) inference_spec.add_option("metadata.memory", resource_config.memory) if resource_config.gpu: inference_spec.add_option("metadata.gpu", resource_config.gpu) if resource_config.gpu_memory: inference_spec.add_option( "metadata.gpu_memory", resource_config.gpu_memory ) if resource_config.gpu: logger.warning( "Parameters 'gpu' is set, the 'gpu_memory' parameter " "does not take effect." ) if resource_id: inference_spec.add_option("metadata.resource", resource_id) if options: inference_spec.merge_options(options=options) return inference_spec.to_dict() def _deploy_local( self, instance_type: str, serializer: SerializerBase = None, wait: bool = True,
) -> LocalPredictor:
17
2023-12-01 01:40:12+00:00
24k
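# A self-contained sketch (not part of either record) of how the InferenceSpec.add_option()
# code shown above turns a dotted option name into nested service-config keys: the name is
# split on ".", folded from right to left, and then merged into the existing config (the
# library performs that merge with addict's AttrDict). The values below are illustrative.
def fold_dotted_option(name: str, value):
    """Fold a dotted option name into a nested dict, mirroring add_option()."""
    src = value
    for key in reversed(name.split(".")):
        src = {key: src}
    return src


# Options set by _build_service_config() for a simple EAS deployment:
print(fold_dotted_option("name", "my_service"))
# {'name': 'my_service'}
print(fold_dotted_option("metadata.instance", 2))
# {'metadata': {'instance': 2}}
print(fold_dotted_option("cloud.computing.instance_type", "ecs.c6.xlarge"))
# {'cloud': {'computing': {'instance_type': 'ecs.c6.xlarge'}}}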
mpenning/ciscoconfparse2
dev_tools/compare_v4_v6_methods.py
[ { "identifier": "IPv4Obj", "path": "ciscoconfparse2/ccp_util.py", "snippet": "class IPv4Obj(object):\n dna: str = \"IPv4Obj\"\n v4input: Optional[Union[str,int]] = None\n strict: bool = False\n debug: int = 0\n\n ip_object: Any = None\n network_object: Any = None\n finished_parsing: bool = False\n empty: bool = False\n\n # This method is on IPv4Obj(). @logger.catch() breaks the __init__() method.\n def __init__(self, v4input: Optional[Union[str,int]]=None, strict: bool=False, debug: int=0):\n \"\"\"An object to represent IPv4 addresses and IPv4 networks.\n\n When :class:`~ccp_util.IPv4Obj` objects are compared or sorted, network numbers are sorted lower to higher. If network numbers are the same, shorter masks are lower than longer masks. After comparing mask length, numerically higher IP addresses are greater than numerically lower IP addresses.. Comparisons between :class:`~ccp_util.IPv4Obj` instances was chosen so it's easy to find the longest-match for a given prefix (see examples below).\n\n This object emulates the behavior of ipaddr.IPv4Network (in Python2) where host-bits were retained in the IPv4Network() object. :class:`ipaddress.IPv4Network` in Python3 does not retain host-bits; the desire to retain host-bits in both Python2 and Python3 ip network objects was the genesis of this API.\n\n :param v4input: A string (or integer) containing an IPv4 address, and optionally a netmask or masklength. Integers are also accepted and the masklength of the integer is assumed to be 32-bits. The following address/netmask formats are supported: \"10.1.1.1/24\", \"10.1.1.1 255.255.255.0\", \"10.1.1.1/255.255.255.0\"\n :type v4input: Union[str,int]\n :param strict: When `strict` is True, the value of `v4input` must not have host-bits set. The default value is False.\n :type strict: bool\n\n .. code-block:: python\n\n >>> from ciscoconfparse2.ccp_util import IPv4Obj\n >>> ## Parse from an integer...\n >>> net = IPv4Obj(2886729984)\n >>> net\n <IPv4Obj 172.16.1.0/32>\n >>> net.prefixlen = 24\n >>> net\n <IPv4Obj 172.16.1.0/24>\n >>> ## Parse from an string...\n >>> net = IPv4Obj('172.16.1.0/24')\n >>> net\n <IPv4Obj 172.16.1.0/24>\n >>> net.ip\n IPv4Address('172.16.1.0')\n >>> net.ip + 1\n IPv4Address('172.16.1.1')\n >>> str(net.ip+1)\n '172.16.1.1'\n >>> net.network\n IPv4Network('172.16.1.0/24')\n >>> net.network_object\n IPv4Network('172.16.1.0/24')\n >>> str(net.network_object)\n '172.16.1.0/24'\n >>> net.prefixlen\n 24\n >>> net.network_object.iterhosts()\n <generator object iterhosts at 0x7f00bfcce730>\n >>>\n >>> # Example of finding the longest-match IPv4 route for an addr...\n >>> prefix_list = ['0.0.0.0/0', '4.0.0.0/8', '2.0.0.0/7', '4.0.0.0/16', '2.0.0.0/32']\n >>> rt_table = sorted([IPv4Obj(ii) for ii in prefix_list], reverse=True)\n >>> addr = IPv4Obj('4.0.1.1')\n >>> for route in rt_table:\n ... if addr in route:\n ... break\n ...\n >>> # The longest match is contained in route\n >>> route\n <IPv4Obj 4.0.0.0/16>\n >>>\n\n\n Attributes\n ----------\n as_binary_tuple : :py:class:`tuple`\n The address as a tuple of zero-padded binary strings\n as_cidr_addr : str\n Return a string representing the IPv4 host and netmask of this object in cidr notation. Example - '172.16.0.1/24'\n as_cidr_net : str\n Return a string representing the IPv4 network and netmask of this object in cidr notation. 
Example - '172.16.5.0/24'\n as_decimal : int\n The ip address as a decimal integer\n as_decimal_network : int\n The network address as a decimal integer\n as_hex_tuple : tuple\n The address as a tuple of zero-padded 8-bit hex strings\n as_zeropadded : str\n Return a zero-padded string of the ip address (example: '10.1.1.1' returns '010.001.001.001')\n as_zeropadded_network : str\n Return a zero-padded string of the ip network (example: '10.1.1.1' returns '010.001.001.000')\n broadcast : str\n An IPv4Address object representing the broadcast address\n get_regex : str\n Returns the regex string used for an IPv4 Address\n exploded : str\n Returns the IPv4 Address object as a string. The string representation is in dotted decimal notation. Leading zeroes are never included in the representation.\n hostmask : :class:`ipaddress.IPv4Address`\n A :class:`ipaddress.IPv4Address` representing the hostmask\n ip : :class:`ipaddress.IPv4Address`\n Returns an :class:`ipaddress.IPv4Address` with the host address of this object\n ip_object : :class:`ipaddress.IPv4Address`\n Returns an :class:`ipaddress.IPv4Address` with the host address of this object\n is_multicast : bool\n Return a boolean True if this object represents a multicast address; otherwise return False.\n is_private : bool\n Return a boolean True if this object represents a private IPv4 address; otherwise return False.\n is_reserved : bool\n Return a boolean True if this object represents a reserved IPv4 address; otherwise return False.\n netmask : :class:`ipaddress.IPv4Address`\n An :class:`ipaddress.IPv4Address` object containing the netmask\n network : :class:`ipaddress.IPv4Network`\n Returns an :class:`ipaddress.IPv4Network` with the network of this object\n network_offset : int\n Returns the integer difference between host number and network number. This must be less than `numhosts`\n network_object : :class:`ipaddress.IPv4Network`\n Returns an :class:`ipaddress.IPv4Network` with the network of this object\n numhosts : int\n An integer representing the number of host addresses contained in the network\n packed : str\n Returns the IPv4 object as packed hex bytes\n prefixlen : int\n An python setter/getter method which return an integer representing the length of the netmask\n prefixlength : int\n An integer representing the length of the netmask\n inverse_netmask : :class:`ipaddress.IPv4Address`\n A :class:`ipaddress.IPv4Address` representing the hostmask. .hostmask and .inverse_netmask return the same values\n version : int\n Returns an integer representing the IP version of this object. Only 4 or 6 are valid results\n \"\"\"\n if isinstance(debug, int):\n if debug > 0:\n logger.info(f\"IPv4Obj(v4input='{v4input}', strict={strict}, debug={debug}) was called\")\n else:\n error = f\"IPv4Obj() debug must be an int, but `debug`=`{debug}` was called.\"\n logger.critical(error)\n raise ValueError(error)\n\n try:\n if v4input is not None and isinstance(v4input, (str, int, IPv4Obj)) is False:\n raise ValueError()\n except ValueError as eee:\n raise AddressValueError(\n f\"Could not parse '{v4input}' (type: {type(v4input)}) into an IPv4 Address. {eee}\"\n )\n except BaseException as eee:\n raise AddressValueError(\n f\"Could not parse '{v4input}' (type: {type(v4input)}) into an IPv4 Address. 
{eee}\"\n )\n\n self.strict = strict\n self.debug = debug\n self.empty = False\n\n # Initialize attributes\n self.ip_object = None\n self.network_object = None\n self.finished_parsing = False\n #################################### NEW\n\n if v4input is None:\n self.empty = True\n\n elif isinstance(v4input, str):\n v4_str_rgx = _RGX_IPV4ADDR_WITH_MASK.search(v4input.strip())\n if v4_str_rgx is not None:\n v4_groupdict = v4_str_rgx.groupdict()\n else:\n v4_groupdict = {}\n\n v4addr_nomask = v4_groupdict.get(\"v4addr_nomask\", None) or \"\"\n v4addr_netmask = v4_groupdict.get(\"v4addr_netmask\", None) or \"\"\n v4addr_prefixlen = v4_groupdict.get(\"v4addr_prefixlen\", None) or \"\"\n netmask = v4_groupdict.get(\"netmask\", None) or \"\"\n prefixlen = v4_groupdict.get(\"masklen\", None) or \"\"\n\n # There is a bug here... if I don't use this if condition, address\n # parsing fails\n if netmask == \"\" and prefixlen == \"\":\n prefixlen = \"32\"\n\n # Fix parsing problems...\n if not re.search(r\"^\\d+$\", prefixlen.strip()):\n prefixlen = \"\"\n if not re.search(r\"^\\d+\\.\\d+\\.\\d+\\.\\d+$\", netmask.strip()):\n netmask = \"\"\n\n # Fix a tricky parsing error above... both netmask and prefixlen should\n # not be defined...\n if netmask != \"\" and prefixlen != \"\":\n prefixlen = \"\"\n\n v4addr = f\"{v4addr_nomask}{v4addr_netmask}{v4addr_prefixlen}\"\n mask_prefixlen = f\"{netmask}{prefixlen}\"\n if re.search(r\"\\d+\\.\\d+\\.\\d+\\.\\d+\", v4addr):\n self.ip_object = IPv4Address(v4addr)\n self.network_object = IPv4Network(f\"{v4addr}/{mask_prefixlen}\", strict=False)\n self.prefixlen = self.network_object.prefixlen\n elif v4addr == \"dhcp\":\n raise DynamicAddressException(\"Cannot parse address from a DHCP string.\")\n else:\n raise AddressValueError(\n f\"Could not parse '{v4input}' {type(v4input)} into an IPv4 Address\"\n )\n\n self.finished_parsing = True\n\n elif isinstance(v4input, int):\n if not (0 <= v4input <= IPV4_MAXINT):\n raise RequirementFailure()\n self.ip_object = IPv4Address(v4input)\n self.network_object = IPv4Network(v4input, strict=False)\n self.finished_parsing = True\n\n elif isinstance(v4input, IPv4Obj):\n self.ip_object = IPv4Address(v4input.ip)\n self.network_object = IPv4Network(v4input.as_cidr_net, strict=False)\n self.finished_parsing = True\n\n else:\n raise AddressValueError(\n f\"Could not parse '{v4input}' {type(v4input)} into an IPv4 Address\"\n )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __repr__(self):\n if self.empty is False:\n return f\"\"\"<IPv4Obj {str(self.ip_object)}/{self.prefixlen}>\"\"\"\n else:\n return f\"\"\"<IPv4Obj None empty={self.empty}>\"\"\"\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __eq__(self, val):\n if isinstance(val, IPv4Obj) and (self.empty is True or val.empty is True):\n return self.empty == val.empty\n try:\n # Code to fix Github issue #180\n for obj in [self, val]:\n for attr_name in [\"as_decimal\", \"prefixlen\"]:\n try:\n if getattr(obj, attr_name, None) is None:\n raise RequirementFailure()\n except RequirementFailure:\n return False\n\n # Compare objects numerically...\n if self.as_decimal == val.as_decimal and self.prefixlen == val.prefixlen:\n return True\n return False\n except AttributeError as eee:\n errmsg = f\"'{self.__repr__()}' cannot compare itself to '{val}': {eee}\"\n raise AttributeError(errmsg)\n except BaseException as eee:\n errmsg = f\"'{self.__repr__()}' cannot compare itself to '{val}': {eee}\"\n raise AttributeError(errmsg)\n\n # do NOT wrap with 
@logger.catch(...)\n # On IPv4Obj()\n @logger.catch(reraise=True)\n def __ne__(self, val):\n if isinstance(val, IPv4Obj):\n return not self.__eq__(val)\n else:\n return True\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __gt__(self, val):\n try:\n for obj in [self, val]:\n for attr_name in [\"as_decimal\", \"as_decimal_network\", \"prefixlen\"]:\n try:\n if getattr(obj, attr_name, None) is None:\n raise RequirementFailure()\n except (RequirementFailure):\n error_str = f\"Cannot compare {self} with '{type(obj)}'\"\n raise AssertionError(error_str)\n\n val_prefixlen = int(getattr(val, \"prefixlen\"))\n self_prefixlen = int(getattr(self, \"prefixlen\"))\n val_ndec = int(getattr(val, \"as_decimal_network\"))\n self_ndec = int(getattr(self, \"as_decimal_network\"))\n val_dec = int(getattr(val, \"as_decimal\"))\n self_dec = int(getattr(self, \"as_decimal\"))\n\n if self_ndec == val_ndec and self_prefixlen == val_prefixlen:\n return self_dec > val_dec\n\n # for the same network, longer prefixlens sort \"higher\" than shorter prefixlens\n elif self_ndec == val_ndec:\n return self_prefixlen > val_prefixlen\n\n else:\n return self_ndec > val_ndec\n\n except BaseException:\n errmsg = f\"{self.__repr__()} cannot compare itself to '{val}'\"\n raise ValueError(errmsg)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __lt__(self, val):\n try:\n for obj in [self, val]:\n for attr_name in [\"as_decimal\", \"as_decimal_network\", \"prefixlen\"]:\n try:\n if getattr(obj, attr_name, None) is None:\n raise RequirementFailure()\n except (RequirementFailure):\n error_str = f\"Cannot compare {self} with '{type(obj)}'\"\n raise AssertionError(error_str)\n except BaseException:\n error_str = f\"Cannot compare {self} with '{type(obj)}'\"\n raise AssertionError(error_str)\n\n val_prefixlen = int(getattr(val, \"prefixlen\"))\n self_prefixlen = int(getattr(self, \"prefixlen\"))\n val_ndec = int(getattr(val, \"as_decimal_network\"))\n self_ndec = int(getattr(self, \"as_decimal_network\"))\n val_dec = int(getattr(val, \"as_decimal\"))\n self_dec = int(getattr(self, \"as_decimal\"))\n\n if self_ndec == val_ndec and self_prefixlen == val_prefixlen:\n return self_dec < val_dec\n\n # for the same network, longer prefixlens sort \"higher\" than shorter prefixlens\n elif self_ndec == val_ndec:\n return self_prefixlen < val_prefixlen\n\n else:\n return self_ndec < val_ndec\n\n except Exception:\n errmsg = f\"{self.__repr__()} cannot compare itself to '{val}'\"\n logger.error(errmsg)\n raise ValueError(errmsg)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __int__(self):\n \"\"\"Return this object as an integer\"\"\"\n if getattr(self, \"as_decimal\", None) is not None:\n return self.as_decimal\n else:\n return False\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __index__(self):\n \"\"\"Return this object as an integer (used for hex() and bin() operations)\"\"\"\n if getattr(self, \"as_decimal\", None) is not None:\n return self.as_decimal\n else:\n return False\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __add__(self, val):\n \"\"\"Add an integer to IPv4Obj() and return an IPv4Obj()\"\"\"\n if not isinstance(val, int):\n raise ValueError(f\"Cannot add type: '{type(val)}' to IPv4Obj()\")\n\n orig_prefixlen = self.prefixlen\n total = self.as_decimal + val\n if total > IPV4_MAXINT:\n raise RequirementFailure(\"Max IPv4 integer exceeded\")\n if total < 0:\n raise RequirementFailure(\"Min IPv4 integer exceeded\")\n retval = IPv4Obj(total)\n 
retval.prefixlen = orig_prefixlen\n return retval\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __sub__(self, val):\n \"\"\"Subtract an integer from IPv4Obj() and return an IPv4Obj()\"\"\"\n if not isinstance(val, int):\n raise ValueError(f\"Cannot subtract type: '{type(val)}' from {self}\")\n\n orig_prefixlen = self.prefixlen\n total = self.as_decimal - val\n if total >= IPV4_MAXINT:\n raise RequirementFailure(\"Max IPv4 integer exceeded\")\n if total < 0:\n raise RequirementFailure(\"Min IPv4 integer exceeded\")\n retval = IPv4Obj(total)\n retval.prefixlen = orig_prefixlen\n return retval\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __contains__(self, val):\n # Used for \"foo in bar\"... python calls bar.__contains__(foo)\n if self.empty is True and val.empty is True:\n return True\n elif self.empty is True or val.empty is True:\n return False\n\n # For all other cases, see below...\n try:\n if self.network_object.prefixlen == 0:\n return True\n elif self.network_object.prefixlen > val.network_object.prefixlen:\n # obvious shortcut... if this object's mask is longer than\n # val, this object cannot contain val\n return False\n else:\n # return (val.network in self.network)\n #\n ## Last used: 2020-07-12... version 1.5.6\n # return (self.network <= val.network) and (\n # self.broadcast >= val.broadcast\n # )\n return (self.as_decimal_network <= val.as_decimal_network) and (self.as_decimal_broadcast >= val.as_decimal_broadcast) and (self.prefixlen <= val.prefixlen)\n\n except ValueError as eee:\n raise ValueError(f\"Could not check whether '{val}' is contained in '{self}': {eee}\")\n except BaseException as eee:\n raise ValueError(f\"Could not check whether '{val}' is contained in '{self}': {eee}\")\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __hash__(self):\n # Python3 needs __hash__()\n if self.empty is False:\n return hash(str(self.ip_object)) + hash(str(self.prefixlen))\n else:\n return hash(None)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __iter__(self):\n return self.network_object.__iter__()\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def __next__(self):\n ## For Python3 iteration...\n return self.network_object.__next__()\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n def next(self):\n ## For Python2 iteration...\n return self.network_object.__next__()\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def _version(self):\n \"\"\"\n Fix github issue #203... build a `_prefixlen` attribute...\n \"\"\"\n return self.version\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def _prefixlen(self):\n \"\"\"\n Fix github issue #203... build a `_prefixlen` attribute...\n \"\"\"\n return self.prefixlen\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def _max_prefixlen(self):\n \"\"\"\n Fix github issue #203... build a `_prefixlen` attribute...\n \"\"\"\n return IPV4_MAX_PREFIXLEN\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @staticmethod\n def get_regex():\n return _IPV4_REGEX_STR\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def _ip(self):\n \"\"\"Returns the address as an integer. 
This property exists for compatibility with ipaddress.IPv4Address() in stdlib\"\"\"\n return int(self.ip_object)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def ip(self):\n \"\"\"Returns the address as an :class:`ipaddress.IPv4Address` object.\"\"\"\n return self.ip_object\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def ipv4(self):\n \"\"\"Returns the address as an :class:`ipaddress.IPv4Address` object.\"\"\"\n return self.ip_object\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def netmask(self):\n \"\"\"Returns the network mask as an :class:`ipaddress.IPv4Address` object.\"\"\"\n return self.network_object.netmask\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def masklen(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return int(self.network_object.prefixlen)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @masklen.setter\n def masklen(self, arg):\n \"\"\"masklen setter method\"\"\"\n self.network_object = IPv4Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def masklength(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return self.prefixlen\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @masklength.setter\n def masklength(self, arg):\n \"\"\"masklen setter method\"\"\"\n self.network_object = IPv4Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def prefixlen(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n if self.empty is False:\n return int(self.network_object.prefixlen)\n else:\n return None\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @prefixlen.setter\n def prefixlen(self, arg):\n \"\"\"prefixlen setter method\"\"\"\n self.network_object = IPv4Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def prefixlength(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return self.prefixlen\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @prefixlength.setter\n def prefixlength(self, arg):\n \"\"\"prefixlength setter method\"\"\"\n self.network_object = IPv4Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def exploded(self):\n \"\"\"Returns the IPv4 Address object as a string. The string representation is in dotted decimal notation. 
Leading zeroes are never included in the representation.\"\"\"\n return self.ip_object.exploded\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def packed(self):\n \"\"\"Returns the IPv4 object as packed hex bytes\"\"\"\n return self.ip_object.packed\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def broadcast(self):\n \"\"\"Returns the broadcast address as an :class:`ipaddress.IPv4Address` object.\"\"\"\n if sys.version_info[0] < 3:\n return self.network_object.broadcast\n else:\n return self.network_object.broadcast_address\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def network(self):\n \"\"\"Returns an :class:`ipaddress.IPv4Network` object, which represents this network.\"\"\"\n if sys.version_info[0] < 3:\n return self.network_object.network\n else:\n ## The ipaddress module returns an \"IPAddress\" object in Python3...\n return IPv4Network(f\"{self.network_object.compressed}\", strict=False)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def network_offset(self):\n \"\"\"Returns the integer difference between host number and network number. This must be less than `numhosts`\"\"\"\n offset = self.as_decimal - self.as_decimal_network\n if offset > self.numhosts:\n raise RequirementFailure()\n return offset\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @network_offset.setter\n def network_offset(self, arg):\n \"\"\"\n Accept an integer network_offset and modify this IPv4Obj() to be 'arg' integer offset from the subnet.\n\n Throw an error if the network_offset would exceed the existing subnet boundary.\n\n Example\n -------\n >>> addr = IPv6Obj(\"192.0.2.1/24\")\n >>> addr.network_offset = 20\n >>> addr\n <IPv6Obj 192.0.2.20/24>\n >>>\n \"\"\"\n if isinstance(arg, (int, str)):\n arg = int(arg)\n # get the max offset for this subnet...\n max_offset = self.as_decimal_broadcast - self.as_decimal_network\n if arg <= max_offset:\n self.ip_object = IPv4Address(self.as_decimal_network + arg)\n else:\n raise AddressValueError(f\"{self}.network_offset({arg=}) exceeds the boundaries of '{self.as_cidr_net}'\")\n else:\n raise NotImplementedError\n\n # @property\n # def as_decimal_network(self):\n # \"\"\"Returns an integer calculated from the network address...\"\"\"\n # num_strings = str(self.network).split(\".\")\n # num_strings.reverse() # reverse the order\n # return sum(\n # [int(num, 16) * (65536 ** idx) for idx, num in enumerate(num_strings)]\n # )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def hostmask(self):\n \"\"\"Returns the host mask as an :class:`ipaddress.IPv4Address` object.\"\"\"\n return self.network_object.hostmask\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def max_int(self):\n \"\"\"Return the maximum size of an IPv4 Address object as an integer\"\"\"\n return IPV4_MAXINT\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def inverse_netmask(self):\n \"\"\"Returns the host mask as an :class:`ipaddress.IPv4Address` object.\"\"\"\n return self.network_object.hostmask\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def version(self):\n \"\"\"Returns the IP version of the object as an integer. i.e. 
4\"\"\"\n return 4\n\n # On IPv4Obj()\n @property\n def numhosts(self):\n \"\"\"Returns the total number of IP addresses in this network, including broadcast and the \"subnet zero\" address\"\"\"\n if self.prefixlength <= 30:\n return 2 ** (IPV4_MAX_PREFIXLEN - self.network_object.prefixlen) - 2\n elif self.prefixlength == 31:\n # special case... /31 subnet has no broadcast address\n return 2\n elif self.prefixlength == 32:\n return 1\n else:\n # We (obviously) should never hit this...\n raise NotImplementedError\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_decimal(self):\n \"\"\"Returns the IP address as a decimal integer\"\"\"\n num_strings = str(self.ip).split(\".\")\n num_strings.reverse() # reverse the order\n return sum(int(num) * (256**idx) for idx, num in enumerate(num_strings))\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_decimal_network(self):\n \"\"\"Returns the integer value of the IP network as a decimal integer; explicitly, if this object represents 1.1.1.5/24, 'as_decimal_network' returns the integer value of 1.1.1.0/24\"\"\"\n num_strings = str(self.network).split(\"/\")[0].split(\".\")\n num_strings.reverse() # reverse the order\n return sum(int(num) * (256**idx) for idx, num in enumerate(num_strings))\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_decimal_broadcast(self):\n \"\"\"Returns the integer value of the IP broadcast as a decimal integer; explicitly, if this object represents 1.1.1.5/24, 'as_decimal_broadcast' returns the integer value of 1.1.1.255\"\"\"\n broadcast_offset = 2 ** (IPV4_MAX_PREFIXLEN - self.network_object.prefixlen) - 1\n return self.as_decimal_network + broadcast_offset\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_int(self):\n \"\"\"Returns the IP address as a decimal integer\"\"\"\n return self.as_decimal\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_zeropadded(self):\n \"\"\"Returns the IP address as a zero-padded string (useful when sorting in a text-file)\"\"\"\n num_strings = str(self.ip).split(\".\")\n return \".\".join([f\"{int(num):03}\" for num in num_strings])\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_zeropadded_network(self):\n \"\"\"Returns the IP network as a zero-padded string (useful when sorting in a text-file)\"\"\"\n num_strings = self.as_cidr_net.split(\"/\")[0].split(\".\")\n zero_padded_addr = \".\".join([f\"{int(num):03}\" for num in num_strings])\n return f\"{zero_padded_addr}/{self.prefixlen}\"\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_hex(self):\n \"\"\"Returns the IP address as a hex string\"\"\"\n return hex(self)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_binary_tuple(self):\n \"\"\"Returns the IP address as a tuple of zero-padded binary strings\"\"\"\n return tuple(f\"{int(num):08b}\" for num in str(self.ip).split(\".\"))\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_hex_tuple(self):\n \"\"\"Returns the IP address as a tuple of zero-padded hex strings\"\"\"\n return tuple(f\"{int(num):02x}\" for num in str(self.ip).split(\".\"))\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def as_cidr_addr(self):\n \"\"\"Returns a string with the address in CIDR notation\"\"\"\n return str(self.ip) + \"/\" + str(self.prefixlen)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n 
@property\n def as_cidr_net(self):\n \"\"\"Returns a string with the network in CIDR notation\"\"\"\n if sys.version_info[0] < 3:\n return str(self.network) + \"/\" + str(self.prefixlen)\n else:\n return str(self.network)\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def is_multicast(self):\n \"\"\"Returns a boolean for whether this is a multicast address\"\"\"\n return self.network_object.is_multicast\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def is_private(self):\n \"\"\"Returns a boolean for whether this is a private address\"\"\"\n return self.network_object.is_private\n\n # do NOT wrap with @logger.catch(...)\n # On IPv4Obj()\n @property\n def is_reserved(self):\n \"\"\"Returns a boolean for whether this is a reserved address\"\"\"\n return self.network_object.is_reserved" }, { "identifier": "IPv6Obj", "path": "ciscoconfparse2/ccp_util.py", "snippet": "class IPv6Obj(object):\n dna: str = \"IPv6Obj\"\n v6input: Optional[Union[str, int]] = None\n strict: bool = False\n debug: int = 0\n\n ip_object: Any = None\n network_object: Any = None\n finished_parsing: bool = False\n empty: bool = False\n\n __setstate__: Any = None\n\n # This method is on IPv6Obj(). @logger.catch() breaks the __init__() method.\n def __init__(self, v6input: Optional[Union[str,int]]=None, strict: bool=False, debug: int=0):\n \"\"\"An object to represent IPv6 addresses and IPv6 networks.\n\n When :class:`~ccp_util.IPv6Obj` objects are compared or sorted, network numbers are sorted lower to higher. If network numbers are the same, shorter masks are lower than longer masks. After comparing mask length, numerically higher IP addresses are greater than numerically lower IP addresses. Comparisons between :class:`~ccp_util.IPv6Obj` instances was chosen so it's easy to find the longest-match for a given prefix.\n\n This object emulates the behavior of ipaddr.IPv6Network() (in Python2) where host-bits were retained in the IPv6Network() object. :class:`ipaddress.IPv6Network` in Python3 does not retain host-bits; the desire to retain host-bits in both Python2 and Python3 ip network objects was the genesis of this API.\n\n :param v6input: A string (or integer) containing an IPv6 address, and optionally a netmask or masklength. Integers are also accepted and the masklength of the integer is assumed to be 128-bits. The following address/netmask formats are supported: \"2001::dead:beef\", \"2001::dead:beef/64\",\n :type v6input: Optional[Union[str,int]]\n :param strict: When `strict` is True, the value of `v4input` must not have host-bits set. The default value is False.\n :type strict: bool\n\n .. 
code-block:: python\n\n >>> from ciscoconfparse2.ccp_util import IPv6Obj\n >>> net = IPv6Obj(42540488161975842760550356429036175087)\n >>> net\n <IPv6Obj 2001::dead:beef/64>\n >>> net = IPv6Obj(\"2001::dead:beef/64\")\n >>> net\n <IPv6Obj 2001::dead:beef/64>\n >>>\n\n Attributes\n ----------\n network : :class:`ipaddress.IPv6Network`\n Returns an :class:`ipaddress.IPv6Network` with the network of this object\n network_object : :class:`ipaddress.IPv6Network`\n Returns an :class:`ipaddress.IPv6Network` with the network of this object\n ip_object : :class:`ipaddress.IPv6Address`\n Returns an :class:`ipaddress.IPv6Address` with the host address of this object\n ip : :class:`ipaddress.IPv6Address`\n Returns an :class:`ipaddress.IPv6Address` with the host address of this object\n as_binary_tuple : tuple\n The ipv6 address as a tuple of zero-padded binary strings\n as_decimal : int\n The ipv6 address as a decimal integer\n as_decimal_network : int\n The network address as a decimal integer\n as_hex_tuple : tuple\n The ipv6 address as a tuple of zero-padded 8-bit hex strings\n get_regex : str\n Returns the regex string used for an IPv6 Address\n netmask : :class:`ipaddress.IPv6Address`\n An :class:`ipaddress.IPv6Address` object containing the netmask\n network_offset : int\n Returns the integer difference between host number and network number. This must be less than `numhosts`\n numhosts : int\n An integer representing the number of host addresses contained in the network\n prefixlen : int\n An integer representing the length of the netmask\n broadcast: raises `NotImplementedError`; IPv6 doesn't use broadcast addresses\n hostmask : :class:`ipaddress.IPv6Address`\n An :class:`ipaddress.IPv6Address` representing the hostmask\n numhosts : int\n An integer representing the number of hosts contained in the network\n\n \"\"\"\n\n\n if isinstance(debug, int):\n if debug > 0:\n logger.info(f\"IPv6Obj(v6input='{v6input}', strict={strict}, debug={debug}) was called\")\n else:\n error = f\"IPv6Obj() debug must be an int, but `debug`=`{debug}` was called.\"\n logger.critical(error)\n raise ValueError(error)\n\n if v6input is not None:\n try:\n if isinstance(v6input, (str, int, IPv6Obj)) is False:\n raise ValueError()\n except ValueError as eee:\n error = f\"Could not parse '{v6input}' (type: {type(v6input)}) into an IPv6 Address. {eee}\"\n logger.error(error)\n raise AddressValueError(error)\n except BaseException as eee:\n error = f\"Could not parse '{v6input}' (type: {type(v6input)}) into an IPv6 Address. 
{eee}\"\n logger.error(error)\n raise AddressValueError(error)\n\n # Initialize attributes\n self.ip_object = None\n self.network_object = None\n self.finished_parsing = False\n\n self.v6input = v6input\n self.dna = \"IPv6Obj\"\n self.strict = strict\n self.debug = debug\n\n self.empty = False\n self.__setstate__ = None\n\n if v6input is None:\n self.empty = True\n elif isinstance(v6input, str):\n if len(v6input) > IPV6_MAXSTR_LEN:\n raise RequirementFailure()\n\n tmp = re.split(r\"\\s+\", v6input.strip())\n if len(tmp) == 2:\n v6input = \"/\".join(tmp)\n elif len(tmp) == 1:\n v6input = tmp[0]\n else:\n raise NotImplementedError(v6input.strip())\n\n v6_str_rgx = _RGX_IPV6ADDR.search(v6input.strip())\n # Example 'v6_groupdict'\n # v6_groupdict = {'addr': '2b00:cd80:14:10::1', 'opt1': None, 'opt2': None, 'opt3': None, 'opt4': None, 'opt5': '2b00:cd80:14:10::1', 'opt6': None, 'opt7': None, 'opt8': None, 'opt9': None, 'opt10': None, 'masklen': '64'}\n v6_groupdict = v6_str_rgx.groupdict()\n for key in [\"addr\", \"opt1\", \"opt2\", \"opt3\", \"opt4\", \"opt5\", \"opt6\", \"opt7\", \"opt8\", \"opt9\", \"opt10\", \"opt11\"]:\n _ipv6 = v6_groupdict[key]\n if _ipv6 is not None:\n break\n else:\n _ipv6 = \"::1\"\n if _ipv6 is None:\n raise RequirementFailure()\n\n self.ip_object = IPv6Address(_ipv6)\n if isinstance(v6_groupdict[\"masklen\"], str):\n netstr = _ipv6 + \"/\" + v6_groupdict[\"masklen\"]\n # FIXME - this probably should be removed...\n #elif isinstance(v6_groupdict[\"netmask\"], str):\n # netstr = ipv6 + \"/\" + v6_groupdict[\"netmask\"]\n else:\n netstr = _ipv6 + \"/128\"\n self.network_object = IPv6Network(netstr, strict=False)\n\n elif isinstance(v6input, int):\n if not (0 <= v6input <= IPV6_MAXINT):\n raise RequirementFailure()\n self.ip_object = IPv6Address(v6input)\n self.network_object = IPv6Network(v6input, strict=False)\n\n elif isinstance(v6input, IPv6Obj):\n self.ip_object = IPv6Address(v6input.ip)\n self.network_object = IPv6Network(v6input.as_cidr_net, strict=False)\n\n else:\n raise AddressValueError(f\"Could not parse '{v6input}' {type(v6input)} into an IPv6 Address\")\n\n # On IPv6Obj()\n def __repr__(self):\n # Detect IPv4_mapped IPv6 addresses...\n if self.empty is True:\n return f\"\"\"<IPv6Obj None empty={self.empty}>\"\"\"\n elif self.is_ipv4_mapped:\n return f\"\"\"<IPv6Obj ::ffff:{self.ip.ipv4_mapped}/{self.prefixlen}>\"\"\"\n else:\n return f\"\"\"<IPv6Obj {str(self.ip)}/{self.prefixlen}>\"\"\"\n\n # On IPv6Obj()\n def __eq__(self, val):\n if self.empty is True:\n if val.empty is True:\n return True\n else:\n return False\n try:\n for obj in [self, val]:\n for attr_name in [\"as_decimal\", \"prefixlen\"]:\n if getattr(obj, attr_name, None) is None:\n return False\n\n # Compare objects numerically...\n if self.as_decimal == val.as_decimal and self.prefixlen == val.prefixlen:\n return True\n return False\n except BaseException as eee:\n errmsg = f\"'{self.__repr__()}' cannot compare itself to '{val}': {eee}\"\n raise ValueError(errmsg)\n\n # On IPv6Obj()\n def __ne__(self, val):\n return not self.__eq__(val)\n\n # On IPv6Obj()\n def __gt__(self, val):\n try:\n for obj in [self, val]:\n for attr_name in [\"as_decimal\", \"as_decimal_network\", \"prefixlen\"]:\n if getattr(obj, attr_name, None) is None:\n error_str = f\"Cannot compare {self} with '{type(obj)}'\"\n raise RequirementFailure(error_str)\n\n val_prefixlen = int(getattr(val, \"prefixlen\"))\n self_prefixlen = int(getattr(self, \"prefixlen\"))\n val_ndec = int(getattr(val, \"as_decimal_network\"))\n 
self_ndec = int(getattr(self, \"as_decimal_network\"))\n val_dec = int(getattr(val, \"as_decimal\"))\n self_dec = int(getattr(self, \"as_decimal\"))\n\n if self_ndec == val_ndec and self_prefixlen == val_prefixlen:\n return self_dec > val_dec\n\n # for the same network, longer prefixlens sort \"higher\" than shorter prefixlens\n elif self_ndec == val_ndec:\n return self_prefixlen > val_prefixlen\n\n else:\n return self_ndec > val_ndec\n\n except BaseException:\n errmsg = f\"{self.__repr__()} cannot compare itself to '{val}'\"\n raise ValueError(errmsg)\n\n # On IPv6Obj()\n def __lt__(self, val):\n try:\n for obj in [self, val]:\n for attr_name in [\"as_decimal\", \"prefixlen\"]:\n if getattr(obj, attr_name, None) is None:\n error_str = f\"Cannot compare {self} with '{type(obj)}'\"\n raise RequirementFailure(error_str)\n\n val_prefixlen = int(getattr(val, \"prefixlen\"))\n self_prefixlen = int(getattr(self, \"prefixlen\"))\n val_ndec = int(getattr(val, \"as_decimal_network\"))\n self_ndec = int(getattr(self, \"as_decimal_network\"))\n val_dec = int(getattr(val, \"as_decimal\"))\n self_dec = int(getattr(self, \"as_decimal\"))\n\n if self_ndec == val_ndec and self_prefixlen == val_prefixlen:\n return self_dec < val_dec\n\n # for the same network, longer prefixlens sort \"higher\" than shorter prefixlens\n elif self_ndec == val_ndec:\n return self_prefixlen < val_prefixlen\n\n else:\n return self_ndec < val_ndec\n\n except BaseException:\n errmsg = f\"{self.__repr__()} cannot compare itself to '{val}'\"\n raise ValueError(errmsg)\n\n # On IPv6Obj()\n def __int__(self):\n \"\"\"Return this object as an integer\"\"\"\n if getattr(self, \"as_decimal\", None) is not None:\n return self.as_decimal\n else:\n return False\n\n # On IPv6Obj()\n def __index__(self):\n \"\"\"Return this object as an integer (used for hex() and bin() operations)\"\"\"\n if getattr(self, \"as_decimal\", None) is not None:\n return self.as_decimal\n else:\n return False\n\n # On IPv6Obj()\n def __add__(self, val):\n \"\"\"Add an integer to IPv6Obj() and return an IPv6Obj()\"\"\"\n if not isinstance(val, int):\n raise ValueError(f\"Cannot add type: '{type(val)}' to {self}\")\n\n orig_prefixlen = self.prefixlen\n total = self.as_decimal + val\n if total > IPV6_MAXINT:\n raise RequirementFailure(\"Max IPv6 integer exceeded\")\n if total < 0:\n raise RequirementFailure(\"Min IPv6 integer exceeded\")\n retval = IPv6Obj(total)\n retval.prefixlen = orig_prefixlen\n return retval\n\n # On IPv6Obj()\n def __sub__(self, val):\n \"\"\"Subtract an integer from IPv6Obj() and return an IPv6Obj()\"\"\"\n if not isinstance(val, int):\n raise ValueError(f\"Cannot subtract type: '{type(val)}' from {self}\")\n\n orig_prefixlen = self.prefixlen\n total = self.as_decimal - val\n if total >= IPV6_MAXINT:\n raise RequirementFailure(\"Max IPv6 integer exceeded\")\n if total < 0:\n raise RequirementFailure(\"Min IPv6 integer exceeded\")\n retval = IPv6Obj(total)\n retval.prefixlen = orig_prefixlen\n return retval\n\n # On IPv6Obj()\n def __contains__(self, val):\n # Used for \"foo in bar\"... python calls bar.__contains__(foo)\n try:\n if self.network_object.prefixlen == 0:\n return True\n elif self.network_object.prefixlen > val.network_object.prefixlen:\n # obvious shortcut... 
if this object's mask is longer than\n # val, this object cannot contain val\n return False\n else:\n # NOTE: We cannot use the same algorithm as IPv4Obj.__contains__() b/c IPv6Obj has no broadcast\n comparison_01 = (self.as_decimal_network <= val.as_decimal_network)\n comparison_02 = (self.as_decimal_network + self.numhosts - 1) >= (val.as_decimal_network + val.numhosts - 1)\n return comparison_01 and comparison_02\n\n except BaseException as eee:\n raise ValueError(f\"Could not check whether '{val}' is contained in '{self}': {eee}\")\n\n # On IPv6Obj()\n def __hash__(self):\n # Python3 needs __hash__()\n return hash(str(self.ip_object)) + hash(str(self.prefixlen))\n\n # On IPv6Obj()\n def __iter__(self):\n return self.network_object.__iter__()\n\n # On IPv6Obj()\n def __next__(self):\n ## For Python3 iteration...\n return self.network_object.__next__()\n\n # On IPv6Obj()\n def next(self):\n ## For Python2 iteration...\n return self.network_object.__next__()\n\n # On IPv6Obj()\n @staticmethod\n def get_regex():\n return _IPV6_REGEX_STR\n\n # On IPv6Obj()\n @property\n def _version(self):\n \"\"\"\n Fix github issue #203... build a `_prefixlen` attribute...\n \"\"\"\n return self.version\n\n # On IPv6Obj()\n @property\n def _prefixlen(self):\n \"\"\"\n Fix github issue #203... build a `_prefixlen` attribute...\n \"\"\"\n return self.prefixlen\n\n # On IPv6Obj()\n @property\n def _max_prefixlen(self):\n \"\"\"\n Fix github issue #203... build a `_prefixlen` attribute...\n \"\"\"\n return IPV6_MAX_PREFIXLEN\n\n # On IPv6Obj()\n @property\n def is_ipv4_mapped(self):\n # ref RFC 4291 - Section 2.5.5.2\n # https://datatracker.ietf.org/doc/html/rfc4291#section-2.5.5.2\n #\n # ref RFC 5156 - Section 2.2 IPv4 mapped addresses\n # https://datatracker.ietf.org/doc/html/rfc5156#section-2.2\n #\n # if self.ip in IPv6Network(\"::ffff:0:0/96\", strict=False):\n if IPv6Network(\"::ffff:0:0/96\").__contains__(self.ip):\n return True\n return False\n\n # On IPv6Obj()\n @property\n def _ip(self):\n \"\"\"Returns the address as an integer. 
This property exists for compatibility with ipaddress.IPv6Address() in stdlib\"\"\"\n return int(self.ip_object)\n\n # On IPv6Obj()\n @property\n def ip(self):\n \"\"\"Returns the address as an :class:`ipaddress.IPv6Address` object.\"\"\"\n return self.ip_object\n\n # On IPv6Obj()\n @property\n def ipv6(self):\n \"\"\"Returns the address as an :class:`ipaddress.IPv6Address` object.\"\"\"\n return self.ip_object\n\n # On IPv6Obj()\n @property\n def netmask(self):\n \"\"\"Returns the network mask as an :class:`ipaddress.IPv6Address` object.\"\"\"\n return self.network_object.netmask\n\n # On IPv6Obj()\n @property\n def masklen(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return int(self.network_object.prefixlen)\n\n # On IPv6Obj()\n @masklen.setter\n def masklen(self, arg):\n \"\"\"masklen setter method\"\"\"\n self.network_object = IPv6Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # On IPv6Obj()\n @property\n def masklength(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return self.prefixlen\n\n # On IPv6Obj()\n @masklength.setter\n def masklength(self, arg):\n \"\"\"masklength setter method\"\"\"\n self.network_object = IPv6Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # On IPv6Obj()\n @property\n def prefixlen(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return int(self.network_object.prefixlen)\n\n # On IPv6Obj()\n @prefixlen.setter\n def prefixlen(self, arg):\n \"\"\"prefixlen setter method\"\"\"\n self.network_object = IPv6Network(\n f\"{str(self.ip_object)}/{arg}\", strict=False\n )\n\n # On IPv6Obj()\n @property\n def prefixlength(self):\n \"\"\"Returns the length of the network mask as an integer.\"\"\"\n return self.prefixlen\n\n # On IPv6Obj()\n @property\n def compressed(self):\n \"\"\"Returns the IPv6 Network object in compressed form\"\"\"\n return self.network_object.compressed\n\n # On IPv6Obj()\n @property\n def exploded(self):\n \"\"\"Returns the IPv6 Address object in exploded form\"\"\"\n return self.ip_object.exploded\n\n # On IPv6Obj()\n @property\n def packed(self):\n \"\"\"Returns the IPv6 object as packed hex bytes\"\"\"\n return self.ip_object.packed\n\n # On IPv6Obj()\n @property\n def broadcast(self):\n raise NotImplementedError(\"IPv6 does not use broadcast\")\n\n # On IPv6Obj()\n @property\n def network(self):\n \"\"\"Returns an :class:`ipaddress.IPv6Network` object, which represents this network.\"\"\"\n ## The ipaddress module returns an \"IPAddress\" object in Python3...\n return IPv6Network(f\"{self.network_object.compressed}\")\n\n # do NOT wrap with @logger.catch(...)\n # On IPv6Obj()\n @property\n def network_offset(self):\n \"\"\"Returns the integer difference between host number and network number. 
This must be less than `numhosts`\"\"\"\n offset = self.as_decimal - self.as_decimal_network\n if offset > self.numhosts:\n raise RequirementFailure()\n return offset\n\n # do NOT wrap with @logger.catch(...)\n # On IPv6Obj()\n @network_offset.setter\n def network_offset(self, arg):\n \"\"\"\n Accept an integer network_offset and modify this IPv6Obj() to be 'arg' integer offset from the subnet.\n\n Throw an error if the network_offset would exceed the existing subnet boundary.\n\n Example\n -------\n >>> addr = IPv6Obj(\"2b00:cd80:14:10::1/64\")\n >>> addr.network_offset = 20\n >>> addr\n <IPv6Obj 2b00:cd80:14:10::20/64>\n >>>\n \"\"\"\n if isinstance(arg, (int, str)):\n arg = int(arg)\n # get the max offset for this subnet...\n max_offset = self.as_decimal_network_maxint - self.as_decimal_network\n if arg <= max_offset:\n self.ip_object = IPv6Address(self.as_decimal_network + arg)\n else:\n raise AddressValueError(f\"{self}.network_offset({arg=}) exceeds the boundaries of '{self.as_cidr_net}'\")\n else:\n raise NotImplementedError\n\n # On IPv6Obj()\n @property\n def as_decimal_network(self):\n \"\"\"Returns the integer value of the IP network as a decimal integer; explicitly, if this object represents 2b00:cd80:14:10::1/64, 'as_decimal_network' returns the integer value of 2b00:cd80:14:10::0/64\"\"\"\n num_strings = str(self.network.exploded).split(\"/\")[0].split(\":\")\n num_strings.reverse() # reverse the order\n return sum(\n int(num, 16) * (65536**idx) for idx, num in enumerate(num_strings)\n )\n\n # do NOT wrap with @logger.catch(...)\n # On IPv6Obj()\n @property\n def as_decimal_broadcast(self):\n \"\"\"IPv6 does not support broadcast addresses. Use 'as_decimal_network_maxint' if you want the integer value that would otherwise be an IPv6 broadcast.\"\"\"\n raise NotImplementedError(\"IPv6 does not support broadcast addresses. Use 'as_decimal_network_maxint' if you want the integer value that would otherwise be an IPv6 broadcast.\")\n\n # do NOT wrap with @logger.catch(...)\n # On IPv6Obj()\n @property\n def as_decimal_network_maxint(self):\n \"\"\"Returns the integer value of the maximum value of an IPv6 subnet as a decimal integer; explicitly, if this object represents 2b00:cd80:14:10::0/64, 'as_decimal_network_maxint' returns the integer value of 2b00:cd80:14:10:ffff:ffff:ffff:ffff\"\"\"\n network_maxint_offset = 2 ** (IPV6_MAX_PREFIXLEN - self.network_object.prefixlen) - 1\n return self.as_decimal_network + network_maxint_offset\n\n # On IPv6Obj()\n @property\n def hostmask(self):\n \"\"\"Returns the host mask as an :class:`ipaddress.IPv6Address` object.\"\"\"\n return self.network_object.hostmask\n\n # On IPv6Obj()\n @property\n def max_int(self):\n \"\"\"Return the maximum size of an IPv6 Address object as an integer\"\"\"\n return IPV6_MAXINT\n\n # On IPv6Obj()\n @property\n def inverse_netmask(self):\n \"\"\"Returns the host mask as an :class:`ipaddress.IPv6Address` object.\"\"\"\n return self.network_object.hostmask\n\n # On IPv6Obj()\n @property\n def version(self):\n \"\"\"Returns the IP version of the object as an integer. i.e. 6\"\"\"\n return 6\n\n # On IPv6Obj()\n @property\n def numhosts(self):\n \"\"\"Returns the total number of IP addresses in this network, including broadcast and the \"subnet zero\" address\"\"\"\n if self.prefixlength <= 126:\n return 2 ** (IPV6_MAX_PREFIXLEN - self.network_object.prefixlen) - 2\n elif self.prefixlength == 127:\n # special case... 
/127 subnet has no broadcast address\n return 2\n elif self.prefixlength == 128:\n return 1\n else:\n # We (obviously) should never hit this...\n raise NotImplementedError\n\n # On IPv6Obj()\n @property\n def as_decimal(self):\n \"\"\"Returns the IP address as a decimal integer\"\"\"\n num_strings = str(self.ip.exploded).split(\":\")\n num_strings.reverse() # reverse the order\n return sum(\n int(num, 16) * (65536**idx) for idx, num in enumerate(num_strings)\n )\n\n # On IPv6Obj()\n def as_int(self):\n \"\"\"Returns the IP address as a decimal integer\"\"\"\n return self.as_decimal\n\n # On IPv6Obj()\n @property\n def as_binary_tuple(self):\n \"\"\"Returns the IPv6 address as a tuple of zero-padded 16-bit binary strings\"\"\"\n result_list = [f\"{int(ii, 16):016b}\" for ii in self.as_hex_tuple]\n return tuple(result_list)\n\n # On IPv6Obj()\n @property\n def as_hex(self):\n \"\"\"Returns the IP address as a hex string\"\"\"\n return hex(self)\n\n # On IPv6Obj()\n @property\n def as_hex_tuple(self):\n \"\"\"Returns the IPv6 address as a tuple of zero-padded 16-bit hex strings\"\"\"\n result_list = str(self.ip.exploded).split(\":\")\n return tuple(result_list)\n\n # On IPv6Obj()\n @property\n def as_cidr_addr(self):\n \"\"\"Returns a string with the address in CIDR notation\"\"\"\n return str(self.ip) + \"/\" + str(self.prefixlen)\n\n # On IPv6Obj()\n @property\n def as_cidr_net(self):\n \"\"\"Returns a string with the network in CIDR notation\"\"\"\n if sys.version_info[0] < 3:\n return str(self.network) + \"/\" + str(self.prefixlen)\n else:\n return str(self.network)\n\n # On IPv6Obj()\n @property\n def is_multicast(self):\n \"\"\"Returns a boolean for whether this is a multicast address\"\"\"\n return self.network_object.is_multicast\n\n # On IPv6Obj()\n @property\n def is_private(self):\n \"\"\"Returns a boolean for whether this is a private address\"\"\"\n return self.network_object.is_private\n\n # On IPv6Obj()\n @property\n def is_reserved(self):\n \"\"\"Returns a boolean for whether this is a reserved address\"\"\"\n return self.network_object.is_reserved\n\n # On IPv6Obj()\n @property\n def is_link_local(self):\n \"\"\"Returns a boolean for whether this is an IPv6 link-local address\"\"\"\n return self.network_object.is_link_local\n\n # On IPv6Obj()\n @property\n def is_site_local(self):\n \"\"\"Returns a boolean for whether this is an IPv6 site-local address\"\"\"\n return self.network_object.is_site_local\n\n # On IPv6Obj()\n @property\n def is_unspecified(self):\n \"\"\"Returns a boolean for whether this address is not otherwise\n classified\"\"\"\n return self.network_object.is_unspecified\n\n # On IPv6Obj()\n @property\n def teredo(self):\n return self.network_object.teredo\n\n # On IPv6Obj()\n @property\n def sixtofour(self):\n return self.network_object.sixtofour" } ]
import sys
import os

from ciscoconfparse import IPv4Obj, IPv6Obj
from loguru import logger
15356
"""Compare methods on IPv4Obj() and IPv6Obj(). Flag missing methods""" sys.path.insert(0, "../") # add the path to the local git repo copy # from this dev_tools/ directory environ = os.environ['VIRTUAL_ENV'] print("ENV", environ) try: print("PYTHONPATH", str(os.environ['PYTHONPATH'])) except Exception as eee: error = f"{eee}: Could not find PYTHONPATH." logger.error(error) raise OSError(error) v4_list = dir(IPv4Obj("127.0.0.1"))
"""Compare methods on IPv4Obj() and IPv6Obj(). Flag missing methods""" sys.path.insert(0, "../") # add the path to the local git repo copy # from this dev_tools/ directory environ = os.environ['VIRTUAL_ENV'] print("ENV", environ) try: print("PYTHONPATH", str(os.environ['PYTHONPATH'])) except Exception as eee: error = f"{eee}: Could not find PYTHONPATH." logger.error(error) raise OSError(error) v4_list = dir(IPv4Obj("127.0.0.1"))
v6_list = dir(IPv6Obj("::1"))
1
2023-12-01 18:43:27+00:00
24k
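Illustrative aside, not part of the dataset record above: a runnable sketch of the dir()-difference comparison that compare_v4_v6_methods.py performs, using the standard library's ipaddress classes as stand-ins for IPv4Obj()/IPv6Obj() so the snippet runs without ciscoconfparse installed. The set differences list attributes present on one address class but missing from the other.

# Illustrative sketch: stdlib stand-ins for IPv4Obj()/IPv6Obj(); same dir()-comparison idea.
from ipaddress import IPv4Address, IPv6Address

v4_attrs = {name for name in dir(IPv4Address("127.0.0.1")) if not name.startswith("_")}
v6_attrs = {name for name in dir(IPv6Address("::1")) if not name.startswith("_")}

print("Only on the IPv4 object:", sorted(v4_attrs - v6_attrs))
print("Only on the IPv6 object:", sorted(v6_attrs - v4_attrs))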
zerolink-io/zerolink-python
zerolink/req.py
[ { "identifier": "settings", "path": "zerolink/settings.py", "snippet": " CONFIG_FILE = os.path.join(os.environ[\"APPDATA\"], \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(os.environ[\"HOME\"], \".config\", \"zerolink\", \"config\")\n CONFIG_FILE = os.path.join(\n os.environ[\"HOME\"], \"Library\", \"Application Support\", \"zerolink\", \"config\"\n )\ndef create_config() -> None:\ndef get_config() -> configparser.ConfigParser:\ndef get_config_path() -> str:\ndef get_config_var(var: str) -> str:\ndef write_config_var(var: str, value: str):\ndef write_api_key(api_key: str):\ndef read_api_key() -> Optional[str]:" }, { "identifier": "APIError", "path": "zerolink/exc.py", "snippet": "class APIError(Exception):\n def __init__(self, message: str) -> None:\n self.message = message\n\n def __str__(self) -> str:\n return self.message" }, { "identifier": "AuthenticationError", "path": "zerolink/exc.py", "snippet": "class AuthenticationError(Exception):\n def __init__(self) -> None:\n pass\n\n def __str__(self) -> str:\n return \"No API key. Please run `zerolink key` or set the ZEROLINK_API_KEY environment variable\"" }, { "identifier": "Client", "path": "zerolink_client/client.py", "snippet": "class Client:\n \"\"\"A class for keeping track of data related to the API\n\n The following are accepted as keyword arguments and will be used to construct httpx Clients internally:\n\n ``base_url``: The base URL for the API, all requests are made to a relative path to this URL\n\n ``cookies``: A dictionary of cookies to be sent with every request\n\n ``headers``: A dictionary of headers to be sent with every request\n\n ``timeout``: The maximum amount of a time a request can take. API functions will raise\n httpx.TimeoutException if this is exceeded.\n\n ``verify_ssl``: Whether or not to verify the SSL certificate of the API server. This should be True in production,\n but can be set to False for testing purposes.\n\n ``follow_redirects``: Whether or not to follow redirects. Default value is False.\n\n ``httpx_args``: A dictionary of additional arguments to be passed to the ``httpx.Client`` and ``httpx.AsyncClient`` constructor.\n\n\n Attributes:\n raise_on_unexpected_status: Whether or not to raise an errors.UnexpectedStatus if the API returns a\n status code that was not documented in the source OpenAPI document. 
Can also be provided as a keyword\n argument to the constructor.\n \"\"\"\n\n raise_on_unexpected_status: bool = field(default=False, kw_only=True)\n _base_url: str\n _cookies: Dict[str, str] = field(factory=dict, kw_only=True)\n _headers: Dict[str, str] = field(factory=dict, kw_only=True)\n _timeout: Optional[httpx.Timeout] = field(default=None, kw_only=True)\n _verify_ssl: Union[str, bool, ssl.SSLContext] = field(default=True, kw_only=True)\n _follow_redirects: bool = field(default=False, kw_only=True)\n _httpx_args: Dict[str, Any] = field(factory=dict, kw_only=True)\n _client: Optional[httpx.Client] = field(default=None, init=False)\n _async_client: Optional[httpx.AsyncClient] = field(default=None, init=False)\n\n def with_headers(self, headers: Dict[str, str]) -> \"Client\":\n \"\"\"Get a new client matching this one with additional headers\"\"\"\n if self._client is not None:\n self._client.headers.update(headers)\n if self._async_client is not None:\n self._async_client.headers.update(headers)\n return evolve(self, headers={**self._headers, **headers})\n\n def with_cookies(self, cookies: Dict[str, str]) -> \"Client\":\n \"\"\"Get a new client matching this one with additional cookies\"\"\"\n if self._client is not None:\n self._client.cookies.update(cookies)\n if self._async_client is not None:\n self._async_client.cookies.update(cookies)\n return evolve(self, cookies={**self._cookies, **cookies})\n\n def with_timeout(self, timeout: httpx.Timeout) -> \"Client\":\n \"\"\"Get a new client matching this one with a new timeout (in seconds)\"\"\"\n if self._client is not None:\n self._client.timeout = timeout\n if self._async_client is not None:\n self._async_client.timeout = timeout\n return evolve(self, timeout=timeout)\n\n def set_httpx_client(self, client: httpx.Client) -> \"Client\":\n \"\"\"Manually the underlying httpx.Client\n\n **NOTE**: This will override any other settings on the client, including cookies, headers, and timeout.\n \"\"\"\n self._client = client\n return self\n\n def get_httpx_client(self) -> httpx.Client:\n \"\"\"Get the underlying httpx.Client, constructing a new one if not previously set\"\"\"\n if self._client is None:\n self._client = httpx.Client(\n base_url=self._base_url,\n cookies=self._cookies,\n headers=self._headers,\n timeout=self._timeout,\n verify=self._verify_ssl,\n follow_redirects=self._follow_redirects,\n **self._httpx_args,\n )\n return self._client\n\n def __enter__(self) -> \"Client\":\n \"\"\"Enter a context manager for self.client—you cannot enter twice (see httpx docs)\"\"\"\n self.get_httpx_client().__enter__()\n return self\n\n def __exit__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Exit a context manager for internal httpx.Client (see httpx docs)\"\"\"\n self.get_httpx_client().__exit__(*args, **kwargs)\n\n def set_async_httpx_client(self, async_client: httpx.AsyncClient) -> \"Client\":\n \"\"\"Manually the underlying httpx.AsyncClient\n\n **NOTE**: This will override any other settings on the client, including cookies, headers, and timeout.\n \"\"\"\n self._async_client = async_client\n return self\n\n def get_async_httpx_client(self) -> httpx.AsyncClient:\n \"\"\"Get the underlying httpx.AsyncClient, constructing a new one if not previously set\"\"\"\n if self._async_client is None:\n self._async_client = httpx.AsyncClient(\n base_url=self._base_url,\n cookies=self._cookies,\n headers=self._headers,\n timeout=self._timeout,\n verify=self._verify_ssl,\n follow_redirects=self._follow_redirects,\n **self._httpx_args,\n )\n return 
self._async_client\n\n async def __aenter__(self) -> \"Client\":\n \"\"\"Enter a context manager for underlying httpx.AsyncClient—you cannot enter twice (see httpx docs)\"\"\"\n await self.get_async_httpx_client().__aenter__()\n return self\n\n async def __aexit__(self, *args: Any, **kwargs: Any) -> None:\n \"\"\"Exit a context manager for underlying httpx.AsyncClient (see httpx docs)\"\"\"\n await self.get_async_httpx_client().__aexit__(*args, **kwargs)" }, { "identifier": "finetune", "path": "zerolink_client/api/default/finetune.py", "snippet": "def _get_kwargs(\n *,\n file: Union[File, str],\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Response[Union[CreateTuneJobResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n file: Union[File, str],\n) -> Optional[Union[CreateTuneJobResponse, HTTPValidationError]]:" }, { "identifier": "get_models_models_get", "path": "zerolink_client/api/default/get_models_models_get.py", "snippet": "def _get_kwargs() -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[ModelList]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[ModelList]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[ModelList]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[ModelList]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[ModelList]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[ModelList]:" }, { "identifier": "desc_entity_id", "path": "zerolink_client/api/entity/desc_entity_id.py", "snippet": "def _get_kwargs(\n id: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[Entity, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[Entity, HTTPValidationError]]:\ndef sync_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Entity, HTTPValidationError]]:\ndef sync(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Entity, HTTPValidationError]]:\nasync def asyncio_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Entity, HTTPValidationError]]:\nasync def asyncio(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Entity, HTTPValidationError]]:" }, { "identifier": "desc_entity_ontology", "path": "zerolink_client/api/entity/desc_entity_ontology.py", "snippet": "def _get_kwargs(\n id: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, 
client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[Any, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[Any, HTTPValidationError]]:\ndef sync_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Any, HTTPValidationError]]:\ndef sync(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Any, HTTPValidationError]]:\nasync def asyncio_detailed(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[Any, HTTPValidationError]]:\nasync def asyncio(\n id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[Any, HTTPValidationError]]:" }, { "identifier": "lookup_entity", "path": "zerolink_client/api/entity/lookup_entity.py", "snippet": "def _get_kwargs(\n name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Entity\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Entity\"]]]:" }, { "identifier": "lookup_relation", "path": "zerolink_client/api/entity/lookup_relation.py", "snippet": "def _get_kwargs(\n name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"Relation\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"Relation\"]]]:" }, { "identifier": "search_entity", "path": "zerolink_client/api/entity/search_entity.py", "snippet": "def _get_kwargs(\n name: str,\n *,\n limit: Union[Unset, int] = 10,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"Match\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Response[Union[HTTPValidationError, 
List[\"Match\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Response[Union[HTTPValidationError, List[\"Match\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n) -> Optional[Union[HTTPValidationError, List[\"Match\"]]]:" }, { "identifier": "extract_text", "path": "zerolink_client/api/extract/extract_text.py", "snippet": "def _get_kwargs(\n *,\n body: TextExtract,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[AssertionResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[AssertionResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Response[Union[AssertionResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Optional[Union[AssertionResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Response[Union[AssertionResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: TextExtract,\n session_id: int,\n) -> Optional[Union[AssertionResponse, HTTPValidationError]]:" }, { "identifier": "create_userattribute", "path": "zerolink_client/api/fact/create_userattribute.py", "snippet": "def _get_kwargs(\n *,\n body: CreateAttribute,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[GenericResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[GenericResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Response[Union[GenericResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Optional[Union[GenericResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Response[Union[GenericResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateAttribute,\n session_id: int,\n) -> Optional[Union[GenericResponse, HTTPValidationError]]:" }, { "identifier": "create_userentity", "path": "zerolink_client/api/fact/create_userentity.py", "snippet": "def _get_kwargs(\n *,\n body: CreateEntity,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: 
CreateEntity,\n session_id: int,\n) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Response[Union[CreateEntityResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateEntity,\n session_id: int,\n) -> Optional[Union[CreateEntityResponse, HTTPValidationError]]:" }, { "identifier": "create_userrule", "path": "zerolink_client/api/fact/create_userrule.py", "snippet": "def _get_kwargs(\n *,\n body: CreateRule,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Response[Union[CreateRuleResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateRule,\n session_id: int,\n) -> Optional[Union[CreateRuleResponse, HTTPValidationError]]:" }, { "identifier": "create_usertriple", "path": "zerolink_client/api/fact/create_usertriple.py", "snippet": "def _get_kwargs(\n *,\n body: CreateTriple,\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Response[Union[CreateFactResponse, HTTPValidationError]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: CreateTriple,\n session_id: int,\n) -> Optional[Union[CreateFactResponse, HTTPValidationError]]:" }, { "identifier": "get_triple", "path": "zerolink_client/api/kg/get_triple.py", "snippet": "def _get_kwargs(\n name: str,\n *,\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, 
List[\"Triple\"]]]:\ndef sync_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Response[Union[HTTPValidationError, List[\"Triple\"]]]:\ndef sync(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:\nasync def asyncio_detailed(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Response[Union[HTTPValidationError, List[\"Triple\"]]]:\nasync def asyncio(\n name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n limit: Union[Unset, int] = 10,\n threshold: Union[Unset, float] = 0.3,\n) -> Optional[Union[HTTPValidationError, List[\"Triple\"]]]:" }, { "identifier": "post_question", "path": "zerolink_client/api/question/post_question.py", "snippet": "def _get_kwargs(\n *,\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, QuestionResponse]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, QuestionResponse]]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Response[Union[HTTPValidationError, QuestionResponse]]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Optional[Union[HTTPValidationError, QuestionResponse]]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Response[Union[HTTPValidationError, QuestionResponse]]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n body: Question,\n session_id: Union[Unset, int] = UNSET,\n) -> Optional[Union[HTTPValidationError, QuestionResponse]]:" }, { "identifier": "create_session", "path": "zerolink_client/api/session/create_session.py", "snippet": "def _get_kwargs(\n user_id: str,\n *,\n name: Union[Unset, str] = UNSET,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[ChatSession, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync_detailed(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Optional[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio_detailed(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Response[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio(\n user_id: str,\n *,\n client: Union[AuthenticatedClient, Client],\n name: Union[Unset, str] = UNSET,\n) -> Optional[Union[ChatSession, HTTPValidationError]]:" }, { "identifier": "get_session_entities", "path": "zerolink_client/api/session/get_session_entities.py", "snippet": "def _get_kwargs(\n session_id: int,\n) -> 
Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef sync_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\ndef sync(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\nasync def asyncio_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericEntity\"]]]:\nasync def asyncio(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericEntity\"]]]:" }, { "identifier": "get_session_facts", "path": "zerolink_client/api/session/get_session_facts.py", "snippet": "def _get_kwargs(\n session_id: int,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef sync_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\ndef sync(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\nasync def asyncio_detailed(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[HTTPValidationError, List[\"GenericTriple\"]]]:\nasync def asyncio(\n session_id: int,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[HTTPValidationError, List[\"GenericTriple\"]]]:" }, { "identifier": "get_user_session", "path": "zerolink_client/api/session/get_user_session.py", "snippet": "def _get_kwargs(\n user_id: str,\n session_name: str,\n) -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[Union[ChatSession, HTTPValidationError]]:\ndef _build_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync_detailed(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[ChatSession, HTTPValidationError]]:\ndef sync(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio_detailed(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[Union[ChatSession, HTTPValidationError]]:\nasync def asyncio(\n user_id: str,\n session_name: str,\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[Union[ChatSession, HTTPValidationError]]:" }, { "identifier": "create_user", "path": "zerolink_client/api/user/create_user.py", "snippet": "def _get_kwargs() -> Dict[str, Any]:\ndef _parse_response(*, client: Union[AuthenticatedClient, Client], response: httpx.Response) -> Optional[CreateUser]:\ndef _build_response(*, client: Union[AuthenticatedClient, 
Client], response: httpx.Response) -> Response[CreateUser]:\ndef sync_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[CreateUser]:\ndef sync(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[CreateUser]:\nasync def asyncio_detailed(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Response[CreateUser]:\nasync def asyncio(\n *,\n client: Union[AuthenticatedClient, Client],\n) -> Optional[CreateUser]:" }, { "identifier": "ChatSession", "path": "zerolink_client/models/chat_session.py", "snippet": "class ChatSession:\n \"\"\"A user chat session.\n\n Attributes:\n id (int):\n name (str): The name of the chat session\n index (int):\n requests (List['Req']):\n responses (List['Rep']):\n created_on (datetime.datetime):\n \"\"\"\n\n id: int\n name: str\n index: int\n requests: List[\"Req\"]\n responses: List[\"Rep\"]\n created_on: datetime.datetime\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.rep import Rep\n from ..models.req import Req\n\n id = self.id\n\n name = self.name\n\n index = self.index\n\n requests = []\n for requests_item_data in self.requests:\n requests_item = requests_item_data.to_dict()\n requests.append(requests_item)\n\n responses = []\n for responses_item_data in self.responses:\n responses_item = responses_item_data.to_dict()\n responses.append(responses_item)\n\n created_on = self.created_on.isoformat()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"name\": name,\n \"index\": index,\n \"requests\": requests,\n \"responses\": responses,\n \"created_on\": created_on,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.rep import Rep\n from ..models.req import Req\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n name = d.pop(\"name\")\n\n index = d.pop(\"index\")\n\n requests = []\n _requests = d.pop(\"requests\")\n for requests_item_data in _requests:\n requests_item = Req.from_dict(requests_item_data)\n\n requests.append(requests_item)\n\n responses = []\n _responses = d.pop(\"responses\")\n for responses_item_data in _responses:\n responses_item = Rep.from_dict(responses_item_data)\n\n responses.append(responses_item)\n\n created_on = isoparse(d.pop(\"created_on\"))\n\n chat_session = cls(\n id=id,\n name=name,\n index=index,\n requests=requests,\n responses=responses,\n created_on=created_on,\n )\n\n chat_session.additional_properties = d\n return chat_session\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateAttribute", "path": "zerolink_client/models/create_attribute.py", "snippet": "class CreateAttribute:\n \"\"\"\n Attributes:\n subject (str): EID of a builtin entity\n predicate (str): Name of attribute\n attribute (Attribute):\n \"\"\"\n\n subject: str\n predicate: str\n attribute: \"Attribute\"\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.attribute import 
Attribute\n\n subject = self.subject\n\n predicate = self.predicate\n\n attribute = self.attribute.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"subject\": subject,\n \"predicate\": predicate,\n \"attribute\": attribute,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.attribute import Attribute\n\n d = src_dict.copy()\n subject = d.pop(\"subject\")\n\n predicate = d.pop(\"predicate\")\n\n attribute = Attribute.from_dict(d.pop(\"attribute\"))\n\n create_attribute = cls(\n subject=subject,\n predicate=predicate,\n attribute=attribute,\n )\n\n create_attribute.additional_properties = d\n return create_attribute\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateEntity", "path": "zerolink_client/models/create_entity.py", "snippet": "class CreateEntity:\n \"\"\"\n Attributes:\n entity (str): Name of entity\n entity_type (Union[Unset, EntityType]): Entity types are entities that map to base ontological entities in\n Foundation.\n entity_str (Union[Unset, str]): User specified type\n is_class (Union[Unset, bool]): Whether the entity is a class or instance Default: False.\n \"\"\"\n\n entity: str\n entity_type: Union[Unset, EntityType] = UNSET\n entity_str: Union[Unset, str] = UNSET\n is_class: Union[Unset, bool] = False\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n entity = self.entity\n\n entity_type: Union[Unset, str] = UNSET\n if not isinstance(self.entity_type, Unset):\n entity_type = self.entity_type.value\n\n entity_str = self.entity_str\n\n is_class = self.is_class\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"entity\": entity,\n }\n )\n if entity_type is not UNSET:\n field_dict[\"entity_type\"] = entity_type\n if entity_str is not UNSET:\n field_dict[\"entity_str\"] = entity_str\n if is_class is not UNSET:\n field_dict[\"is_class\"] = is_class\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n entity = d.pop(\"entity\")\n\n _entity_type = d.pop(\"entity_type\", UNSET)\n entity_type: Union[Unset, EntityType]\n if isinstance(_entity_type, Unset):\n entity_type = UNSET\n else:\n entity_type = EntityType(_entity_type)\n\n entity_str = d.pop(\"entity_str\", UNSET)\n\n is_class = d.pop(\"is_class\", UNSET)\n\n create_entity = cls(\n entity=entity,\n entity_type=entity_type,\n entity_str=entity_str,\n is_class=is_class,\n )\n\n create_entity.additional_properties = d\n return create_entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n 
return key in self.additional_properties" }, { "identifier": "CreateRule", "path": "zerolink_client/models/create_rule.py", "snippet": "class CreateRule:\n \"\"\"\n Attributes:\n rule (str): Textual representation of the rule to parse\n context (Union[Unset, CreateRuleContext]): Context of entities to use for parsing the rule\n \"\"\"\n\n rule: str\n context: Union[Unset, \"CreateRuleContext\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.create_rule_context import CreateRuleContext\n\n rule = self.rule\n\n context: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"rule\": rule,\n }\n )\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.create_rule_context import CreateRuleContext\n\n d = src_dict.copy()\n rule = d.pop(\"rule\")\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, CreateRuleContext]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = CreateRuleContext.from_dict(_context)\n\n create_rule = cls(\n rule=rule,\n context=context,\n )\n\n create_rule.additional_properties = d\n return create_rule\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateRuleResponse", "path": "zerolink_client/models/create_rule_response.py", "snippet": "class CreateRuleResponse:\n \"\"\"\n Attributes:\n id (str):\n \"\"\"\n\n id: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n id = self.id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n create_rule_response = cls(\n id=id,\n )\n\n create_rule_response.additional_properties = d\n return create_rule_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateTriple", "path": "zerolink_client/models/create_triple.py", "snippet": "class CreateTriple:\n \"\"\"\n Attributes:\n predicate (str): Name of predicate relation\n user_subject (Union[Unset, str]): EID of a user entity\n subject (Union[Unset, str]): EID of a builtin entity\n user_object (Union[Unset, str]): EID of a user entity\n object_ (Union[Unset, str]): EID of a builtin entity\n \"\"\"\n\n predicate: 
str\n user_subject: Union[Unset, str] = UNSET\n subject: Union[Unset, str] = UNSET\n user_object: Union[Unset, str] = UNSET\n object_: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n predicate = self.predicate\n\n user_subject = self.user_subject\n\n subject = self.subject\n\n user_object = self.user_object\n\n object_ = self.object_\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"predicate\": predicate,\n }\n )\n if user_subject is not UNSET:\n field_dict[\"user_subject\"] = user_subject\n if subject is not UNSET:\n field_dict[\"subject\"] = subject\n if user_object is not UNSET:\n field_dict[\"user_object\"] = user_object\n if object_ is not UNSET:\n field_dict[\"object\"] = object_\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n predicate = d.pop(\"predicate\")\n\n user_subject = d.pop(\"user_subject\", UNSET)\n\n subject = d.pop(\"subject\", UNSET)\n\n user_object = d.pop(\"user_object\", UNSET)\n\n object_ = d.pop(\"object\", UNSET)\n\n create_triple = cls(\n predicate=predicate,\n user_subject=user_subject,\n subject=subject,\n user_object=user_object,\n object_=object_,\n )\n\n create_triple.additional_properties = d\n return create_triple\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "CreateTuneJobResponse", "path": "zerolink_client/models/create_tune_job_response.py", "snippet": "class CreateTuneJobResponse:\n \"\"\"\n Attributes:\n id (str):\n status (str):\n \"\"\"\n\n id: str\n status: str\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n id = self.id\n\n status = self.status\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"status\": status,\n }\n )\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n status = d.pop(\"status\")\n\n create_tune_job_response = cls(\n id=id,\n status=status,\n )\n\n create_tune_job_response.additional_properties = d\n return create_tune_job_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "Entity", "path": "zerolink_client/models/entity.py", "snippet": "class Entity:\n \"\"\"\n Attributes:\n id (str):\n entity (str):\n description (Union[Unset, str]):\n source (Union[Unset, str]):\n source_url (Union[Unset, str]):\n ontology (Union[Unset, Graph]):\n source_id (Union[Unset, str]):\n \"\"\"\n\n id: str\n 
entity: str\n description: Union[Unset, str] = UNSET\n source: Union[Unset, str] = UNSET\n source_url: Union[Unset, str] = UNSET\n ontology: Union[Unset, \"Graph\"] = UNSET\n source_id: Union[Unset, str] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.graph import Graph\n\n id = self.id\n\n entity = self.entity\n\n description = self.description\n\n source = self.source\n\n source_url = self.source_url\n\n ontology: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.ontology, Unset):\n ontology = self.ontology.to_dict()\n\n source_id = self.source_id\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"entity\": entity,\n }\n )\n if description is not UNSET:\n field_dict[\"description\"] = description\n if source is not UNSET:\n field_dict[\"source\"] = source\n if source_url is not UNSET:\n field_dict[\"source_url\"] = source_url\n if ontology is not UNSET:\n field_dict[\"ontology\"] = ontology\n if source_id is not UNSET:\n field_dict[\"source_id\"] = source_id\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.graph import Graph\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n entity = d.pop(\"entity\")\n\n description = d.pop(\"description\", UNSET)\n\n source = d.pop(\"source\", UNSET)\n\n source_url = d.pop(\"source_url\", UNSET)\n\n _ontology = d.pop(\"ontology\", UNSET)\n ontology: Union[Unset, Graph]\n if isinstance(_ontology, Unset):\n ontology = UNSET\n else:\n ontology = Graph.from_dict(_ontology)\n\n source_id = d.pop(\"source_id\", UNSET)\n\n entity = cls(\n id=id,\n entity=entity,\n description=description,\n source=source,\n source_url=source_url,\n ontology=ontology,\n source_id=source_id,\n )\n\n entity.additional_properties = d\n return entity\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "HTTPValidationError", "path": "zerolink_client/models/http_validation_error.py", "snippet": "class HTTPValidationError:\n \"\"\"\n Attributes:\n detail (Union[Unset, List['ValidationError']]):\n \"\"\"\n\n detail: Union[Unset, List[\"ValidationError\"]] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.validation_error import ValidationError\n\n detail: Union[Unset, List[Dict[str, Any]]] = UNSET\n if not isinstance(self.detail, Unset):\n detail = []\n for detail_item_data in self.detail:\n detail_item = detail_item_data.to_dict()\n detail.append(detail_item)\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update({})\n if detail is not UNSET:\n field_dict[\"detail\"] = detail\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.validation_error import ValidationError\n\n d = src_dict.copy()\n detail = []\n _detail = d.pop(\"detail\", UNSET)\n for detail_item_data in _detail or []:\n detail_item = 
ValidationError.from_dict(detail_item_data)\n\n detail.append(detail_item)\n\n http_validation_error = cls(\n detail=detail,\n )\n\n http_validation_error.additional_properties = d\n return http_validation_error\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "Question", "path": "zerolink_client/models/question.py", "snippet": "class Question:\n \"\"\"A question to be answered by querying the knowledge graph and reasoner.\n\n Attributes:\n body (str): The body of the question\n world (Union[Unset, WorldAssumption]): The world assumption is the assumption about the world that the reasoner\n makes. This is used to determine the answer to a query. For example, if\n the world assumption is \"closed\" then the reasoner will assume that the\n answer to the query is \"no\" if it cannot find a triple to satisfy the\n query. Default: WorldAssumption.CLOSED.\n spatial (Union[Unset, SpatialAssumption]): The spatial assumption is the assumption about space that the\n reasoner\n makes. This is used to determine the answer to a query. For example, if the\n spatial assumption is \"earth\" then the reasoner will only consider\n geographic locations on Earth and will assume all instances of 'location'\n are on Earth. If the spatial assumption is \"universe\" then the reasoner\n then this restriction is lifted and the reasoner will consider all\n locations in the universe. Default: SpatialAssumption.EARTH.\n temporal (Union[Unset, TemporalAssumption]): The temporal assumption is the assumption about time that the\n reasoner\n makes. This is used to determine the answer to a query. For example, if\n the temporal assumption is \"current\" then the reasoner will only consider\n triples that refer to entities that are non-historical. Excluding things\n like the Roman Empire and Francoist Spain. Default: TemporalAssumption.CURRENT.\n context (Union[Unset, ContextAssumption]): The context assumption is the assumption about the context that the\n reasoner makes. This is used to determine the answer to a query. For\n example, if the context assumption is \"none\" then the reasoner will only\n consider basic triples like instance_of and subclass_of. If the context\n assumption is \"local\" then the reasoner will consider triples that are\n defined by the user. If the context assumption is \"global\" then the\n reasoner will consider all queryable triples. 
Default: ContextAssumption.GLOBAL.\n \"\"\"\n\n body: str\n world: Union[Unset, WorldAssumption] = WorldAssumption.CLOSED\n spatial: Union[Unset, SpatialAssumption] = SpatialAssumption.EARTH\n temporal: Union[Unset, TemporalAssumption] = TemporalAssumption.CURRENT\n context: Union[Unset, ContextAssumption] = ContextAssumption.GLOBAL\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n body = self.body\n\n world: Union[Unset, str] = UNSET\n if not isinstance(self.world, Unset):\n world = self.world.value\n\n spatial: Union[Unset, str] = UNSET\n if not isinstance(self.spatial, Unset):\n spatial = self.spatial.value\n\n temporal: Union[Unset, str] = UNSET\n if not isinstance(self.temporal, Unset):\n temporal = self.temporal.value\n\n context: Union[Unset, str] = UNSET\n if not isinstance(self.context, Unset):\n context = self.context.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"body\": body,\n }\n )\n if world is not UNSET:\n field_dict[\"world\"] = world\n if spatial is not UNSET:\n field_dict[\"spatial\"] = spatial\n if temporal is not UNSET:\n field_dict[\"temporal\"] = temporal\n if context is not UNSET:\n field_dict[\"context\"] = context\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n body = d.pop(\"body\")\n\n _world = d.pop(\"world\", UNSET)\n world: Union[Unset, WorldAssumption]\n if isinstance(_world, Unset):\n world = UNSET\n else:\n world = WorldAssumption(_world)\n\n _spatial = d.pop(\"spatial\", UNSET)\n spatial: Union[Unset, SpatialAssumption]\n if isinstance(_spatial, Unset):\n spatial = UNSET\n else:\n spatial = SpatialAssumption(_spatial)\n\n _temporal = d.pop(\"temporal\", UNSET)\n temporal: Union[Unset, TemporalAssumption]\n if isinstance(_temporal, Unset):\n temporal = UNSET\n else:\n temporal = TemporalAssumption(_temporal)\n\n _context = d.pop(\"context\", UNSET)\n context: Union[Unset, ContextAssumption]\n if isinstance(_context, Unset):\n context = UNSET\n else:\n context = ContextAssumption(_context)\n\n question = cls(\n body=body,\n world=world,\n spatial=spatial,\n temporal=temporal,\n context=context,\n )\n\n question.additional_properties = d\n return question\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "QuestionResponse", "path": "zerolink_client/models/question_response.py", "snippet": "class QuestionResponse:\n \"\"\"A response to a question request.\n\n Attributes:\n id (int): The ID of the question\n msg (str): A message describing the result of the question\n status (ResultStatus): The status of a result.\n answers (List[str]): The answers to the question\n methods (List[str]): The methods used to answer the question\n reasoners (List[str]): The reasoners used to answer the question\n query (Union[Unset, QuestionResponseQuery]): The query used to answer the question\n \"\"\"\n\n id: int\n msg: str\n status: ResultStatus\n answers: List[str]\n methods: List[str]\n reasoners: List[str]\n query: Union[Unset, 
\"QuestionResponseQuery\"] = UNSET\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n from ..models.question_response_query import QuestionResponseQuery\n\n id = self.id\n\n msg = self.msg\n\n status = self.status.value\n\n answers = self.answers\n\n methods = self.methods\n\n reasoners = self.reasoners\n\n query: Union[Unset, Dict[str, Any]] = UNSET\n if not isinstance(self.query, Unset):\n query = self.query.to_dict()\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"id\": id,\n \"msg\": msg,\n \"status\": status,\n \"answers\": answers,\n \"methods\": methods,\n \"reasoners\": reasoners,\n }\n )\n if query is not UNSET:\n field_dict[\"query\"] = query\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n from ..models.question_response_query import QuestionResponseQuery\n\n d = src_dict.copy()\n id = d.pop(\"id\")\n\n msg = d.pop(\"msg\")\n\n status = ResultStatus(d.pop(\"status\"))\n\n answers = cast(List[str], d.pop(\"answers\"))\n\n methods = cast(List[str], d.pop(\"methods\"))\n\n reasoners = cast(List[str], d.pop(\"reasoners\"))\n\n _query = d.pop(\"query\", UNSET)\n query: Union[Unset, QuestionResponseQuery]\n if isinstance(_query, Unset):\n query = UNSET\n else:\n query = QuestionResponseQuery.from_dict(_query)\n\n question_response = cls(\n id=id,\n msg=msg,\n status=status,\n answers=answers,\n methods=methods,\n reasoners=reasoners,\n query=query,\n )\n\n question_response.additional_properties = d\n return question_response\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "TextExtract", "path": "zerolink_client/models/text_extract.py", "snippet": "class TextExtract:\n \"\"\"\n Attributes:\n text (str): Text to extract from\n extraction_model (Union[Unset, ExtractModel]): An enumeration. 
Default: ExtractModel.BASE.\n \"\"\"\n\n text: str\n extraction_model: Union[Unset, ExtractModel] = ExtractModel.BASE\n additional_properties: Dict[str, Any] = _attrs_field(init=False, factory=dict)\n\n def to_dict(self) -> Dict[str, Any]:\n text = self.text\n\n extraction_model: Union[Unset, str] = UNSET\n if not isinstance(self.extraction_model, Unset):\n extraction_model = self.extraction_model.value\n\n field_dict: Dict[str, Any] = {}\n field_dict.update(self.additional_properties)\n field_dict.update(\n {\n \"text\": text,\n }\n )\n if extraction_model is not UNSET:\n field_dict[\"extraction_model\"] = extraction_model\n\n return field_dict\n\n @classmethod\n def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:\n d = src_dict.copy()\n text = d.pop(\"text\")\n\n _extraction_model = d.pop(\"extraction_model\", UNSET)\n extraction_model: Union[Unset, ExtractModel]\n if isinstance(_extraction_model, Unset):\n extraction_model = UNSET\n else:\n extraction_model = ExtractModel(_extraction_model)\n\n text_extract = cls(\n text=text,\n extraction_model=extraction_model,\n )\n\n text_extract.additional_properties = d\n return text_extract\n\n @property\n def additional_keys(self) -> List[str]:\n return list(self.additional_properties.keys())\n\n def __getitem__(self, key: str) -> Any:\n return self.additional_properties[key]\n\n def __setitem__(self, key: str, value: Any) -> None:\n self.additional_properties[key] = value\n\n def __delitem__(self, key: str) -> None:\n del self.additional_properties[key]\n\n def __contains__(self, key: str) -> bool:\n return key in self.additional_properties" }, { "identifier": "File", "path": "zerolink_client/types.py", "snippet": "class File:\n \"\"\"Contains information for file uploads\"\"\"\n\n payload: BinaryIO\n file_name: Optional[str] = None\n mime_type: Optional[str] = None\n\n def to_tuple(self) -> FileJsonType:\n \"\"\"Return a tuple representation that httpx will accept for multipart/form-data\"\"\"\n return self.file_name, self.payload, self.mime_type" }, { "identifier": "UNSET", "path": "zerolink_client/types.py", "snippet": "UNSET: Unset = Unset()" } ]
from typing import Any, Optional, cast

from zerolink import settings
from zerolink.exc import APIError, AuthenticationError
from zerolink_client import Client
from zerolink_client.api.default import finetune, get_models_models_get
from zerolink_client.api.entity import (
    desc_entity_id,
    desc_entity_ontology,
    lookup_entity,
    lookup_relation,
    search_entity,
)
from zerolink_client.api.extract import extract_text
from zerolink_client.api.fact import (
    create_userattribute,
    create_userentity,
    create_userrule,
    create_usertriple,
)
from zerolink_client.api.kg import get_triple
from zerolink_client.api.question import post_question
from zerolink_client.api.session import (
    create_session,
    get_session_entities,
    get_session_facts,
    get_user_session,
)
from zerolink_client.api.user import create_user
from zerolink_client.models import (
    ChatSession,
    CreateAttribute,
    CreateEntity,
    CreateRule,
    CreateRuleResponse,
    CreateTriple,
    CreateTuneJobResponse,
    Entity,
    HTTPValidationError,
    Question,
    QuestionResponse,
    TextExtract,
)
from zerolink_client.types import File, UNSET
16,267
# ------------------------------------------------------------------------
# Endpoints
# ------------------------------------------------------------------------

client = Client(
    base_url=settings.server_url,
    raise_on_unexpected_status=False,
)


def check_api_key() -> None:
    """
    Check if the API key is set.
    """
    if settings.api_key is None:
        raise AuthenticationError()
    else:
        pass


def get_user_id() -> str:
    """
    Get the user ID from the server. Only used for Demo server.
    """
    client._headers["Authorization"] = settings.api_key
    rep = create_user.sync(client=client)
    if rep is None:
        raise Exception("Failed to authenticate.")
    settings.api_key = rep.user_id
    if isinstance(rep, HTTPValidationError):
        raise APIError(str(rep))
    return rep.user_id


def post_session(user_id: str, **kwargs) -> Optional[ChatSession]:
    """
    Create a new session.
    """
    check_api_key()
    if user_id is None:
        user_id = settings.api_key
    rep = create_session.sync(client=client, user_id=user_id, **kwargs)
    if isinstance(rep, HTTPValidationError):
        raise APIError(str(rep))
    return rep


def get_session_name(user_id: str, session_name: str, **kwargs):
    """
    Lookup a session by user and name.
    """
    check_api_key()
    rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs)
    if rep.status_code == 200:
        return rep.parsed
    elif rep.status_code == 404:
        return None
    else:
        err = rep.content.decode("utf-8")
        print(err)
        raise APIError(err)


def get_session_entities_list(session_id: int, **kwargs):
    """
    Get the entities of a session.
    """
    check_api_key()
    rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs)
    if rep.status_code == 200:
        return rep.parsed
    else:
        err = rep.content.decode("utf-8")
        print(err)
        raise APIError(err)


def get_session_facts_list(session_id: int, **kwargs):
    """
    Get the facts of a session.
    """
    check_api_key()
# ------------------------------------------------------------------------
# Endpoints
# ------------------------------------------------------------------------

client = Client(
    base_url=settings.server_url,
    raise_on_unexpected_status=False,
)


def check_api_key() -> None:
    """
    Check if the API key is set.
    """
    if settings.api_key is None:
        raise AuthenticationError()
    else:
        pass


def get_user_id() -> str:
    """
    Get the user ID from the server. Only used for Demo server.
    """
    client._headers["Authorization"] = settings.api_key
    rep = create_user.sync(client=client)
    if rep is None:
        raise Exception("Failed to authenticate.")
    settings.api_key = rep.user_id
    if isinstance(rep, HTTPValidationError):
        raise APIError(str(rep))
    return rep.user_id


def post_session(user_id: str, **kwargs) -> Optional[ChatSession]:
    """
    Create a new session.
    """
    check_api_key()
    if user_id is None:
        user_id = settings.api_key
    rep = create_session.sync(client=client, user_id=user_id, **kwargs)
    if isinstance(rep, HTTPValidationError):
        raise APIError(str(rep))
    return rep


def get_session_name(user_id: str, session_name: str, **kwargs):
    """
    Lookup a session by user and name.
    """
    check_api_key()
    rep = get_user_session.sync_detailed(user_id, session_name, client=client, **kwargs)
    if rep.status_code == 200:
        return rep.parsed
    elif rep.status_code == 404:
        return None
    else:
        err = rep.content.decode("utf-8")
        print(err)
        raise APIError(err)


def get_session_entities_list(session_id: int, **kwargs):
    """
    Get the entities of a session.
    """
    check_api_key()
    rep = get_session_entities.sync_detailed(session_id, client=client, **kwargs)
    if rep.status_code == 200:
        return rep.parsed
    else:
        err = rep.content.decode("utf-8")
        print(err)
        raise APIError(err)


def get_session_facts_list(session_id: int, **kwargs):
    """
    Get the facts of a session.
    """
    check_api_key()
rep = get_session_facts.sync_detailed(session_id, client=client, **kwargs)
20
2023-12-03 07:50:04+00:00
24k
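The record above ends here (next_line completes get_session_facts_list; gold_snippet_index 20, level 24k). As a quick orientation before the next record, below is a minimal usage sketch assembled only from the zerolink_client signatures shown in this record's context snippets (Client.with_headers, create_session.sync, post_question.sync, ChatSession, Question). The base URL, API key, user id, and question text are placeholders, not values from the dataset, so treat it as an illustration rather than part of the row.

# Illustrative sketch only -- not part of the dataset row.
from zerolink_client import Client
from zerolink_client.api.question import post_question
from zerolink_client.api.session import create_session
from zerolink_client.models import ChatSession, Question

client = Client(base_url="https://api.example.com")            # placeholder base URL
client = client.with_headers({"Authorization": "YOUR_KEY"})     # placeholder API key

# Create a chat session for a user, then ask a question inside it.
session = create_session.sync("demo-user", client=client, name="demo-session")
if isinstance(session, ChatSession):
    answer = post_question.sync(
        client=client,
        body=Question(body="Is Paris the capital of France?"),
        session_id=session.id,
    )
    print(answer)

This is the same create-session-then-query flow that the post_session wrapper in cropped_code automates.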
JunMa11/UHNSeg-Quiz
nnunetv2/training/nnUNetTrainer/variants/network_architecture/nnUNetTrainerNoDeepSupervision.py
[ { "identifier": "DC_and_BCE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_BCE_loss(nn.Module):\n def __init__(self, bce_kwargs, soft_dice_kwargs, weight_ce=1, weight_dice=1, use_ignore_label: bool = False,\n dice_class=MemoryEfficientSoftDiceLoss):\n \"\"\"\n DO NOT APPLY NONLINEARITY IN YOUR NETWORK!\n\n target mut be one hot encoded\n IMPORTANT: We assume use_ignore_label is located in target[:, -1]!!!\n\n :param soft_dice_kwargs:\n :param bce_kwargs:\n :param aggregate:\n \"\"\"\n super(DC_and_BCE_loss, self).__init__()\n if use_ignore_label:\n bce_kwargs['reduction'] = 'none'\n\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.use_ignore_label = use_ignore_label\n\n self.ce = nn.BCEWithLogitsLoss(**bce_kwargs)\n self.dc = dice_class(apply_nonlin=torch.sigmoid, **soft_dice_kwargs)\n\n def forward(self, net_output: torch.Tensor, target: torch.Tensor):\n if self.use_ignore_label:\n # target is one hot encoded here. invert it so that it is True wherever we can compute the loss\n mask = (1 - target[:, -1:]).bool()\n # remove ignore channel now that we have the mask\n target_regions = torch.clone(target[:, :-1])\n else:\n target_regions = target\n mask = None\n\n dc_loss = self.dc(net_output, target_regions, loss_mask=mask)\n if mask is not None:\n ce_loss = (self.ce(net_output, target_regions) * mask).sum() / torch.clip(mask.sum(), min=1e-8)\n else:\n ce_loss = self.ce(net_output, target_regions)\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n return result" }, { "identifier": "DC_and_CE_loss", "path": "nnunetv2/training/loss/compound_losses.py", "snippet": "class DC_and_CE_loss(nn.Module):\n def __init__(self, soft_dice_kwargs, ce_kwargs, weight_ce=1, weight_dice=1, ignore_label=None,\n dice_class=SoftDiceLoss):\n \"\"\"\n Weights for CE and Dice do not need to sum to one. You can set whatever you want.\n :param soft_dice_kwargs:\n :param ce_kwargs:\n :param aggregate:\n :param square_dice:\n :param weight_ce:\n :param weight_dice:\n \"\"\"\n super(DC_and_CE_loss, self).__init__()\n if ignore_label is not None:\n ce_kwargs['ignore_index'] = ignore_label\n\n self.weight_dice = weight_dice\n self.weight_ce = weight_ce\n self.ignore_label = ignore_label\n\n self.ce = RobustCrossEntropyLoss(**ce_kwargs)\n self.dc = dice_class(apply_nonlin=softmax_helper_dim1, **soft_dice_kwargs)\n\n def forward(self, net_output: torch.Tensor, target: torch.Tensor):\n \"\"\"\n target must be b, c, x, y(, z) with c=1\n :param net_output:\n :param target:\n :return:\n \"\"\"\n if self.ignore_label is not None:\n assert target.shape[1] == 1, 'ignore label is not implemented for one hot encoded target variables ' \\\n '(DC_and_CE_loss)'\n mask = (target != self.ignore_label).bool()\n # remove ignore label from target, replace with one of the known labels. 
It doesn't matter because we\n # ignore gradients in those areas anyway\n target_dice = torch.clone(target)\n target_dice[target == self.ignore_label] = 0\n num_fg = mask.sum()\n else:\n target_dice = target\n mask = None\n\n dc_loss = self.dc(net_output, target_dice, loss_mask=mask) \\\n if self.weight_dice != 0 else 0\n ce_loss = self.ce(net_output, target[:, 0].long()) \\\n if self.weight_ce != 0 and (self.ignore_label is None or num_fg > 0) else 0\n\n result = self.weight_ce * ce_loss + self.weight_dice * dc_loss\n return result" }, { "identifier": "get_tp_fp_fn_tn", "path": "nnunetv2/training/loss/dice.py", "snippet": "def get_tp_fp_fn_tn(net_output, gt, axes=None, mask=None, square=False):\n \"\"\"\n net_output must be (b, c, x, y(, z)))\n gt must be a label map (shape (b, 1, x, y(, z)) OR shape (b, x, y(, z))) or one hot encoding (b, c, x, y(, z))\n if mask is provided it must have shape (b, 1, x, y(, z)))\n :param net_output:\n :param gt:\n :param axes: can be (, ) = no summation\n :param mask: mask must be 1 for valid pixels and 0 for invalid pixels\n :param square: if True then fp, tp and fn will be squared before summation\n :return:\n \"\"\"\n if axes is None:\n axes = tuple(range(2, len(net_output.size())))\n\n shp_x = net_output.shape\n shp_y = gt.shape\n\n with torch.no_grad():\n if len(shp_x) != len(shp_y):\n gt = gt.view((shp_y[0], 1, *shp_y[1:]))\n\n if net_output.shape == gt.shape:\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = gt\n else:\n gt = gt.long()\n y_onehot = torch.zeros(shp_x, device=net_output.device)\n y_onehot.scatter_(1, gt, 1)\n\n tp = net_output * y_onehot\n fp = net_output * (1 - y_onehot)\n fn = (1 - net_output) * y_onehot\n tn = (1 - net_output) * (1 - y_onehot)\n\n if mask is not None:\n with torch.no_grad():\n mask_here = torch.tile(mask, (1, tp.shape[1], *[1 for i in range(2, len(tp.shape))]))\n tp *= mask_here\n fp *= mask_here\n fn *= mask_here\n tn *= mask_here\n # benchmark whether tiling the mask would be faster (torch.tile). 
It probably is for large batch sizes\n # OK it barely makes a difference but the implementation above is a tiny bit faster + uses less vram\n # (using nnUNetv2_train 998 3d_fullres 0)\n # tp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tp, dim=1)), dim=1)\n # fp = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fp, dim=1)), dim=1)\n # fn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(fn, dim=1)), dim=1)\n # tn = torch.stack(tuple(x_i * mask[:, 0] for x_i in torch.unbind(tn, dim=1)), dim=1)\n\n if square:\n tp = tp ** 2\n fp = fp ** 2\n fn = fn ** 2\n tn = tn ** 2\n\n if len(axes) > 0:\n tp = tp.sum(dim=axes, keepdim=False)\n fp = fp.sum(dim=axes, keepdim=False)\n fn = fn.sum(dim=axes, keepdim=False)\n tn = tn.sum(dim=axes, keepdim=False)\n\n return tp, fp, fn, tn" }, { "identifier": "MemoryEfficientSoftDiceLoss", "path": "nnunetv2/training/loss/dice.py", "snippet": "class MemoryEfficientSoftDiceLoss(nn.Module):\n def __init__(self, apply_nonlin: Callable = None, batch_dice: bool = False, do_bg: bool = True, smooth: float = 1.,\n ddp: bool = True):\n \"\"\"\n saves 1.6 GB on Dataset017 3d_lowres\n \"\"\"\n super(MemoryEfficientSoftDiceLoss, self).__init__()\n\n self.do_bg = do_bg\n self.batch_dice = batch_dice\n self.apply_nonlin = apply_nonlin\n self.smooth = smooth\n self.ddp = ddp\n\n def forward(self, x, y, loss_mask=None):\n if self.apply_nonlin is not None:\n x = self.apply_nonlin(x)\n\n # make everything shape (b, c)\n axes = list(range(2, len(x.shape)))\n with torch.no_grad():\n if len(x.shape) != len(y.shape):\n y = y.view((y.shape[0], 1, *y.shape[1:]))\n\n if x.shape == y.shape:\n # if this is the case then gt is probably already a one hot encoding\n y_onehot = y\n else:\n gt = y.long()\n y_onehot = torch.zeros(x.shape, device=x.device, dtype=torch.bool)\n y_onehot.scatter_(1, gt, 1)\n\n if not self.do_bg:\n y_onehot = y_onehot[:, 1:]\n\n sum_gt = y_onehot.sum(axes) if loss_mask is None else (y_onehot * loss_mask).sum(axes)\n\n # this one MUST be outside the with torch.no_grad(): context. Otherwise no gradients for you\n if not self.do_bg:\n x = x[:, 1:]\n\n intersect = (x * y_onehot).sum(axes) if loss_mask is None else (x * y_onehot * loss_mask).sum(axes)\n sum_pred = x.sum(axes) if loss_mask is None else (x * loss_mask).sum(axes)\n\n if self.ddp and self.batch_dice:\n intersect = AllGatherGrad.apply(intersect).sum(0)\n sum_pred = AllGatherGrad.apply(sum_pred).sum(0)\n sum_gt = AllGatherGrad.apply(sum_gt).sum(0)\n\n if self.batch_dice:\n intersect = intersect.sum(0)\n sum_pred = sum_pred.sum(0)\n sum_gt = sum_gt.sum(0)\n\n dc = (2 * intersect + self.smooth) / (torch.clip(sum_gt + sum_pred + self.smooth, 1e-8))\n\n dc = dc.mean()\n return -dc" }, { "identifier": "nnUNetTrainer", "path": "nnunetv2/training/nnUNetTrainer/nnUNetTrainer.py", "snippet": "class nnUNetTrainer(object):\n def __init__(self, plans: dict, configuration: str, fold: int, dataset_json: dict, unpack_dataset: bool = True,\n device: torch.device = torch.device('cuda')):\n # From https://grugbrain.dev/. 
Worth a read ya big brains ;-)\n\n # apex predator of grug is complexity\n # complexity bad\n # say again:\n # complexity very bad\n # you say now:\n # complexity very, very bad\n # given choice between complexity or one on one against t-rex, grug take t-rex: at least grug see t-rex\n # complexity is spirit demon that enter codebase through well-meaning but ultimately very clubbable non grug-brain developers and project managers who not fear complexity spirit demon or even know about sometime\n # one day code base understandable and grug can get work done, everything good!\n # next day impossible: complexity demon spirit has entered code and very dangerous situation!\n\n # OK OK I am guilty. But I tried.\n # https://www.osnews.com/images/comics/wtfm.jpg\n # https://i.pinimg.com/originals/26/b2/50/26b250a738ea4abc7a5af4d42ad93af0.jpg\n\n self.is_ddp = dist.is_available() and dist.is_initialized()\n self.local_rank = 0 if not self.is_ddp else dist.get_rank()\n\n self.device = device\n\n # print what device we are using\n if self.is_ddp: # implicitly it's clear that we use cuda in this case\n print(f\"I am local rank {self.local_rank}. {device_count()} GPUs are available. The world size is \"\n f\"{dist.get_world_size()}.\"\n f\"Setting device to {self.device}\")\n self.device = torch.device(type='cuda', index=self.local_rank)\n else:\n if self.device.type == 'cuda':\n # we might want to let the user pick this but for now please pick the correct GPU with CUDA_VISIBLE_DEVICES=X\n self.device = torch.device(type='cuda', index=0)\n print(f\"Using device: {self.device}\")\n\n # loading and saving this class for continuing from checkpoint should not happen based on pickling. This\n # would also pickle the network etc. Bad, bad. Instead we just reinstantiate and then load the checkpoint we\n # need. So let's save the init args\n self.my_init_kwargs = {}\n for k in inspect.signature(self.__init__).parameters.keys():\n self.my_init_kwargs[k] = locals()[k]\n\n ### Saving all the init args into class variables for later access\n self.plans_manager = PlansManager(plans)\n self.configuration_manager = self.plans_manager.get_configuration(configuration)\n self.configuration_name = configuration\n self.dataset_json = dataset_json\n self.fold = fold\n self.unpack_dataset = unpack_dataset\n\n ### Setting all the folder names. We need to make sure things don't crash in case we are just running\n # inference and some of the folders may not be defined!\n self.preprocessed_dataset_folder_base = join(nnUNet_preprocessed, self.plans_manager.dataset_name) \\\n if nnUNet_preprocessed is not None else None\n self.output_folder_base = join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" + configuration) \\\n if nnUNet_results is not None else None\n self.output_folder = join(self.output_folder_base, f'fold_{fold}')\n\n self.preprocessed_dataset_folder = join(self.preprocessed_dataset_folder_base,\n self.configuration_manager.data_identifier)\n # unlike the previous nnunet folder_with_segs_from_previous_stage is now part of the plans. For now it has to\n # be a different configuration in the same plans\n # IMPORTANT! the mapping must be bijective, so lowres must point to fullres and vice versa (using\n # \"previous_stage\" and \"next_stage\"). 
Otherwise it won't work!\n self.is_cascaded = self.configuration_manager.previous_stage_name is not None\n self.folder_with_segs_from_previous_stage = \\\n join(nnUNet_results, self.plans_manager.dataset_name,\n self.__class__.__name__ + '__' + self.plans_manager.plans_name + \"__\" +\n self.configuration_manager.previous_stage_name, 'predicted_next_stage', self.configuration_name) \\\n if self.is_cascaded else None\n\n ### Some hyperparameters for you to fiddle with\n self.initial_lr = 1e-2\n self.weight_decay = 3e-5\n self.oversample_foreground_percent = 0.33\n self.num_iterations_per_epoch = 250\n self.num_val_iterations_per_epoch = 50\n self.num_epochs = 1000\n self.current_epoch = 0\n\n ### Dealing with labels/regions\n self.label_manager = self.plans_manager.get_label_manager(dataset_json)\n # labels can either be a list of int (regular training) or a list of tuples of int (region-based training)\n # needed for predictions. We do sigmoid in case of (overlapping) regions\n\n self.num_input_channels = None # -> self.initialize()\n self.network = None # -> self._get_network()\n self.optimizer = self.lr_scheduler = None # -> self.initialize\n self.grad_scaler = GradScaler() if self.device.type == 'cuda' else None\n self.loss = None # -> self.initialize\n\n ### Simple logging. Don't take that away from me!\n # initialize log file. This is just our log for the print statements etc. Not to be confused with lightning\n # logging\n timestamp = datetime.now()\n maybe_mkdir_p(self.output_folder)\n self.log_file = join(self.output_folder, \"training_log_%d_%d_%d_%02.0d_%02.0d_%02.0d.txt\" %\n (timestamp.year, timestamp.month, timestamp.day, timestamp.hour, timestamp.minute,\n timestamp.second))\n self.logger = nnUNetLogger()\n\n ### placeholders\n self.dataloader_train = self.dataloader_val = None # see on_train_start\n\n ### initializing stuff for remembering things and such\n self._best_ema = None\n\n ### inference things\n self.inference_allowed_mirroring_axes = None # this variable is set in\n # self.configure_rotation_dummyDA_mirroring_and_inital_patch_size and will be saved in checkpoints\n\n ### checkpoint saving stuff\n self.save_every = 50\n self.disable_checkpointing = False\n\n ## DDP batch size and oversampling can differ between workers and needs adaptation\n # we need to change the batch size in DDP because we don't use any of those distributed samplers\n self._set_batch_size_and_oversample()\n\n self.was_initialized = False\n\n self.print_to_log_file(\"\\n#######################################################################\\n\"\n \"Please cite the following paper when using nnU-Net:\\n\"\n \"Isensee, F., Jaeger, P. F., Kohl, S. A., Petersen, J., & Maier-Hein, K. H. (2021). \"\n \"nnU-Net: a self-configuring method for deep learning-based biomedical image segmentation. 
\"\n \"Nature methods, 18(2), 203-211.\\n\"\n \"#######################################################################\\n\",\n also_print_to_console=True, add_timestamp=False)\n\n def initialize(self):\n if not self.was_initialized:\n self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager,\n self.dataset_json)\n\n self.network = self.build_network_architecture(self.plans_manager, self.dataset_json,\n self.configuration_manager,\n self.num_input_channels,\n enable_deep_supervision=True).to(self.device)\n # compile network for free speedup\n if self._do_i_compile():\n self.print_to_log_file('Compiling network...')\n self.network = torch.compile(self.network)\n\n self.optimizer, self.lr_scheduler = self.configure_optimizers()\n # if ddp, wrap in DDP wrapper\n if self.is_ddp:\n self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network)\n self.network = DDP(self.network, device_ids=[self.local_rank])\n\n self.loss = self._build_loss()\n self.was_initialized = True\n else:\n raise RuntimeError(\"You have called self.initialize even though the trainer was already initialized. \"\n \"That should not happen.\")\n\n def _do_i_compile(self):\n return ('nnUNet_compile' in os.environ.keys()) and (os.environ['nnUNet_compile'].lower() in ('true', '1', 't'))\n\n def _save_debug_information(self):\n # saving some debug information\n if self.local_rank == 0:\n dct = {}\n for k in self.__dir__():\n if not k.startswith(\"__\"):\n if not callable(getattr(self, k)) or k in ['loss', ]:\n dct[k] = str(getattr(self, k))\n elif k in ['network', ]:\n dct[k] = str(getattr(self, k).__class__.__name__)\n else:\n # print(k)\n pass\n if k in ['dataloader_train', 'dataloader_val']:\n if hasattr(getattr(self, k), 'generator'):\n dct[k + '.generator'] = str(getattr(self, k).generator)\n if hasattr(getattr(self, k), 'num_processes'):\n dct[k + '.num_processes'] = str(getattr(self, k).num_processes)\n if hasattr(getattr(self, k), 'transform'):\n dct[k + '.transform'] = str(getattr(self, k).transform)\n import subprocess\n hostname = subprocess.getoutput(['hostname'])\n dct['hostname'] = hostname\n torch_version = torch.__version__\n if self.device.type == 'cuda':\n gpu_name = torch.cuda.get_device_name()\n dct['gpu_name'] = gpu_name\n cudnn_version = torch.backends.cudnn.version()\n else:\n cudnn_version = 'None'\n dct['device'] = str(self.device)\n dct['torch_version'] = torch_version\n dct['cudnn_version'] = cudnn_version\n save_json(dct, join(self.output_folder, \"debug.json\"))\n\n @staticmethod\n def build_network_architecture(plans_manager: PlansManager,\n dataset_json,\n configuration_manager: ConfigurationManager,\n num_input_channels,\n enable_deep_supervision: bool = True) -> nn.Module:\n \"\"\"\n his is where you build the architecture according to the plans. There is no obligation to use\n get_network_from_plans, this is just a utility we use for the nnU-Net default architectures. You can do what\n you want. Even ignore the plans and just return something static (as long as it can process the requested\n patch size)\n but don't bug us with your bugs arising from fiddling with this :-P\n This is the function that is called in inference as well! 
This is needed so that all network architecture\n variants can be loaded at inference time (inference will use the same nnUNetTrainer that was used for\n training, so if you change the network architecture during training by deriving a new trainer class then\n inference will know about it).\n\n If you need to know how many segmentation outputs your custom architecture needs to have, use the following snippet:\n > label_manager = plans_manager.get_label_manager(dataset_json)\n > label_manager.num_segmentation_heads\n (why so complicated? -> We can have either classical training (classes) or regions. If we have regions,\n the number of outputs is != the number of classes. Also there is the ignore label for which no output\n should be generated. label_manager takes care of all that for you.)\n\n \"\"\"\n return get_network_from_plans(plans_manager, dataset_json, configuration_manager,\n num_input_channels, deep_supervision=enable_deep_supervision)\n\n def _get_deep_supervision_scales(self):\n deep_supervision_scales = list(list(i) for i in 1 / np.cumprod(np.vstack(\n self.configuration_manager.pool_op_kernel_sizes), axis=0))[:-1]\n return deep_supervision_scales\n\n def _set_batch_size_and_oversample(self):\n if not self.is_ddp:\n # set batch size to what the plan says, leave oversample untouched\n self.batch_size = self.configuration_manager.batch_size\n else:\n # batch size is distributed over DDP workers and we need to change oversample_percent for each worker\n batch_sizes = []\n oversample_percents = []\n\n world_size = dist.get_world_size()\n my_rank = dist.get_rank()\n\n global_batch_size = self.configuration_manager.batch_size\n assert global_batch_size >= world_size, 'Cannot run DDP if the batch size is smaller than the number of ' \\\n 'GPUs... 
Duh.'\n\n batch_size_per_GPU = np.ceil(global_batch_size / world_size).astype(int)\n\n for rank in range(world_size):\n if (rank + 1) * batch_size_per_GPU > global_batch_size:\n batch_size = batch_size_per_GPU - ((rank + 1) * batch_size_per_GPU - global_batch_size)\n else:\n batch_size = batch_size_per_GPU\n\n batch_sizes.append(batch_size)\n\n sample_id_low = 0 if len(batch_sizes) == 0 else np.sum(batch_sizes[:-1])\n sample_id_high = np.sum(batch_sizes)\n\n if sample_id_high / global_batch_size < (1 - self.oversample_foreground_percent):\n oversample_percents.append(0.0)\n elif sample_id_low / global_batch_size > (1 - self.oversample_foreground_percent):\n oversample_percents.append(1.0)\n else:\n percent_covered_by_this_rank = sample_id_high / global_batch_size - sample_id_low / global_batch_size\n oversample_percent_here = 1 - (((1 - self.oversample_foreground_percent) -\n sample_id_low / global_batch_size) / percent_covered_by_this_rank)\n oversample_percents.append(oversample_percent_here)\n\n print(\"worker\", my_rank, \"oversample\", oversample_percents[my_rank])\n print(\"worker\", my_rank, \"batch_size\", batch_sizes[my_rank])\n # self.print_to_log_file(\"worker\", my_rank, \"oversample\", oversample_percents[my_rank])\n # self.print_to_log_file(\"worker\", my_rank, \"batch_size\", batch_sizes[my_rank])\n\n self.batch_size = batch_sizes[my_rank]\n self.oversample_foreground_percent = oversample_percents[my_rank]\n\n def _build_loss(self):\n if self.label_manager.has_regions:\n loss = DC_and_BCE_loss({},\n {'batch_dice': self.configuration_manager.batch_dice,\n 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp},\n use_ignore_label=self.label_manager.ignore_label is not None,\n dice_class=MemoryEfficientSoftDiceLoss)\n else:\n loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice,\n 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1,\n ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss)\n\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases\n # this gives higher resolution outputs more weight in the loss\n weights = np.array([1 / (2 ** i) for i in range(len(deep_supervision_scales))])\n weights[-1] = 0\n\n # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1\n weights = weights / weights.sum()\n # now wrap the loss\n loss = DeepSupervisionWrapper(loss, weights)\n return loss\n\n def configure_rotation_dummyDA_mirroring_and_inital_patch_size(self):\n \"\"\"\n This function is stupid and certainly one of the weakest spots of this implementation. Not entirely sure how we can fix it.\n \"\"\"\n patch_size = self.configuration_manager.patch_size\n dim = len(patch_size)\n # todo rotation should be defined dynamically based on patch size (more isotropic patch sizes = more rotation)\n if dim == 2:\n do_dummy_2d_data_aug = False\n # todo revisit this parametrization\n if max(patch_size) / min(patch_size) > 1.5:\n rotation_for_DA = {\n 'x': (-15. / 360 * 2. * np.pi, 15. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n else:\n rotation_for_DA = {\n 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n mirror_axes = (0, 1)\n elif dim == 3:\n # todo this is not ideal. 
We could also have patch_size (64, 16, 128) in which case a full 180deg 2d rot would be bad\n # order of the axes is determined by spacing, not image size\n do_dummy_2d_data_aug = (max(patch_size) / patch_size[0]) > ANISO_THRESHOLD\n if do_dummy_2d_data_aug:\n # why do we rotate 180 deg here all the time? We should also restrict it\n rotation_for_DA = {\n 'x': (-180. / 360 * 2. * np.pi, 180. / 360 * 2. * np.pi),\n 'y': (0, 0),\n 'z': (0, 0)\n }\n else:\n rotation_for_DA = {\n 'x': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n 'y': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n 'z': (-30. / 360 * 2. * np.pi, 30. / 360 * 2. * np.pi),\n }\n mirror_axes = (0, 1, 2)\n else:\n raise RuntimeError()\n\n # todo this function is stupid. It doesn't even use the correct scale range (we keep things as they were in the\n # old nnunet for now)\n initial_patch_size = get_patch_size(patch_size[-dim:],\n *rotation_for_DA.values(),\n (0.85, 1.25))\n if do_dummy_2d_data_aug:\n initial_patch_size[0] = patch_size[0]\n\n self.print_to_log_file(f'do_dummy_2d_data_aug: {do_dummy_2d_data_aug}')\n self.inference_allowed_mirroring_axes = mirror_axes\n\n return rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes\n\n def print_to_log_file(self, *args, also_print_to_console=True, add_timestamp=True):\n if self.local_rank == 0:\n timestamp = time()\n dt_object = datetime.fromtimestamp(timestamp)\n\n if add_timestamp:\n args = (f\"{dt_object}:\", *args)\n\n successful = False\n max_attempts = 5\n ctr = 0\n while not successful and ctr < max_attempts:\n try:\n with open(self.log_file, 'a+') as f:\n for a in args:\n f.write(str(a))\n f.write(\" \")\n f.write(\"\\n\")\n successful = True\n except IOError:\n print(f\"{datetime.fromtimestamp(timestamp)}: failed to log: \", sys.exc_info())\n sleep(0.5)\n ctr += 1\n if also_print_to_console:\n print(*args)\n elif also_print_to_console:\n print(*args)\n\n def print_plans(self):\n if self.local_rank == 0:\n dct = deepcopy(self.plans_manager.plans)\n del dct['configurations']\n self.print_to_log_file(f\"\\nThis is the configuration used by this \"\n f\"training:\\nConfiguration name: {self.configuration_name}\\n\",\n self.configuration_manager, '\\n', add_timestamp=False)\n self.print_to_log_file('These are the global plan.json settings:\\n', dct, '\\n', add_timestamp=False)\n\n def configure_optimizers(self):\n optimizer = torch.optim.SGD(self.network.parameters(), self.initial_lr, weight_decay=self.weight_decay,\n momentum=0.99, nesterov=True)\n lr_scheduler = PolyLRScheduler(optimizer, self.initial_lr, self.num_epochs)\n return optimizer, lr_scheduler\n\n def plot_network_architecture(self):\n if self._do_i_compile():\n self.print_to_log_file(\"Unable to plot network architecture: nnUNet_compile is enabled!\")\n return\n\n if self.local_rank == 0:\n try:\n # raise NotImplementedError('hiddenlayer no longer works and we do not have a viable alternative :-(')\n # pip install git+https://github.com/saugatkandel/hiddenlayer.git\n\n # from torchviz import make_dot\n # # not viable.\n # make_dot(tuple(self.network(torch.rand((1, self.num_input_channels,\n # *self.configuration_manager.patch_size),\n # device=self.device)))).render(\n # join(self.output_folder, \"network_architecture.pdf\"), format='pdf')\n # self.optimizer.zero_grad()\n\n # broken.\n\n import hiddenlayer as hl\n g = hl.build_graph(self.network,\n torch.rand((1, self.num_input_channels,\n *self.configuration_manager.patch_size),\n device=self.device),\n transforms=None)\n 
g.save(join(self.output_folder, \"network_architecture.pdf\"))\n del g\n except Exception as e:\n self.print_to_log_file(\"Unable to plot network architecture:\")\n self.print_to_log_file(e)\n\n # self.print_to_log_file(\"\\nprinting the network instead:\\n\")\n # self.print_to_log_file(self.network)\n # self.print_to_log_file(\"\\n\")\n finally:\n empty_cache(self.device)\n\n def do_split(self):\n \"\"\"\n The default split is a 5 fold CV on all available training cases. nnU-Net will create a split (it is seeded,\n so always the same) and save it as splits_final.pkl file in the preprocessed data directory.\n Sometimes you may want to create your own split for various reasons. For this you will need to create your own\n splits_final.pkl file. If this file is present, nnU-Net is going to use it and whatever splits are defined in\n it. You can create as many splits in this file as you want. Note that if you define only 4 splits (fold 0-3)\n and then set fold=4 when training (that would be the fifth split), nnU-Net will print a warning and proceed to\n use a random 80:20 data split.\n :return:\n \"\"\"\n if self.fold == \"all\":\n # if fold==all then we use all images for training and validation\n case_identifiers = get_case_identifiers(self.preprocessed_dataset_folder)\n tr_keys = case_identifiers\n val_keys = tr_keys\n else:\n splits_file = join(self.preprocessed_dataset_folder_base, \"splits_final.json\")\n dataset = nnUNetDataset(self.preprocessed_dataset_folder, case_identifiers=None,\n num_images_properties_loading_threshold=0,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage)\n # if the split file does not exist we need to create it\n if not isfile(splits_file):\n self.print_to_log_file(\"Creating new 5-fold cross-validation split...\")\n splits = []\n all_keys_sorted = np.sort(list(dataset.keys()))\n kfold = KFold(n_splits=5, shuffle=True, random_state=12345)\n for i, (train_idx, test_idx) in enumerate(kfold.split(all_keys_sorted)):\n train_keys = np.array(all_keys_sorted)[train_idx]\n test_keys = np.array(all_keys_sorted)[test_idx]\n splits.append({})\n splits[-1]['train'] = list(train_keys)\n splits[-1]['val'] = list(test_keys)\n save_json(splits, splits_file)\n\n else:\n self.print_to_log_file(\"Using splits from existing split file:\", splits_file)\n splits = load_json(splits_file)\n self.print_to_log_file(f\"The split file contains {len(splits)} splits.\")\n\n self.print_to_log_file(\"Desired fold for training: %d\" % self.fold)\n if self.fold < len(splits):\n tr_keys = splits[self.fold]['train']\n val_keys = splits[self.fold]['val']\n self.print_to_log_file(\"This split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n else:\n self.print_to_log_file(\"INFO: You requested fold %d for training but splits \"\n \"contain only %d folds. 
I am now creating a \"\n \"random (but seeded) 80:20 split!\" % (self.fold, len(splits)))\n # if we request a fold that is not in the split file, create a random 80:20 split\n rnd = np.random.RandomState(seed=12345 + self.fold)\n keys = np.sort(list(dataset.keys()))\n idx_tr = rnd.choice(len(keys), int(len(keys) * 0.8), replace=False)\n idx_val = [i for i in range(len(keys)) if i not in idx_tr]\n tr_keys = [keys[i] for i in idx_tr]\n val_keys = [keys[i] for i in idx_val]\n self.print_to_log_file(\"This random 80:20 split has %d training and %d validation cases.\"\n % (len(tr_keys), len(val_keys)))\n if any([i in val_keys for i in tr_keys]):\n self.print_to_log_file('WARNING: Some validation cases are also in the training set. Please check the '\n 'splits.json or ignore if this is intentional.')\n return tr_keys, val_keys\n\n def get_tr_and_val_datasets(self):\n # create dataset split\n tr_keys, val_keys = self.do_split()\n\n # load the datasets for training and validation. Note that we always draw random samples so we really don't\n # care about distributing training cases across GPUs.\n dataset_tr = nnUNetDataset(self.preprocessed_dataset_folder, tr_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n return dataset_tr, dataset_val\n\n def get_dataloaders(self):\n # we use the patch size to determine whether we need 2D or 3D dataloaders. We also use it to determine whether\n # we need to use dummy 2D augmentation (in case of 3D training) and what our initial patch size should be\n patch_size = self.configuration_manager.patch_size\n dim = len(patch_size)\n\n # needed for deep supervision: how much do we need to downscale the segmentation targets for the different\n # outputs?\n deep_supervision_scales = self._get_deep_supervision_scales()\n\n rotation_for_DA, do_dummy_2d_data_aug, initial_patch_size, mirror_axes = \\\n self.configure_rotation_dummyDA_mirroring_and_inital_patch_size()\n\n # training pipeline\n tr_transforms = self.get_training_transforms(\n patch_size, rotation_for_DA, deep_supervision_scales, mirror_axes, do_dummy_2d_data_aug,\n order_resampling_data=3, order_resampling_seg=1,\n use_mask_for_norm=self.configuration_manager.use_mask_for_norm,\n is_cascaded=self.is_cascaded, foreground_labels=self.label_manager.foreground_labels,\n regions=self.label_manager.foreground_regions if self.label_manager.has_regions else None,\n ignore_label=self.label_manager.ignore_label)\n\n # validation pipeline\n val_transforms = self.get_validation_transforms(deep_supervision_scales,\n is_cascaded=self.is_cascaded,\n foreground_labels=self.label_manager.foreground_labels,\n regions=self.label_manager.foreground_regions if\n self.label_manager.has_regions else None,\n ignore_label=self.label_manager.ignore_label)\n\n dl_tr, dl_val = self.get_plain_dataloaders(initial_patch_size, dim)\n\n allowed_num_processes = get_allowed_n_proc_DA()\n if allowed_num_processes == 0:\n mt_gen_train = SingleThreadedAugmenter(dl_tr, tr_transforms)\n mt_gen_val = SingleThreadedAugmenter(dl_val, val_transforms)\n else:\n mt_gen_train = LimitedLenWrapper(self.num_iterations_per_epoch, data_loader=dl_tr, transform=tr_transforms,\n num_processes=allowed_num_processes, num_cached=6, seeds=None,\n pin_memory=self.device.type == 'cuda', 
wait_time=0.02)\n mt_gen_val = LimitedLenWrapper(self.num_val_iterations_per_epoch, data_loader=dl_val,\n transform=val_transforms, num_processes=max(1, allowed_num_processes // 2),\n num_cached=3, seeds=None, pin_memory=self.device.type == 'cuda',\n wait_time=0.02)\n return mt_gen_train, mt_gen_val\n\n def get_plain_dataloaders(self, initial_patch_size: Tuple[int, ...], dim: int):\n dataset_tr, dataset_val = self.get_tr_and_val_datasets()\n\n if dim == 2:\n dl_tr = nnUNetDataLoader2D(dataset_tr, self.batch_size,\n initial_patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n dl_val = nnUNetDataLoader2D(dataset_val, self.batch_size,\n self.configuration_manager.patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n else:\n dl_tr = nnUNetDataLoader3D(dataset_tr, self.batch_size,\n initial_patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n dl_val = nnUNetDataLoader3D(dataset_val, self.batch_size,\n self.configuration_manager.patch_size,\n self.configuration_manager.patch_size,\n self.label_manager,\n oversample_foreground_percent=self.oversample_foreground_percent,\n sampling_probabilities=None, pad_sides=None)\n return dl_tr, dl_val\n\n @staticmethod\n def get_training_transforms(patch_size: Union[np.ndarray, Tuple[int]],\n rotation_for_DA: dict,\n deep_supervision_scales: Union[List, Tuple],\n mirror_axes: Tuple[int, ...],\n do_dummy_2d_data_aug: bool,\n order_resampling_data: int = 3,\n order_resampling_seg: int = 1,\n border_val_seg: int = -1,\n use_mask_for_norm: List[bool] = None,\n is_cascaded: bool = False,\n foreground_labels: Union[Tuple[int, ...], List[int]] = None,\n regions: List[Union[List[int], Tuple[int, ...], int]] = None,\n ignore_label: int = None) -> AbstractTransform:\n tr_transforms = []\n if do_dummy_2d_data_aug:\n ignore_axes = (0,)\n tr_transforms.append(Convert3DTo2DTransform())\n patch_size_spatial = patch_size[1:]\n else:\n patch_size_spatial = patch_size\n ignore_axes = None\n\n tr_transforms.append(SpatialTransform(\n patch_size_spatial, patch_center_dist_from_border=None,\n do_elastic_deform=False, alpha=(0, 0), sigma=(0, 0),\n do_rotation=True, angle_x=rotation_for_DA['x'], angle_y=rotation_for_DA['y'], angle_z=rotation_for_DA['z'],\n p_rot_per_axis=1, # todo experiment with this\n do_scale=True, scale=(0.7, 1.4),\n border_mode_data=\"constant\", border_cval_data=0, order_data=order_resampling_data,\n border_mode_seg=\"constant\", border_cval_seg=border_val_seg, order_seg=order_resampling_seg,\n random_crop=False, # random cropping is part of our dataloaders\n p_el_per_sample=0, p_scale_per_sample=0.2, p_rot_per_sample=0.2,\n independent_scale_for_each_axis=False # todo experiment with this\n ))\n\n if do_dummy_2d_data_aug:\n tr_transforms.append(Convert2DTo3DTransform())\n\n tr_transforms.append(GaussianNoiseTransform(p_per_sample=0.1))\n tr_transforms.append(GaussianBlurTransform((0.5, 1.), different_sigma_per_channel=True, p_per_sample=0.2,\n p_per_channel=0.5))\n tr_transforms.append(BrightnessMultiplicativeTransform(multiplier_range=(0.75, 1.25), p_per_sample=0.15))\n tr_transforms.append(ContrastAugmentationTransform(p_per_sample=0.15))\n 
tr_transforms.append(SimulateLowResolutionTransform(zoom_range=(0.5, 1), per_channel=True,\n p_per_channel=0.5,\n order_downsample=0, order_upsample=3, p_per_sample=0.25,\n ignore_axes=ignore_axes))\n tr_transforms.append(GammaTransform((0.7, 1.5), True, True, retain_stats=True, p_per_sample=0.1))\n tr_transforms.append(GammaTransform((0.7, 1.5), False, True, retain_stats=True, p_per_sample=0.3))\n\n if mirror_axes is not None and len(mirror_axes) > 0:\n tr_transforms.append(MirrorTransform(mirror_axes))\n\n if use_mask_for_norm is not None and any(use_mask_for_norm):\n tr_transforms.append(MaskTransform([i for i in range(len(use_mask_for_norm)) if use_mask_for_norm[i]],\n mask_idx_in_seg=0, set_outside_to=0))\n\n tr_transforms.append(RemoveLabelTransform(-1, 0))\n\n if is_cascaded:\n assert foreground_labels is not None, 'We need foreground_labels for cascade augmentations'\n tr_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data'))\n tr_transforms.append(ApplyRandomBinaryOperatorTransform(\n channel_idx=list(range(-len(foreground_labels), 0)),\n p_per_sample=0.4,\n key=\"data\",\n strel_size=(1, 8),\n p_per_label=1))\n tr_transforms.append(\n RemoveRandomConnectedComponentFromOneHotEncodingTransform(\n channel_idx=list(range(-len(foreground_labels), 0)),\n key=\"data\",\n p_per_sample=0.2,\n fill_with_other_class_p=0,\n dont_do_if_covers_more_than_x_percent=0.15))\n\n tr_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n # the ignore label must also be converted\n tr_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]\n if ignore_label is not None else regions,\n 'target', 'target'))\n\n if deep_supervision_scales is not None:\n tr_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n tr_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n tr_transforms = Compose(tr_transforms)\n return tr_transforms\n\n @staticmethod\n def get_validation_transforms(deep_supervision_scales: Union[List, Tuple],\n is_cascaded: bool = False,\n foreground_labels: Union[Tuple[int, ...], List[int]] = None,\n regions: List[Union[List[int], Tuple[int, ...], int]] = None,\n ignore_label: int = None) -> AbstractTransform:\n val_transforms = []\n val_transforms.append(RemoveLabelTransform(-1, 0))\n\n if is_cascaded:\n val_transforms.append(MoveSegAsOneHotToData(1, foreground_labels, 'seg', 'data'))\n\n val_transforms.append(RenameTransform('seg', 'target', True))\n\n if regions is not None:\n # the ignore label must also be converted\n val_transforms.append(ConvertSegmentationToRegionsTransform(list(regions) + [ignore_label]\n if ignore_label is not None else regions,\n 'target', 'target'))\n\n if deep_supervision_scales is not None:\n val_transforms.append(DownsampleSegForDSTransform2(deep_supervision_scales, 0, input_key='target',\n output_key='target'))\n\n val_transforms.append(NumpyToTensor(['data', 'target'], 'float'))\n val_transforms = Compose(val_transforms)\n return val_transforms\n\n def set_deep_supervision_enabled(self, enabled: bool):\n \"\"\"\n This function is specific for the default architecture in nnU-Net. 
If you change the architecture, there are\n chances you need to change this as well!\n \"\"\"\n if self.is_ddp:\n self.network.module.decoder.deep_supervision = enabled\n else:\n self.network.decoder.deep_supervision = enabled\n\n def on_train_start(self):\n if not self.was_initialized:\n self.initialize()\n\n maybe_mkdir_p(self.output_folder)\n\n # make sure deep supervision is on in the network\n self.set_deep_supervision_enabled(True)\n\n self.print_plans()\n empty_cache(self.device)\n\n # maybe unpack\n if self.unpack_dataset and self.local_rank == 0:\n self.print_to_log_file('unpacking dataset...')\n unpack_dataset(self.preprocessed_dataset_folder, unpack_segmentation=True, overwrite_existing=False,\n num_processes=max(1, round(get_allowed_n_proc_DA() // 2)))\n self.print_to_log_file('unpacking done...')\n\n if self.is_ddp:\n dist.barrier()\n\n # dataloaders must be instantiated here because they need access to the training data which may not be present\n # when doing inference\n self.dataloader_train, self.dataloader_val = self.get_dataloaders()\n\n # copy plans and dataset.json so that they can be used for restoring everything we need for inference\n save_json(self.plans_manager.plans, join(self.output_folder_base, 'plans.json'), sort_keys=False)\n save_json(self.dataset_json, join(self.output_folder_base, 'dataset.json'), sort_keys=False)\n\n # we don't really need the fingerprint but its still handy to have it with the others\n shutil.copy(join(self.preprocessed_dataset_folder_base, 'dataset_fingerprint.json'),\n join(self.output_folder_base, 'dataset_fingerprint.json'))\n\n # produces a pdf in output folder\n self.plot_network_architecture()\n\n self._save_debug_information()\n\n # print(f\"batch size: {self.batch_size}\")\n # print(f\"oversample: {self.oversample_foreground_percent}\")\n\n def on_train_end(self):\n # dirty hack because on_epoch_end increments the epoch counter and this is executed afterwards.\n # This will lead to the wrong current epoch to be stored\n self.current_epoch -= 1\n self.save_checkpoint(join(self.output_folder, \"checkpoint_final.pth\"))\n self.current_epoch += 1\n\n # now we can delete latest\n if self.local_rank == 0 and isfile(join(self.output_folder, \"checkpoint_latest.pth\")):\n os.remove(join(self.output_folder, \"checkpoint_latest.pth\"))\n\n # shut down dataloaders\n old_stdout = sys.stdout\n with open(os.devnull, 'w') as f:\n sys.stdout = f\n if self.dataloader_train is not None:\n self.dataloader_train._finish()\n if self.dataloader_val is not None:\n self.dataloader_val._finish()\n sys.stdout = old_stdout\n\n empty_cache(self.device)\n self.print_to_log_file(\"Training done.\")\n\n def on_train_epoch_start(self):\n self.network.train()\n self.lr_scheduler.step(self.current_epoch)\n self.print_to_log_file('')\n self.print_to_log_file(f'Epoch {self.current_epoch}')\n self.print_to_log_file(\n f\"Current learning rate: {np.round(self.optimizer.param_groups[0]['lr'], decimals=5)}\")\n # lrs are the same for all workers so we don't need to gather them in case of DDP training\n self.logger.log('lrs', self.optimizer.param_groups[0]['lr'], self.current_epoch)\n\n def train_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n\n data = data.to(self.device, non_blocking=True)\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n self.optimizer.zero_grad(set_to_none=True)\n # Autocast is a little bitch.\n # If 
the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n output = self.network(data)\n # del data\n l = self.loss(output, target)\n\n if self.grad_scaler is not None:\n self.grad_scaler.scale(l).backward()\n self.grad_scaler.unscale_(self.optimizer)\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.grad_scaler.step(self.optimizer)\n self.grad_scaler.update()\n else:\n l.backward()\n torch.nn.utils.clip_grad_norm_(self.network.parameters(), 12)\n self.optimizer.step()\n return {'loss': l.detach().cpu().numpy()}\n\n def on_train_epoch_end(self, train_outputs: List[dict]):\n outputs = collate_outputs(train_outputs)\n\n if self.is_ddp:\n losses_tr = [None for _ in range(dist.get_world_size())]\n dist.all_gather_object(losses_tr, outputs['loss'])\n loss_here = np.vstack(losses_tr).mean()\n else:\n loss_here = np.mean(outputs['loss'])\n\n self.logger.log('train_losses', loss_here, self.current_epoch)\n\n def on_validation_epoch_start(self):\n self.network.eval()\n\n def validation_step(self, batch: dict) -> dict:\n data = batch['data']\n target = batch['target']\n\n data = data.to(self.device, non_blocking=True)\n if isinstance(target, list):\n target = [i.to(self.device, non_blocking=True) for i in target]\n else:\n target = target.to(self.device, non_blocking=True)\n\n # Autocast is a little bitch.\n # If the device_type is 'cpu' then it's slow as heck and needs to be disabled.\n # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False)\n # So autocast will only be active if we have a cuda device.\n with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context():\n output = self.network(data)\n del data\n l = self.loss(output, target)\n\n # we only need the output with the highest output resolution\n output = output[0]\n target = target[0]\n\n # the following is needed for online evaluation. Fake dice (green line)\n axes = [0] + list(range(2, output.ndim))\n\n if self.label_manager.has_regions:\n predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long()\n else:\n # no need for softmax\n output_seg = output.argmax(1)[:, None]\n predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32)\n predicted_segmentation_onehot.scatter_(1, output_seg, 1)\n del output_seg\n\n if self.label_manager.has_ignore_label:\n if not self.label_manager.has_regions:\n mask = (target != self.label_manager.ignore_label).float()\n # CAREFUL that you don't rely on target after this line!\n target[target == self.label_manager.ignore_label] = 0\n else:\n mask = 1 - target[:, -1:]\n # CAREFUL that you don't rely on target after this line!\n target = target[:, :-1]\n else:\n mask = None\n\n tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)\n\n tp_hard = tp.detach().cpu().numpy()\n fp_hard = fp.detach().cpu().numpy()\n fn_hard = fn.detach().cpu().numpy()\n if not self.label_manager.has_regions:\n # if we train with regions all segmentation heads predict some kind of foreground. 
In conventional\n # (softmax training) there needs tobe one output for the background. We are not interested in the\n # background Dice\n # [1:] in order to remove background\n tp_hard = tp_hard[1:]\n fp_hard = fp_hard[1:]\n fn_hard = fn_hard[1:]\n\n return {'loss': l.detach().cpu().numpy(), 'tp_hard': tp_hard, 'fp_hard': fp_hard, 'fn_hard': fn_hard}\n\n def on_validation_epoch_end(self, val_outputs: List[dict]):\n outputs_collated = collate_outputs(val_outputs)\n tp = np.sum(outputs_collated['tp_hard'], 0)\n fp = np.sum(outputs_collated['fp_hard'], 0)\n fn = np.sum(outputs_collated['fn_hard'], 0)\n\n if self.is_ddp:\n world_size = dist.get_world_size()\n\n tps = [None for _ in range(world_size)]\n dist.all_gather_object(tps, tp)\n tp = np.vstack([i[None] for i in tps]).sum(0)\n\n fps = [None for _ in range(world_size)]\n dist.all_gather_object(fps, fp)\n fp = np.vstack([i[None] for i in fps]).sum(0)\n\n fns = [None for _ in range(world_size)]\n dist.all_gather_object(fns, fn)\n fn = np.vstack([i[None] for i in fns]).sum(0)\n\n losses_val = [None for _ in range(world_size)]\n dist.all_gather_object(losses_val, outputs_collated['loss'])\n loss_here = np.vstack(losses_val).mean()\n else:\n loss_here = np.mean(outputs_collated['loss'])\n\n global_dc_per_class = [i for i in [2 * i / (2 * i + j + k) for i, j, k in\n zip(tp, fp, fn)]]\n mean_fg_dice = np.nanmean(global_dc_per_class)\n self.logger.log('mean_fg_dice', mean_fg_dice, self.current_epoch)\n self.logger.log('dice_per_class_or_region', global_dc_per_class, self.current_epoch)\n self.logger.log('val_losses', loss_here, self.current_epoch)\n\n def on_epoch_start(self):\n self.logger.log('epoch_start_timestamps', time(), self.current_epoch)\n\n def on_epoch_end(self):\n self.logger.log('epoch_end_timestamps', time(), self.current_epoch)\n\n # todo find a solution for this stupid shit\n self.print_to_log_file('train_loss', np.round(self.logger.my_fantastic_logging['train_losses'][-1], decimals=4))\n self.print_to_log_file('val_loss', np.round(self.logger.my_fantastic_logging['val_losses'][-1], decimals=4))\n self.print_to_log_file('Pseudo dice', [np.round(i, decimals=4) for i in\n self.logger.my_fantastic_logging['dice_per_class_or_region'][-1]])\n self.print_to_log_file(\n f\"Epoch time: {np.round(self.logger.my_fantastic_logging['epoch_end_timestamps'][-1] - self.logger.my_fantastic_logging['epoch_start_timestamps'][-1], decimals=2)} s\")\n\n # handling periodic checkpointing\n current_epoch = self.current_epoch\n if (current_epoch + 1) % self.save_every == 0 and current_epoch != (self.num_epochs - 1):\n self.save_checkpoint(join(self.output_folder, 'checkpoint_latest.pth'))\n\n # handle 'best' checkpointing. ema_fg_dice is computed by the logger and can be accessed like this\n if self._best_ema is None or self.logger.my_fantastic_logging['ema_fg_dice'][-1] > self._best_ema:\n self._best_ema = self.logger.my_fantastic_logging['ema_fg_dice'][-1]\n self.print_to_log_file(f\"Yayy! 
New best EMA pseudo Dice: {np.round(self._best_ema, decimals=4)}\")\n self.save_checkpoint(join(self.output_folder, 'checkpoint_best.pth'))\n\n if self.local_rank == 0:\n self.logger.plot_progress_png(self.output_folder)\n\n self.current_epoch += 1\n\n def save_checkpoint(self, filename: str) -> None:\n if self.local_rank == 0:\n if not self.disable_checkpointing:\n if self.is_ddp:\n mod = self.network.module\n else:\n mod = self.network\n if isinstance(mod, OptimizedModule):\n mod = mod._orig_mod\n\n checkpoint = {\n 'network_weights': mod.state_dict(),\n 'optimizer_state': self.optimizer.state_dict(),\n 'grad_scaler_state': self.grad_scaler.state_dict() if self.grad_scaler is not None else None,\n 'logging': self.logger.get_checkpoint(),\n '_best_ema': self._best_ema,\n 'current_epoch': self.current_epoch + 1,\n 'init_args': self.my_init_kwargs,\n 'trainer_name': self.__class__.__name__,\n 'inference_allowed_mirroring_axes': self.inference_allowed_mirroring_axes,\n }\n torch.save(checkpoint, filename)\n else:\n self.print_to_log_file('No checkpoint written, checkpointing is disabled')\n\n def load_checkpoint(self, filename_or_checkpoint: Union[dict, str]) -> None:\n if not self.was_initialized:\n self.initialize()\n\n if isinstance(filename_or_checkpoint, str):\n checkpoint = torch.load(filename_or_checkpoint, map_location=self.device)\n # if state dict comes from nn.DataParallel but we use non-parallel model here then the state dict keys do not\n # match. Use heuristic to make it match\n new_state_dict = {}\n for k, value in checkpoint['network_weights'].items():\n key = k\n if key not in self.network.state_dict().keys() and key.startswith('module.'):\n key = key[7:]\n new_state_dict[key] = value\n\n self.my_init_kwargs = checkpoint['init_args']\n self.current_epoch = checkpoint['current_epoch']\n self.logger.load_checkpoint(checkpoint['logging'])\n self._best_ema = checkpoint['_best_ema']\n self.inference_allowed_mirroring_axes = checkpoint[\n 'inference_allowed_mirroring_axes'] if 'inference_allowed_mirroring_axes' in checkpoint.keys() else self.inference_allowed_mirroring_axes\n\n # messing with state dict naming schemes. 
Facepalm.\n if self.is_ddp:\n if isinstance(self.network.module, OptimizedModule):\n self.network.module._orig_mod.load_state_dict(new_state_dict)\n else:\n self.network.module.load_state_dict(new_state_dict)\n else:\n if isinstance(self.network, OptimizedModule):\n self.network._orig_mod.load_state_dict(new_state_dict)\n else:\n self.network.load_state_dict(new_state_dict)\n self.optimizer.load_state_dict(checkpoint['optimizer_state'])\n if self.grad_scaler is not None:\n if checkpoint['grad_scaler_state'] is not None:\n self.grad_scaler.load_state_dict(checkpoint['grad_scaler_state'])\n\n def perform_actual_validation(self, save_probabilities: bool = False):\n self.set_deep_supervision_enabled(False)\n self.network.eval()\n\n predictor = nnUNetPredictor(tile_step_size=0.5, use_gaussian=True, use_mirroring=True,\n perform_everything_on_gpu=True, device=self.device, verbose=False,\n verbose_preprocessing=False, allow_tqdm=False)\n predictor.manual_initialization(self.network, self.plans_manager, self.configuration_manager, None,\n self.dataset_json, self.__class__.__name__,\n self.inference_allowed_mirroring_axes)\n\n with multiprocessing.get_context(\"spawn\").Pool(default_num_processes) as segmentation_export_pool:\n worker_list = [i for i in segmentation_export_pool._pool]\n validation_output_folder = join(self.output_folder, 'validation')\n maybe_mkdir_p(validation_output_folder)\n\n # we cannot use self.get_tr_and_val_datasets() here because we might be DDP and then we have to distribute\n # the validation keys across the workers.\n _, val_keys = self.do_split()\n if self.is_ddp:\n val_keys = val_keys[self.local_rank:: dist.get_world_size()]\n\n dataset_val = nnUNetDataset(self.preprocessed_dataset_folder, val_keys,\n folder_with_segs_from_previous_stage=self.folder_with_segs_from_previous_stage,\n num_images_properties_loading_threshold=0)\n\n next_stages = self.configuration_manager.next_stage_names\n\n if next_stages is not None:\n _ = [maybe_mkdir_p(join(self.output_folder_base, 'predicted_next_stage', n)) for n in next_stages]\n\n results = []\n\n for k in dataset_val.keys():\n proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results,\n allowed_num_queued=2)\n while not proceed:\n sleep(0.1)\n proceed = not check_workers_alive_and_busy(segmentation_export_pool, worker_list, results,\n allowed_num_queued=2)\n\n self.print_to_log_file(f\"predicting {k}\")\n data, seg, properties = dataset_val.load_case(k)\n\n if self.is_cascaded:\n data = np.vstack((data, convert_labelmap_to_one_hot(seg[-1], self.label_manager.foreground_labels,\n output_dtype=data.dtype)))\n with warnings.catch_warnings():\n # ignore 'The given NumPy array is not writable' warning\n warnings.simplefilter(\"ignore\")\n data = torch.from_numpy(data)\n\n output_filename_truncated = join(validation_output_folder, k)\n\n try:\n prediction = predictor.predict_sliding_window_return_logits(data)\n except RuntimeError:\n predictor.perform_everything_on_gpu = False\n prediction = predictor.predict_sliding_window_return_logits(data)\n predictor.perform_everything_on_gpu = True\n\n prediction = prediction.cpu()\n\n # this needs to go into background processes\n results.append(\n segmentation_export_pool.starmap_async(\n export_prediction_from_logits, (\n (prediction, properties, self.configuration_manager, self.plans_manager,\n self.dataset_json, output_filename_truncated, save_probabilities),\n )\n )\n )\n # for debug purposes\n # export_prediction(prediction_for_export, properties, 
self.configuration, self.plans, self.dataset_json,\n # output_filename_truncated, save_probabilities)\n\n # if needed, export the softmax prediction for the next stage\n if next_stages is not None:\n for n in next_stages:\n next_stage_config_manager = self.plans_manager.get_configuration(n)\n expected_preprocessed_folder = join(nnUNet_preprocessed, self.plans_manager.dataset_name,\n next_stage_config_manager.data_identifier)\n\n try:\n # we do this so that we can use load_case and do not have to hard code how loading training cases is implemented\n tmp = nnUNetDataset(expected_preprocessed_folder, [k],\n num_images_properties_loading_threshold=0)\n d, s, p = tmp.load_case(k)\n except FileNotFoundError:\n self.print_to_log_file(\n f\"Predicting next stage {n} failed for case {k} because the preprocessed file is missing! \"\n f\"Run the preprocessing for this configuration first!\")\n continue\n\n target_shape = d.shape[1:]\n output_folder = join(self.output_folder_base, 'predicted_next_stage', n)\n output_file = join(output_folder, k + '.npz')\n\n # resample_and_save(prediction, target_shape, output_file, self.plans_manager, self.configuration_manager, properties,\n # self.dataset_json)\n results.append(segmentation_export_pool.starmap_async(\n resample_and_save, (\n (prediction, target_shape, output_file, self.plans_manager,\n self.configuration_manager,\n properties,\n self.dataset_json),\n )\n ))\n\n _ = [r.get() for r in results]\n\n if self.is_ddp:\n dist.barrier()\n\n if self.local_rank == 0:\n metrics = compute_metrics_on_folder(join(self.preprocessed_dataset_folder_base, 'gt_segmentations'),\n validation_output_folder,\n join(validation_output_folder, 'summary.json'),\n self.plans_manager.image_reader_writer_class(),\n self.dataset_json[\"file_ending\"],\n self.label_manager.foreground_regions if self.label_manager.has_regions else\n self.label_manager.foreground_labels,\n self.label_manager.ignore_label, chill=True)\n self.print_to_log_file(\"Validation complete\", also_print_to_console=True)\n self.print_to_log_file(\"Mean Validation Dice: \", (metrics['foreground_mean'][\"Dice\"]), also_print_to_console=True)\n\n self.set_deep_supervision_enabled(True)\n compute_gaussian.cache_clear()\n\n def run_training(self):\n self.on_train_start()\n\n for epoch in range(self.current_epoch, self.num_epochs):\n self.on_epoch_start()\n\n self.on_train_epoch_start()\n train_outputs = []\n for batch_id in range(self.num_iterations_per_epoch):\n train_outputs.append(self.train_step(next(self.dataloader_train)))\n self.on_train_epoch_end(train_outputs)\n\n with torch.no_grad():\n self.on_validation_epoch_start()\n val_outputs = []\n for batch_id in range(self.num_val_iterations_per_epoch):\n val_outputs.append(self.validation_step(next(self.dataloader_val)))\n self.on_validation_epoch_end(val_outputs)\n\n self.on_epoch_end()\n\n self.on_train_end()" }, { "identifier": "dummy_context", "path": "nnunetv2/utilities/helpers.py", "snippet": "class dummy_context(object):\n def __enter__(self):\n pass\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n pass" }, { "identifier": "determine_num_input_channels", "path": "nnunetv2/utilities/label_handling/label_handling.py", "snippet": "def determine_num_input_channels(plans_manager: PlansManager,\n configuration_or_config_manager: Union[str, ConfigurationManager],\n dataset_json: dict) -> int:\n if isinstance(configuration_or_config_manager, str):\n config_manager = plans_manager.get_configuration(configuration_or_config_manager)\n else:\n config_manager = 
configuration_or_config_manager\n\n label_manager = plans_manager.get_label_manager(dataset_json)\n num_modalities = len(dataset_json['modality']) if 'modality' in dataset_json.keys() else len(dataset_json['channel_names'])\n\n # cascade has different number of input channels\n if config_manager.previous_stage_name is not None:\n num_label_inputs = len(label_manager.foreground_labels)\n num_input_channels = num_modalities + num_label_inputs\n else:\n num_input_channels = num_modalities\n return num_input_channels" } ]
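The loss snippets above (get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss, DC_and_CE_loss) all revolve around the same soft-Dice ratio, dc = (2 * intersect + smooth) / (sum_pred + sum_gt + smooth), returned negated so it can be minimised. Below is a minimal toy sketch of that ratio, assuming the smooth=1e-5 value that _build_loss passes in; the tensor shapes are illustrative only, not nnU-Net defaults.

import torch

# Toy soft-Dice sketch mirroring MemoryEfficientSoftDiceLoss (shapes are illustrative).
# pred: per-class probabilities (b, c, x, y); y_onehot: one-hot ground truth of the same shape.
pred = torch.tensor([[[[0.9, 0.2]], [[0.1, 0.8]]]])      # shape (1, 2, 1, 2)
y_onehot = torch.tensor([[[[1.0, 0.0]], [[0.0, 1.0]]]])  # shape (1, 2, 1, 2)

axes = tuple(range(2, pred.ndim))             # sum over spatial dims -> one value per (batch, class)
intersect = (pred * y_onehot).sum(axes)
sum_pred = pred.sum(axes)
sum_gt = y_onehot.sum(axes)

smooth = 1e-5                                 # value passed by _build_loss above
dc = (2 * intersect + smooth) / torch.clip(sum_pred + sum_gt + smooth, 1e-8)
loss = -dc.mean()                             # negated so that minimising the loss maximises Dice

With batch_dice=True the per-sample sums are additionally summed over the batch (and all-gathered under DDP) before the ratio is taken, which is what the ddp/batch_dice branches in MemoryEfficientSoftDiceLoss do.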
import torch from torch import autocast from nnunetv2.training.loss.compound_losses import DC_and_BCE_loss, DC_and_CE_loss from nnunetv2.training.loss.dice import get_tp_fp_fn_tn, MemoryEfficientSoftDiceLoss from nnunetv2.training.nnUNetTrainer.nnUNetTrainer import nnUNetTrainer from nnunetv2.utilities.helpers import dummy_context from nnunetv2.utilities.label_handling.label_handling import determine_num_input_channels from torch.nn.parallel import DistributedDataParallel as DDP
17939
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast is a little bitch. # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) # So autocast will only be active if we have a cuda device. with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context(): output = self.network(data) del data l = self.loss(output, target) # the following is needed for online evaluation. Fake dice (green line) axes = [0] + list(range(2, output.ndim)) if self.label_manager.has_regions: predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() else: # no need for softmax output_seg = output.argmax(1)[:, None] predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) predicted_segmentation_onehot.scatter_(1, output_seg, 1) del output_seg if self.label_manager.has_ignore_label: if not self.label_manager.has_regions: mask = (target != self.label_manager.ignore_label).float() # CAREFUL that you don't rely on target after this line! target[target == self.label_manager.ignore_label] = 0 else: mask = 1 - target[:, -1:] # CAREFUL that you don't rely on target after this line! target = target[:, :-1] else: mask = None
class nnUNetTrainerNoDeepSupervision(nnUNetTrainer): def _build_loss(self): if self.label_manager.has_regions: loss = DC_and_BCE_loss({}, {'batch_dice': self.configuration_manager.batch_dice, 'do_bg': True, 'smooth': 1e-5, 'ddp': self.is_ddp}, use_ignore_label=self.label_manager.ignore_label is not None, dice_class=MemoryEfficientSoftDiceLoss) else: loss = DC_and_CE_loss({'batch_dice': self.configuration_manager.batch_dice, 'smooth': 1e-5, 'do_bg': False, 'ddp': self.is_ddp}, {}, weight_ce=1, weight_dice=1, ignore_label=self.label_manager.ignore_label, dice_class=MemoryEfficientSoftDiceLoss) return loss def _get_deep_supervision_scales(self): return None def initialize(self): if not self.was_initialized: self.num_input_channels = determine_num_input_channels(self.plans_manager, self.configuration_manager, self.dataset_json) self.network = self.build_network_architecture(self.plans_manager, self.dataset_json, self.configuration_manager, self.num_input_channels, enable_deep_supervision=False).to(self.device) self.optimizer, self.lr_scheduler = self.configure_optimizers() # if ddp, wrap in DDP wrapper if self.is_ddp: self.network = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.network) self.network = DDP(self.network, device_ids=[self.local_rank]) self.loss = self._build_loss() self.was_initialized = True else: raise RuntimeError("You have called self.initialize even though the trainer was already initialized. " "That should not happen.") def set_deep_supervision_enabled(self, enabled: bool): pass def validation_step(self, batch: dict) -> dict: data = batch['data'] target = batch['target'] data = data.to(self.device, non_blocking=True) if isinstance(target, list): target = [i.to(self.device, non_blocking=True) for i in target] else: target = target.to(self.device, non_blocking=True) self.optimizer.zero_grad(set_to_none=True) # Autocast is a little bitch. # If the device_type is 'cpu' then it's slow as heck and needs to be disabled. # If the device_type is 'mps' then it will complain that mps is not implemented, even if enabled=False is set. Whyyyyyyy. (this is why we don't make use of enabled=False) # So autocast will only be active if we have a cuda device. with autocast(self.device.type, enabled=True) if self.device.type == 'cuda' else dummy_context(): output = self.network(data) del data l = self.loss(output, target) # the following is needed for online evaluation. Fake dice (green line) axes = [0] + list(range(2, output.ndim)) if self.label_manager.has_regions: predicted_segmentation_onehot = (torch.sigmoid(output) > 0.5).long() else: # no need for softmax output_seg = output.argmax(1)[:, None] predicted_segmentation_onehot = torch.zeros(output.shape, device=output.device, dtype=torch.float32) predicted_segmentation_onehot.scatter_(1, output_seg, 1) del output_seg if self.label_manager.has_ignore_label: if not self.label_manager.has_regions: mask = (target != self.label_manager.ignore_label).float() # CAREFUL that you don't rely on target after this line! target[target == self.label_manager.ignore_label] = 0 else: mask = 1 - target[:, -1:] # CAREFUL that you don't rely on target after this line! target = target[:, :-1] else: mask = None
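validation_step above turns the network output into a hard one-hot segmentation with argmax followed by scatter_, and, when an ignore label exists, builds a mask so that ignored voxels do not contribute to the online Dice. A self-contained sketch of just that conversion, with made-up shapes and an assumed ignore label of 3 (both purely illustrative):

import torch

b, c, x, y = 2, 4, 8, 8                       # illustrative shapes
ignore_label = 3                              # assumed ignore label, for illustration only

output = torch.randn(b, c, x, y)              # network logits
target = torch.randint(0, c, (b, 1, x, y))    # label map, shape (b, 1, x, y)

output_seg = output.argmax(1)[:, None]        # hard prediction, shape (b, 1, x, y)
pred_onehot = torch.zeros_like(output)
pred_onehot.scatter_(1, output_seg, 1)        # one channel per class, 1 at the predicted class

mask = (target != ignore_label).float()       # 1 = valid voxel, 0 = ignored
target = target.clone()
target[target == ignore_label] = 0            # any valid value works; these voxels are masked out anyway

In the region-based branch the mask comes from 1 - target[:, -1:] instead, because the ignore region travels as the last target channel.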
tp, fp, fn, _ = get_tp_fp_fn_tn(predicted_segmentation_onehot, target, axes=axes, mask=mask)
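The target continuation above feeds hard tp/fp/fn counts into the online "fake dice (green line)" metric mentioned in the validation_step comments. As orientation only (this is not part of the record), a minimal sketch of how such per-class counts are typically reduced to a pseudo-Dice score; the function name, the epsilon, and the toy counts are illustrative assumptions.

import torch

def pseudo_dice_from_counts(tp: torch.Tensor, fp: torch.Tensor, fn: torch.Tensor,
                            eps: float = 1e-8) -> torch.Tensor:
    # Collapse per-class true-positive / false-positive / false-negative counts
    # into a per-class pseudo-Dice score: 2*tp / (2*tp + fp + fn).
    return 2 * tp / (2 * tp + fp + fn + eps)

# Toy counts for two foreground classes (illustrative values only).
tp = torch.tensor([90.0, 40.0])
fp = torch.tensor([10.0, 5.0])
fn = torch.tensor([5.0, 20.0])
print(pseudo_dice_from_counts(tp, fp, fn))  # per-class pseudo-Dice values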
2
2023-12-04 19:43:14+00:00
24k
opisaac9001/TTS-With-ooba-and-voice
TTS/tts/models/tortoise.py
[ { "identifier": "TorchMelSpectrogram", "path": "TTS/tts/layers/tortoise/arch_utils.py", "snippet": "class TorchMelSpectrogram(nn.Module):\n def __init__(\n self,\n filter_length=1024,\n hop_length=256,\n win_length=1024,\n n_mel_channels=80,\n mel_fmin=0,\n mel_fmax=8000,\n sampling_rate=22050,\n normalize=False,\n mel_norm_file=DEFAULT_MEL_NORM_FILE,\n ):\n super().__init__()\n # These are the default tacotron values for the MEL spectrogram.\n self.filter_length = filter_length\n self.hop_length = hop_length\n self.win_length = win_length\n self.n_mel_channels = n_mel_channels\n self.mel_fmin = mel_fmin\n self.mel_fmax = mel_fmax\n self.sampling_rate = sampling_rate\n self.mel_stft = torchaudio.transforms.MelSpectrogram(\n n_fft=self.filter_length,\n hop_length=self.hop_length,\n win_length=self.win_length,\n power=2,\n normalized=normalize,\n sample_rate=self.sampling_rate,\n f_min=self.mel_fmin,\n f_max=self.mel_fmax,\n n_mels=self.n_mel_channels,\n norm=\"slaney\",\n )\n self.mel_norm_file = mel_norm_file\n if self.mel_norm_file is not None:\n with fsspec.open(self.mel_norm_file) as f:\n self.mel_norms = torch.load(f)\n else:\n self.mel_norms = None\n\n def forward(self, inp):\n if (\n len(inp.shape) == 3\n ): # Automatically squeeze out the channels dimension if it is present (assuming mono-audio)\n inp = inp.squeeze(1)\n assert len(inp.shape) == 2\n self.mel_stft = self.mel_stft.to(inp.device)\n mel = self.mel_stft(inp)\n # Perform dynamic range compression\n mel = torch.log(torch.clamp(mel, min=1e-5))\n if self.mel_norms is not None:\n self.mel_norms = self.mel_norms.to(mel.device)\n mel = mel / self.mel_norms.unsqueeze(0).unsqueeze(-1)\n return mel" }, { "identifier": "denormalize_tacotron_mel", "path": "TTS/tts/layers/tortoise/audio_utils.py", "snippet": "def denormalize_tacotron_mel(norm_mel):\n return ((norm_mel + 1) / 2) * (TACOTRON_MEL_MAX - TACOTRON_MEL_MIN) + TACOTRON_MEL_MIN" }, { "identifier": "load_voice", "path": "TTS/tts/layers/tortoise/audio_utils.py", "snippet": "def load_voice(voice: str, extra_voice_dirs: List[str] = []):\n if voice == \"random\":\n return None, None\n\n voices = get_voices(extra_voice_dirs)\n paths = voices[voice]\n if len(paths) == 1 and paths[0].endswith(\".pth\"):\n return None, torch.load(paths[0])\n else:\n conds = []\n for cond_path in paths:\n c = load_required_audio(cond_path)\n conds.append(c)\n return conds, None" }, { "identifier": "wav_to_univnet_mel", "path": "TTS/tts/layers/tortoise/audio_utils.py", "snippet": "def wav_to_univnet_mel(wav, do_normalization=False, device=\"cuda\"):\n stft = TorchSTFT(\n n_fft=1024,\n hop_length=256,\n win_length=1024,\n use_mel=True,\n n_mels=100,\n sample_rate=24000,\n mel_fmin=0,\n mel_fmax=12000,\n )\n stft = stft.to(device)\n mel = stft(wav)\n mel = dynamic_range_compression(mel)\n if do_normalization:\n mel = normalize_tacotron_mel(mel)\n return mel" }, { "identifier": "UnifiedVoice", "path": "TTS/tts/layers/tortoise/autoregressive.py", "snippet": "class UnifiedVoice(nn.Module):\n def __init__(\n self,\n layers=8,\n model_dim=512,\n heads=8,\n max_text_tokens=120,\n max_mel_tokens=250,\n max_conditioning_inputs=1,\n mel_length_compression=1024,\n number_text_tokens=256,\n start_text_token=None,\n number_mel_codes=8194,\n start_mel_token=8192,\n stop_mel_token=8193,\n train_solo_embeddings=False,\n use_mel_codes_as_input=True,\n checkpointing=True,\n types=1,\n ):\n \"\"\"\n Args:\n layers: Number of layers in transformer stack.\n model_dim: Operating dimensions of the transformer\n heads: Number 
of transformer heads. Must be divisible by model_dim. Recommend model_dim//64\n max_text_tokens: Maximum number of text tokens that will be encountered by model.\n max_mel_tokens: Maximum number of MEL tokens that will be encountered by model.\n max_conditioning_inputs: Maximum number of conditioning inputs provided to the model. If (1), conditioning input can be of format (b,80,s), otherwise (b,n,80,s).\n mel_length_compression: The factor between <number_input_samples> and <mel_tokens>. Used to compute MEL code padding given wav input length.\n number_text_tokens:\n start_text_token:\n stop_text_token:\n number_mel_codes:\n start_mel_token:\n stop_mel_token:\n train_solo_embeddings:\n use_mel_codes_as_input:\n checkpointing:\n \"\"\"\n super().__init__()\n\n self.number_text_tokens = number_text_tokens\n self.start_text_token = number_text_tokens * types if start_text_token is None else start_text_token\n self.stop_text_token = 0\n self.number_mel_codes = number_mel_codes\n self.start_mel_token = start_mel_token\n self.stop_mel_token = stop_mel_token\n self.layers = layers\n self.heads = heads\n self.max_mel_tokens = max_mel_tokens\n self.max_text_tokens = max_text_tokens\n self.model_dim = model_dim\n self.max_conditioning_inputs = max_conditioning_inputs\n self.mel_length_compression = mel_length_compression\n self.conditioning_encoder = ConditioningEncoder(80, model_dim, num_attn_heads=heads)\n self.text_embedding = nn.Embedding(self.number_text_tokens * types + 1, model_dim)\n if use_mel_codes_as_input:\n self.mel_embedding = nn.Embedding(self.number_mel_codes, model_dim)\n else:\n self.mel_embedding = MelEncoder(model_dim, resblocks_per_reduction=1)\n (\n self.gpt,\n self.mel_pos_embedding,\n self.text_pos_embedding,\n self.mel_layer_pos_embedding,\n self.text_layer_pos_embedding,\n ) = build_hf_gpt_transformer(\n layers,\n model_dim,\n heads,\n self.max_mel_tokens + 2 + self.max_conditioning_inputs,\n self.max_text_tokens + 2,\n checkpointing,\n )\n if train_solo_embeddings:\n self.mel_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * 0.02, requires_grad=True)\n self.text_solo_embedding = nn.Parameter(torch.randn(1, 1, model_dim) * 0.02, requires_grad=True)\n else:\n self.mel_solo_embedding = 0\n self.text_solo_embedding = 0\n\n self.final_norm = nn.LayerNorm(model_dim)\n self.text_head = nn.Linear(model_dim, self.number_text_tokens * types + 1)\n self.mel_head = nn.Linear(model_dim, self.number_mel_codes)\n\n # Initialize the embeddings per the GPT-2 scheme\n embeddings = [self.text_embedding]\n if use_mel_codes_as_input:\n embeddings.append(self.mel_embedding)\n for module in embeddings:\n module.weight.data.normal_(mean=0.0, std=0.02)\n\n def post_init_gpt2_config(self, kv_cache=True):\n seq_length = self.max_mel_tokens + self.max_text_tokens + 2\n gpt_config = GPT2Config(\n vocab_size=self.max_mel_tokens,\n n_positions=seq_length,\n n_ctx=seq_length,\n n_embd=self.model_dim,\n n_layer=self.layers,\n n_head=self.heads,\n gradient_checkpointing=False,\n use_cache=True,\n )\n self.inference_model = GPT2InferenceModel(\n gpt_config,\n self.gpt,\n self.mel_pos_embedding,\n self.mel_embedding,\n self.final_norm,\n self.mel_head,\n kv_cache=kv_cache,\n )\n # self.inference_model = PrunedGPT2InferenceModel(gpt_config, self.gpt, self.mel_pos_embedding, self.mel_embedding, self.final_norm, self.mel_head)\n self.gpt.wte = self.mel_embedding\n # self.inference_model.save_pretrained(\"\")\n\n def build_aligned_inputs_and_targets(self, input, start_token, stop_token):\n inp = 
F.pad(input, (1, 0), value=start_token)\n tar = F.pad(input, (0, 1), value=stop_token)\n return inp, tar\n\n def set_mel_padding(self, mel_input_tokens, wav_lengths):\n \"\"\"\n Given mel tokens that are derived from a padded audio clip and the actual lengths of each batch element in\n that audio clip, reformats the tokens with STOP_MEL_TOKEN in place of the zero padding. This is required\n preformatting to create a working TTS model.\n \"\"\"\n # Set padding areas within MEL (currently it is coded with the MEL code for <zero>).\n mel_lengths = torch.div(wav_lengths, self.mel_length_compression, rounding_mode=\"trunc\")\n for b in range(len(mel_lengths)):\n actual_end = (\n mel_lengths[b] + 1\n ) # Due to the convolutional nature of how these tokens are generated, it would be best if the model predicts a token past the actual last token.\n if actual_end < mel_input_tokens.shape[-1]:\n mel_input_tokens[b, actual_end:] = self.stop_mel_token\n return mel_input_tokens\n\n def get_logits(\n self,\n speech_conditioning_inputs,\n first_inputs,\n first_head,\n second_inputs=None,\n second_head=None,\n get_attns=False,\n return_latent=False,\n ):\n if second_inputs is not None:\n emb = torch.cat([speech_conditioning_inputs, first_inputs, second_inputs], dim=1)\n else:\n emb = torch.cat([speech_conditioning_inputs, first_inputs], dim=1)\n\n gpt_out = self.gpt(inputs_embeds=emb, return_dict=True, output_attentions=get_attns)\n if get_attns:\n return gpt_out.attentions\n\n enc = gpt_out.last_hidden_state[:, 1:] # The first logit is tied to the speech_conditioning_input\n enc = self.final_norm(enc)\n\n if return_latent:\n return (\n enc[\n :,\n speech_conditioning_inputs.shape[1] : speech_conditioning_inputs.shape[1] + first_inputs.shape[1],\n ],\n enc[:, -second_inputs.shape[1] :],\n )\n\n first_logits = enc[:, : first_inputs.shape[1]]\n first_logits = first_head(first_logits)\n first_logits = first_logits.permute(0, 2, 1)\n if second_inputs is not None:\n second_logits = enc[:, -second_inputs.shape[1] :]\n second_logits = second_head(second_logits)\n second_logits = second_logits.permute(0, 2, 1)\n return first_logits, second_logits\n else:\n return first_logits\n\n def get_conditioning(self, speech_conditioning_input):\n speech_conditioning_input = (\n speech_conditioning_input.unsqueeze(1)\n if len(speech_conditioning_input.shape) == 3\n else speech_conditioning_input\n )\n conds = []\n for j in range(speech_conditioning_input.shape[1]):\n conds.append(self.conditioning_encoder(speech_conditioning_input[:, j]))\n conds = torch.stack(conds, dim=1)\n conds = conds.mean(dim=1)\n return conds\n\n def forward(\n self,\n speech_conditioning_latent,\n text_inputs,\n text_lengths,\n mel_codes,\n wav_lengths,\n types=None,\n text_first=True,\n raw_mels=None,\n return_attentions=False,\n return_latent=False,\n clip_inputs=True,\n ):\n \"\"\"\n Forward pass that uses both text and voice in either text conditioning mode or voice conditioning mode\n (actuated by `text_first`).\n\n speech_conditioning_input: MEL float tensor, (b,1024)\n text_inputs: long tensor, (b,t)\n text_lengths: long tensor, (b,)\n mel_inputs: long tensor, (b,m)\n wav_lengths: long tensor, (b,)\n raw_mels: MEL float tensor (b,80,s)\n\n If return_attentions is specified, only logits are returned.\n If return_latent is specified, loss & logits are not computed or returned. 
Only the predicted latents are returned.\n If clip_inputs is True, the inputs will be clipped to the smallest input size across each input modality.\n \"\"\"\n # Types are expressed by expanding the text embedding space.\n if types is not None:\n text_inputs = text_inputs * (1 + types).unsqueeze(-1)\n\n if clip_inputs:\n # This model will receive micro-batches with a ton of padding for both the text and MELs. Ameliorate this by\n # chopping the inputs by the maximum actual length.\n max_text_len = text_lengths.max()\n text_inputs = text_inputs[:, :max_text_len]\n max_mel_len = wav_lengths.max() // self.mel_length_compression\n mel_codes = mel_codes[:, :max_mel_len]\n if raw_mels is not None:\n raw_mels = raw_mels[:, :, : max_mel_len * 4]\n mel_codes = self.set_mel_padding(mel_codes, wav_lengths)\n text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)\n mel_codes = F.pad(mel_codes, (0, 1), value=self.stop_mel_token)\n\n conds = speech_conditioning_latent.unsqueeze(1)\n text_inputs, text_targets = self.build_aligned_inputs_and_targets(\n text_inputs, self.start_text_token, self.stop_text_token\n )\n text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)\n mel_codes, mel_targets = self.build_aligned_inputs_and_targets(\n mel_codes, self.start_mel_token, self.stop_mel_token\n )\n if raw_mels is not None:\n mel_inp = F.pad(raw_mels, (0, 8))\n else:\n mel_inp = mel_codes\n mel_emb = self.mel_embedding(mel_inp)\n mel_emb = mel_emb + self.mel_pos_embedding(mel_codes)\n\n if text_first:\n text_logits, mel_logits = self.get_logits(\n conds,\n text_emb,\n self.text_head,\n mel_emb,\n self.mel_head,\n get_attns=return_attentions,\n return_latent=return_latent,\n )\n if return_latent:\n return mel_logits[\n :, :-2\n ] # Despite the name, these are not logits. Strip off the two tokens added by this forward pass.\n else:\n mel_logits, text_logits = self.get_logits(\n conds,\n mel_emb,\n self.mel_head,\n text_emb,\n self.text_head,\n get_attns=return_attentions,\n return_latent=return_latent,\n )\n if return_latent:\n return text_logits[\n :, :-2\n ] # Despite the name, these are not logits. 
Strip off the two tokens added by this forward pass.\n\n if return_attentions:\n return mel_logits\n loss_text = F.cross_entropy(text_logits, text_targets.long())\n loss_mel = F.cross_entropy(mel_logits, mel_targets.long())\n return loss_text.mean(), loss_mel.mean(), mel_logits\n\n def inference_speech(\n self,\n speech_conditioning_latent,\n text_inputs,\n input_tokens=None,\n num_return_sequences=1,\n max_generate_length=None,\n typical_sampling=False,\n typical_mass=0.9,\n **hf_generate_kwargs,\n ):\n text_inputs = F.pad(text_inputs, (0, 1), value=self.stop_text_token)\n text_inputs, text_targets = self.build_aligned_inputs_and_targets(\n text_inputs, self.start_text_token, self.stop_text_token\n )\n text_emb = self.text_embedding(text_inputs) + self.text_pos_embedding(text_inputs)\n\n conds = speech_conditioning_latent.unsqueeze(1)\n emb = torch.cat([conds, text_emb], dim=1)\n self.inference_model.store_mel_emb(emb)\n\n fake_inputs = torch.full(\n (\n emb.shape[0],\n conds.shape[1] + emb.shape[1],\n ),\n fill_value=1,\n dtype=torch.long,\n device=text_inputs.device,\n )\n fake_inputs[:, -1] = self.start_mel_token\n trunc_index = fake_inputs.shape[1]\n if input_tokens is None:\n inputs = fake_inputs\n else:\n assert (\n num_return_sequences % input_tokens.shape[0] == 0\n ), \"The number of return sequences must be divisible by the number of input sequences\"\n fake_inputs = fake_inputs.repeat(num_return_sequences, 1)\n input_tokens = input_tokens.repeat(num_return_sequences // input_tokens.shape[0], 1)\n inputs = torch.cat([fake_inputs, input_tokens], dim=1)\n\n logits_processor = (\n LogitsProcessorList([TypicalLogitsWarper(mass=typical_mass)]) if typical_sampling else LogitsProcessorList()\n ) # TODO disable this\n max_length = (\n trunc_index + self.max_mel_tokens - 1 if max_generate_length is None else trunc_index + max_generate_length\n )\n gen = self.inference_model.generate(\n inputs,\n bos_token_id=self.start_mel_token,\n pad_token_id=self.stop_mel_token,\n eos_token_id=self.stop_mel_token,\n max_length=max_length,\n logits_processor=logits_processor,\n num_return_sequences=num_return_sequences,\n **hf_generate_kwargs,\n )\n return gen[:, trunc_index:]" }, { "identifier": "AudioMiniEncoderWithClassifierHead", "path": "TTS/tts/layers/tortoise/classifier.py", "snippet": "class AudioMiniEncoderWithClassifierHead(nn.Module):\n def __init__(self, classes, distribute_zero_label=True, **kwargs):\n super().__init__()\n self.enc = AudioMiniEncoder(**kwargs)\n self.head = nn.Linear(self.enc.dim, classes)\n self.num_classes = classes\n self.distribute_zero_label = distribute_zero_label\n\n def forward(self, x, labels=None):\n h = self.enc(x)\n logits = self.head(h)\n if labels is None:\n return logits\n else:\n if self.distribute_zero_label:\n oh_labels = nn.functional.one_hot(labels, num_classes=self.num_classes)\n zeros_indices = (labels == 0).unsqueeze(-1)\n # Distribute 20% of the probability mass on all classes when zero is specified, to compensate for dataset noise.\n zero_extra_mass = torch.full_like(\n oh_labels,\n dtype=torch.float,\n fill_value=0.2 / (self.num_classes - 1),\n )\n zero_extra_mass[:, 0] = -0.2\n zero_extra_mass = zero_extra_mass * zeros_indices\n oh_labels = oh_labels + zero_extra_mass\n else:\n oh_labels = labels\n loss = nn.functional.cross_entropy(logits, oh_labels)\n return loss" }, { "identifier": "CLVP", "path": "TTS/tts/layers/tortoise/clvp.py", "snippet": "class CLVP(nn.Module):\n \"\"\"\n CLIP model retrofitted for performing contrastive evaluation between 
tokenized audio data and the corresponding\n transcribed text.\n\n Originally from https://github.com/lucidrains/DALLE-pytorch/blob/main/dalle_pytorch/dalle_pytorch.py\n \"\"\"\n\n def __init__(\n self,\n *,\n dim_text=512,\n dim_speech=512,\n dim_latent=512,\n num_text_tokens=256,\n text_enc_depth=6,\n text_seq_len=120,\n text_heads=8,\n num_speech_tokens=8192,\n speech_enc_depth=6,\n speech_heads=8,\n speech_seq_len=250,\n text_mask_percentage=0,\n voice_mask_percentage=0,\n wav_token_compression=1024,\n use_xformers=False,\n ):\n super().__init__()\n self.text_emb = nn.Embedding(num_text_tokens, dim_text)\n self.to_text_latent = nn.Linear(dim_text, dim_latent, bias=False)\n\n self.speech_emb = nn.Embedding(num_speech_tokens, dim_speech)\n self.to_speech_latent = nn.Linear(dim_speech, dim_latent, bias=False)\n\n if use_xformers:\n self.text_transformer = CheckpointedXTransformerEncoder(\n needs_permute=False,\n exit_permute=False,\n max_seq_len=-1,\n attn_layers=Encoder(\n dim=dim_text,\n depth=text_enc_depth,\n heads=text_heads,\n ff_dropout=0.1,\n ff_mult=2,\n attn_dropout=0.1,\n use_rmsnorm=True,\n ff_glu=True,\n rotary_pos_emb=True,\n ),\n )\n self.speech_transformer = CheckpointedXTransformerEncoder(\n needs_permute=False,\n exit_permute=False,\n max_seq_len=-1,\n attn_layers=Encoder(\n dim=dim_speech,\n depth=speech_enc_depth,\n heads=speech_heads,\n ff_dropout=0.1,\n ff_mult=2,\n attn_dropout=0.1,\n use_rmsnorm=True,\n ff_glu=True,\n rotary_pos_emb=True,\n ),\n )\n else:\n self.text_transformer = Transformer(\n causal=False, seq_len=text_seq_len, dim=dim_text, depth=text_enc_depth, heads=text_heads\n )\n self.speech_transformer = Transformer(\n causal=False, seq_len=speech_seq_len, dim=dim_speech, depth=speech_enc_depth, heads=speech_heads\n )\n\n self.temperature = nn.Parameter(torch.tensor(1.0))\n self.text_mask_percentage = text_mask_percentage\n self.voice_mask_percentage = voice_mask_percentage\n self.wav_token_compression = wav_token_compression\n self.xformers = use_xformers\n if not use_xformers:\n self.text_pos_emb = nn.Embedding(text_seq_len, dim_text)\n self.speech_pos_emb = nn.Embedding(num_speech_tokens, dim_speech)\n\n def forward(self, text, speech_tokens, return_loss=False):\n b, device = text.shape[0], text.device\n if self.training:\n text_mask = torch.rand_like(text.float()) > self.text_mask_percentage\n voice_mask = torch.rand_like(speech_tokens.float()) > self.voice_mask_percentage\n else:\n text_mask = torch.ones_like(text.float()).bool()\n voice_mask = torch.ones_like(speech_tokens.float()).bool()\n\n text_emb = self.text_emb(text)\n speech_emb = self.speech_emb(speech_tokens)\n\n if not self.xformers:\n text_emb += self.text_pos_emb(torch.arange(text.shape[1], device=device))\n speech_emb += self.speech_pos_emb(torch.arange(speech_emb.shape[1], device=device))\n\n enc_text = self.text_transformer(text_emb, mask=text_mask)\n enc_speech = self.speech_transformer(speech_emb, mask=voice_mask)\n\n text_latents = masked_mean(enc_text, text_mask, dim=1)\n speech_latents = masked_mean(enc_speech, voice_mask, dim=1)\n\n text_latents = self.to_text_latent(text_latents)\n speech_latents = self.to_speech_latent(speech_latents)\n\n text_latents, speech_latents = map(lambda t: F.normalize(t, p=2, dim=-1), (text_latents, speech_latents))\n\n temp = self.temperature.exp()\n\n if not return_loss:\n sim = einsum(\"n d, n d -> n\", text_latents, speech_latents) * temp\n return sim\n\n sim = einsum(\"i d, j d -> i j\", text_latents, speech_latents) * temp\n labels = 
torch.arange(b, device=device)\n loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) / 2\n return loss" }, { "identifier": "SpacedDiffusion", "path": "TTS/tts/layers/tortoise/diffusion.py", "snippet": "class SpacedDiffusion(GaussianDiffusion):\n \"\"\"\n A diffusion process which can skip steps in a base diffusion process.\n\n :param use_timesteps: a collection (sequence or set) of timesteps from the\n original diffusion process to retain.\n :param kwargs: the kwargs to create the base diffusion process.\n \"\"\"\n\n def __init__(self, use_timesteps, **kwargs):\n self.use_timesteps = set(use_timesteps)\n self.timestep_map = []\n self.original_num_steps = len(kwargs[\"betas\"])\n base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa\n last_alpha_cumprod = 1.0\n new_betas = []\n for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod):\n if i in self.use_timesteps:\n new_betas.append(1 - alpha_cumprod / last_alpha_cumprod)\n last_alpha_cumprod = alpha_cumprod\n self.timestep_map.append(i)\n kwargs[\"betas\"] = np.array(new_betas)\n super().__init__(**kwargs)\n\n def p_mean_variance(self, model, *args, **kwargs): # pylint: disable=signature-differs\n return super().p_mean_variance(self._wrap_model(model), *args, **kwargs)\n\n def training_losses(self, model, *args, **kwargs): # pylint: disable=signature-differs\n return super().training_losses(self._wrap_model(model), *args, **kwargs)\n\n def autoregressive_training_losses(self, model, *args, **kwargs): # pylint: disable=signature-differs\n return super().autoregressive_training_losses(self._wrap_model(model, True), *args, **kwargs)\n\n def condition_mean(self, cond_fn, *args, **kwargs):\n return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs)\n\n def condition_score(self, cond_fn, *args, **kwargs):\n return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs)\n\n def _wrap_model(self, model, autoregressive=False):\n if isinstance(model, _WrappedModel) or isinstance(model, _WrappedAutoregressiveModel):\n return model\n mod = _WrappedAutoregressiveModel if autoregressive else _WrappedModel\n return mod(model, self.timestep_map, self.rescale_timesteps, self.original_num_steps)\n\n def _scale_timesteps(self, t):\n # Scaling is done by the wrapped model.\n return t" }, { "identifier": "get_named_beta_schedule", "path": "TTS/tts/layers/tortoise/diffusion.py", "snippet": "def get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\n \"\"\"\n Get a pre-defined beta schedule for the given name.\n\n The beta schedule library consists of beta schedules which remain similar\n in the limit of num_diffusion_timesteps.\n Beta schedules may be added, but should not be removed or changed once\n they are committed to maintain backwards compatibility.\n \"\"\"\n if schedule_name == \"linear\":\n # Linear schedule from Ho et al, extended to work for any number of\n # diffusion steps.\n scale = 1000 / num_diffusion_timesteps\n beta_start = scale * 0.0001\n beta_end = scale * 0.02\n return np.linspace(beta_start, beta_end, num_diffusion_timesteps, dtype=np.float64)\n elif schedule_name == \"cosine\":\n return betas_for_alpha_bar(\n num_diffusion_timesteps,\n lambda t: math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2,\n )\n else:\n raise NotImplementedError(f\"unknown beta schedule: {schedule_name}\")" }, { "identifier": "space_timesteps", "path": "TTS/tts/layers/tortoise/diffusion.py", "snippet": "def space_timesteps(num_timesteps, section_counts):\n \"\"\"\n Create a 
list of timesteps to use from an original diffusion process,\n given the number of timesteps we want to take from equally-sized portions\n of the original process.\n\n For example, if there's 300 timesteps and the section counts are [10,15,20]\n then the first 100 timesteps are strided to be 10 timesteps, the second 100\n are strided to be 15 timesteps, and the final 100 are strided to be 20.\n\n If the stride is a string starting with \"ddim\", then the fixed striding\n from the DDIM paper is used, and only one section is allowed.\n\n :param num_timesteps: the number of diffusion steps in the original\n process to divide up.\n :param section_counts: either a list of numbers, or a string containing\n comma-separated numbers, indicating the step count\n per section. As a special case, use \"ddimN\" where N\n is a number of steps to use the striding from the\n DDIM paper.\n :return: a set of diffusion steps from the original process to use.\n \"\"\"\n if isinstance(section_counts, str):\n if section_counts.startswith(\"ddim\"):\n desired_count = int(section_counts[len(\"ddim\") :])\n for i in range(1, num_timesteps):\n if len(range(0, num_timesteps, i)) == desired_count:\n return set(range(0, num_timesteps, i))\n raise ValueError(f\"cannot create exactly {num_timesteps} steps with an integer stride\")\n section_counts = [int(x) for x in section_counts.split(\",\")]\n size_per = num_timesteps // len(section_counts)\n extra = num_timesteps % len(section_counts)\n start_idx = 0\n all_steps = []\n for i, section_count in enumerate(section_counts):\n size = size_per + (1 if i < extra else 0)\n if size < section_count:\n raise ValueError(f\"cannot divide section of {size} steps into {section_count}\")\n if section_count <= 1:\n frac_stride = 1\n else:\n frac_stride = (size - 1) / (section_count - 1)\n cur_idx = 0.0\n taken_steps = []\n for _ in range(section_count):\n taken_steps.append(start_idx + round(cur_idx))\n cur_idx += frac_stride\n all_steps += taken_steps\n start_idx += size\n return set(all_steps)" }, { "identifier": "DiffusionTts", "path": "TTS/tts/layers/tortoise/diffusion_decoder.py", "snippet": "class DiffusionTts(nn.Module):\n def __init__(\n self,\n model_channels=512,\n num_layers=8,\n in_channels=100,\n in_latent_channels=512,\n in_tokens=8193,\n out_channels=200, # mean and variance\n dropout=0,\n use_fp16=False,\n num_heads=16,\n # Parameters for regularization.\n layer_drop=0.1,\n unconditioned_percentage=0.1, # This implements a mechanism similar to what is used in classifier-free training.\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n self.dropout = dropout\n self.num_heads = num_heads\n self.unconditioned_percentage = unconditioned_percentage\n self.enable_fp16 = use_fp16\n self.layer_drop = layer_drop\n\n self.inp_block = nn.Conv1d(in_channels, model_channels, 3, 1, 1)\n self.time_embed = nn.Sequential(\n nn.Linear(model_channels, model_channels),\n nn.SiLU(),\n nn.Linear(model_channels, model_channels),\n )\n\n # Either code_converter or latent_converter is used, depending on what type of conditioning data is fed.\n # This model is meant to be able to be trained on both for efficiency purposes - it is far less computationally\n # complex to generate tokens, while generating latents will normally mean propagating through a deep autoregressive\n # transformer network.\n self.code_embedding = nn.Embedding(in_tokens, model_channels)\n self.code_converter = nn.Sequential(\n 
AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n )\n self.code_norm = normalization(model_channels)\n self.latent_conditioner = nn.Sequential(\n nn.Conv1d(in_latent_channels, model_channels, 3, padding=1),\n AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n AttentionBlock(model_channels, num_heads, relative_pos_embeddings=True),\n )\n self.contextual_embedder = nn.Sequential(\n nn.Conv1d(in_channels, model_channels, 3, padding=1, stride=2),\n nn.Conv1d(model_channels, model_channels * 2, 3, padding=1, stride=2),\n AttentionBlock(\n model_channels * 2,\n num_heads,\n relative_pos_embeddings=True,\n do_checkpoint=False,\n ),\n AttentionBlock(\n model_channels * 2,\n num_heads,\n relative_pos_embeddings=True,\n do_checkpoint=False,\n ),\n AttentionBlock(\n model_channels * 2,\n num_heads,\n relative_pos_embeddings=True,\n do_checkpoint=False,\n ),\n AttentionBlock(\n model_channels * 2,\n num_heads,\n relative_pos_embeddings=True,\n do_checkpoint=False,\n ),\n AttentionBlock(\n model_channels * 2,\n num_heads,\n relative_pos_embeddings=True,\n do_checkpoint=False,\n ),\n )\n self.unconditioned_embedding = nn.Parameter(torch.randn(1, model_channels, 1))\n self.conditioning_timestep_integrator = TimestepEmbedSequential(\n DiffusionLayer(model_channels, dropout, num_heads),\n DiffusionLayer(model_channels, dropout, num_heads),\n DiffusionLayer(model_channels, dropout, num_heads),\n )\n\n self.integrating_conv = nn.Conv1d(model_channels * 2, model_channels, kernel_size=1)\n self.mel_head = nn.Conv1d(model_channels, in_channels, kernel_size=3, padding=1)\n\n self.layers = nn.ModuleList(\n [DiffusionLayer(model_channels, dropout, num_heads) for _ in range(num_layers)]\n + [\n ResBlock(\n model_channels,\n model_channels,\n dropout,\n dims=1,\n use_scale_shift_norm=True,\n )\n for _ in range(3)\n ]\n )\n\n self.out = nn.Sequential(\n normalization(model_channels),\n nn.SiLU(),\n nn.Conv1d(model_channels, out_channels, 3, padding=1),\n )\n\n def get_grad_norm_parameter_groups(self):\n groups = {\n \"minicoder\": list(self.contextual_embedder.parameters()),\n \"layers\": list(self.layers.parameters()),\n \"code_converters\": list(self.code_embedding.parameters())\n + list(self.code_converter.parameters())\n + list(self.latent_conditioner.parameters())\n + list(self.latent_conditioner.parameters()),\n \"timestep_integrator\": list(self.conditioning_timestep_integrator.parameters())\n + list(self.integrating_conv.parameters()),\n \"time_embed\": list(self.time_embed.parameters()),\n }\n return groups\n\n def get_conditioning(self, conditioning_input):\n speech_conditioning_input = (\n conditioning_input.unsqueeze(1) if len(conditioning_input.shape) == 3 else conditioning_input\n )\n conds = []\n for j in range(speech_conditioning_input.shape[1]):\n conds.append(self.contextual_embedder(speech_conditioning_input[:, j]))\n conds = torch.cat(conds, dim=-1)\n conds = conds.mean(dim=-1)\n return conds\n\n def timestep_independent(\n self,\n aligned_conditioning,\n conditioning_latent,\n expected_seq_len,\n return_code_pred,\n ):\n # Shuffle aligned_latent to BxCxS format\n if is_latent(aligned_conditioning):\n aligned_conditioning = aligned_conditioning.permute(0, 
2, 1)\n\n cond_scale, cond_shift = torch.chunk(conditioning_latent, 2, dim=1)\n if is_latent(aligned_conditioning):\n code_emb = self.latent_conditioner(aligned_conditioning)\n else:\n code_emb = self.code_embedding(aligned_conditioning).permute(0, 2, 1)\n code_emb = self.code_converter(code_emb)\n code_emb = self.code_norm(code_emb) * (1 + cond_scale.unsqueeze(-1)) + cond_shift.unsqueeze(-1)\n\n unconditioned_batches = torch.zeros((code_emb.shape[0], 1, 1), device=code_emb.device)\n # Mask out the conditioning branch for whole batch elements, implementing something similar to classifier-free guidance.\n if self.training and self.unconditioned_percentage > 0:\n unconditioned_batches = (\n torch.rand((code_emb.shape[0], 1, 1), device=code_emb.device) < self.unconditioned_percentage\n )\n code_emb = torch.where(\n unconditioned_batches,\n self.unconditioned_embedding.repeat(aligned_conditioning.shape[0], 1, 1),\n code_emb,\n )\n expanded_code_emb = F.interpolate(code_emb, size=expected_seq_len, mode=\"nearest\")\n\n if not return_code_pred:\n return expanded_code_emb\n else:\n mel_pred = self.mel_head(expanded_code_emb)\n # Multiply mel_pred by !unconditioned_branches, which drops the gradient on unconditioned branches. This is because we don't want that gradient being used to train parameters through the codes_embedder as it unbalances contributions to that network from the MSE loss.\n mel_pred = mel_pred * unconditioned_batches.logical_not()\n return expanded_code_emb, mel_pred\n\n def forward(\n self,\n x,\n timesteps,\n aligned_conditioning=None,\n conditioning_latent=None,\n precomputed_aligned_embeddings=None,\n conditioning_free=False,\n return_code_pred=False,\n ):\n \"\"\"\n Apply the model to an input batch.\n\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param aligned_conditioning: an aligned latent or sequence of tokens providing useful data about the sample to be produced.\n :param conditioning_latent: a pre-computed conditioning latent; see get_conditioning().\n :param precomputed_aligned_embeddings: Embeddings returned from self.timestep_independent()\n :param conditioning_free: When set, all conditioning inputs (including tokens and conditioning_input) will not be considered.\n :return: an [N x C x ...] 
Tensor of outputs.\n \"\"\"\n assert precomputed_aligned_embeddings is not None or (\n aligned_conditioning is not None and conditioning_latent is not None\n )\n assert not (\n return_code_pred and precomputed_aligned_embeddings is not None\n ) # These two are mutually exclusive.\n\n unused_params = []\n if conditioning_free:\n code_emb = self.unconditioned_embedding.repeat(x.shape[0], 1, x.shape[-1])\n unused_params.extend(list(self.code_converter.parameters()) + list(self.code_embedding.parameters()))\n unused_params.extend(list(self.latent_conditioner.parameters()))\n else:\n if precomputed_aligned_embeddings is not None:\n code_emb = precomputed_aligned_embeddings\n else:\n code_emb, mel_pred = self.timestep_independent(\n aligned_conditioning, conditioning_latent, x.shape[-1], True\n )\n if is_latent(aligned_conditioning):\n unused_params.extend(\n list(self.code_converter.parameters()) + list(self.code_embedding.parameters())\n )\n else:\n unused_params.extend(list(self.latent_conditioner.parameters()))\n\n unused_params.append(self.unconditioned_embedding)\n\n time_emb = self.time_embed(timestep_embedding(timesteps, self.model_channels))\n code_emb = self.conditioning_timestep_integrator(code_emb, time_emb)\n x = self.inp_block(x)\n x = torch.cat([x, code_emb], dim=1)\n x = self.integrating_conv(x)\n for i, lyr in enumerate(self.layers):\n # Do layer drop where applicable. Do not drop first and last layers.\n if (\n self.training\n and self.layer_drop > 0\n and i != 0\n and i != (len(self.layers) - 1)\n and random.random() < self.layer_drop\n ):\n unused_params.extend(list(lyr.parameters()))\n else:\n # First and last blocks will have autocast disabled for improved precision.\n with autocast(x.device.type, enabled=self.enable_fp16 and i != 0):\n x = lyr(x, time_emb)\n\n x = x.float()\n out = self.out(x)\n\n # Involve probabilistic or possibly unused parameters in loss so we don't get DDP errors.\n extraneous_addition = 0\n for p in unused_params:\n extraneous_addition = extraneous_addition + p.mean()\n out = out + extraneous_addition * 0\n\n if return_code_pred:\n return out, mel_pred\n return out" }, { "identifier": "RandomLatentConverter", "path": "TTS/tts/layers/tortoise/random_latent_generator.py", "snippet": "class RandomLatentConverter(nn.Module):\n def __init__(self, channels):\n super().__init__()\n self.layers = nn.Sequential(\n *[EqualLinear(channels, channels, lr_mul=0.1) for _ in range(5)], nn.Linear(channels, channels)\n )\n self.channels = channels\n\n def forward(self, ref):\n r = torch.randn(ref.shape[0], self.channels, device=ref.device)\n y = self.layers(r)\n return y" }, { "identifier": "VoiceBpeTokenizer", "path": "TTS/tts/layers/tortoise/tokenizer.py", "snippet": "class VoiceBpeTokenizer:\n def __init__(self, vocab_file=DEFAULT_VOCAB_FILE, vocab_str=None):\n self.tokenizer = None\n if vocab_file is not None:\n self.tokenizer = Tokenizer.from_file(vocab_file)\n if vocab_str is not None:\n self.tokenizer = Tokenizer.from_str(vocab_str)\n\n def preprocess_text(self, txt):\n txt = english_cleaners(txt)\n return txt\n\n def encode(self, txt):\n txt = self.preprocess_text(txt)\n txt = txt.replace(\" \", \"[SPACE]\")\n return self.tokenizer.encode(txt).ids\n\n def decode(self, seq):\n if isinstance(seq, torch.Tensor):\n seq = seq.cpu().numpy()\n txt = self.tokenizer.decode(seq, skip_special_tokens=False).replace(\" \", \"\")\n txt = txt.replace(\"[SPACE]\", \" \")\n txt = txt.replace(\"[STOP]\", \"\")\n txt = txt.replace(\"[UNK]\", \"\")\n return txt" }, { 
"identifier": "VocConf", "path": "TTS/tts/layers/tortoise/vocoder.py", "snippet": "class VocConf(Enum):\n Univnet = VocType(UnivNetGenerator, \"vocoder.pth\", \"model_g\")" }, { "identifier": "VocType", "path": "TTS/tts/layers/tortoise/vocoder.py", "snippet": "class VocType:\n constructor: Callable[[], nn.Module]\n model_path: str\n subkey: Optional[str] = None\n\n def optionally_index(self, model_dict):\n if self.subkey is not None:\n return model_dict[self.subkey]\n return model_dict" }, { "identifier": "Wav2VecAlignment", "path": "TTS/tts/layers/tortoise/wav2vec_alignment.py", "snippet": "class Wav2VecAlignment:\n \"\"\"\n Uses wav2vec2 to perform audio<->text alignment.\n \"\"\"\n\n def __init__(self, device=\"cuda\"):\n self.model = Wav2Vec2ForCTC.from_pretrained(\"jbetker/wav2vec2-large-robust-ft-libritts-voxpopuli\").cpu()\n self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(\"facebook/wav2vec2-large-960h\")\n self.tokenizer = Wav2Vec2CTCTokenizer.from_pretrained(\"jbetker/tacotron-symbols\")\n self.device = device\n\n def align(self, audio, expected_text, audio_sample_rate=24000):\n orig_len = audio.shape[-1]\n\n with torch.no_grad():\n self.model = self.model.to(self.device)\n audio = audio.to(self.device)\n audio = torchaudio.functional.resample(audio, audio_sample_rate, 16000)\n clip_norm = (audio - audio.mean()) / torch.sqrt(audio.var() + 1e-7)\n logits = self.model(clip_norm).logits\n self.model = self.model.cpu()\n\n logits = logits[0]\n pred_string = self.tokenizer.decode(logits.argmax(-1).tolist())\n\n fixed_expectation = max_alignment(expected_text.lower(), pred_string)\n w2v_compression = orig_len // logits.shape[0]\n expected_tokens = self.tokenizer.encode(fixed_expectation)\n expected_chars = list(fixed_expectation)\n if len(expected_tokens) == 1:\n return [0] # The alignment is simple; there is only one token.\n expected_tokens.pop(0) # The first token is a given.\n expected_chars.pop(0)\n\n alignments = [0]\n\n def pop_till_you_win():\n if len(expected_tokens) == 0:\n return None\n popped = expected_tokens.pop(0)\n popped_char = expected_chars.pop(0)\n while popped_char == \"~\":\n alignments.append(-1)\n if len(expected_tokens) == 0:\n return None\n popped = expected_tokens.pop(0)\n popped_char = expected_chars.pop(0)\n return popped\n\n next_expected_token = pop_till_you_win()\n for i, logit in enumerate(logits):\n top = logit.argmax()\n if next_expected_token == top:\n alignments.append(i * w2v_compression)\n if len(expected_tokens) > 0:\n next_expected_token = pop_till_you_win()\n else:\n break\n\n pop_till_you_win()\n if not (len(expected_tokens) == 0 and len(alignments) == len(expected_text)):\n torch.save([audio, expected_text], \"alignment_debug.pth\")\n assert False, (\n \"Something went wrong with the alignment algorithm. I've dumped a file, 'alignment_debug.pth' to\"\n \"your current working directory. Please report this along with the file so it can get fixed.\"\n )\n\n # Now fix up alignments. 
Anything with -1 should be interpolated.\n alignments.append(orig_len) # This'll get removed but makes the algorithm below more readable.\n for i in range(len(alignments)):\n if alignments[i] == -1:\n for j in range(i + 1, len(alignments)):\n if alignments[j] != -1:\n next_found_token = j\n break\n for j in range(i, next_found_token):\n gap = alignments[next_found_token] - alignments[i - 1]\n alignments[j] = (j - i + 1) * gap // (next_found_token - i + 1) + alignments[i - 1]\n\n return alignments[:-1]\n\n def redact(self, audio, expected_text, audio_sample_rate=24000):\n if \"[\" not in expected_text:\n return audio\n splitted = expected_text.split(\"[\")\n fully_split = [splitted[0]]\n for spl in splitted[1:]:\n assert \"]\" in spl, 'Every \"[\" character must be paired with a \"]\" with no nesting.'\n fully_split.extend(spl.split(\"]\"))\n\n # At this point, fully_split is a list of strings, with every other string being something that should be redacted.\n non_redacted_intervals = []\n last_point = 0\n for i in range(len(fully_split)):\n if i % 2 == 0:\n end_interval = max(0, last_point + len(fully_split[i]) - 1)\n non_redacted_intervals.append((last_point, end_interval))\n last_point += len(fully_split[i])\n\n bare_text = \"\".join(fully_split)\n alignments = self.align(audio, bare_text, audio_sample_rate)\n\n output_audio = []\n for nri in non_redacted_intervals:\n start, stop = nri\n output_audio.append(audio[:, alignments[start] : alignments[stop]])\n return torch.cat(output_audio, dim=-1)" }, { "identifier": "BaseTTS", "path": "TTS/tts/models/base_tts.py", "snippet": "class BaseTTS(BaseTrainerModel):\n \"\"\"Base `tts` class. Every new `tts` model must inherit this.\n\n It defines common `tts` specific functions on top of `Model` implementation.\n \"\"\"\n\n MODEL_TYPE = \"tts\"\n\n def __init__(\n self,\n config: Coqpit,\n ap: \"AudioProcessor\",\n tokenizer: \"TTSTokenizer\",\n speaker_manager: SpeakerManager = None,\n language_manager: LanguageManager = None,\n ):\n super().__init__()\n self.config = config\n self.ap = ap\n self.tokenizer = tokenizer\n self.speaker_manager = speaker_manager\n self.language_manager = language_manager\n self._set_model_args(config)\n\n def _set_model_args(self, config: Coqpit):\n \"\"\"Setup model args based on the config type (`ModelConfig` or `ModelArgs`).\n\n `ModelArgs` has all the fields reuqired to initialize the model architecture.\n\n `ModelConfig` has all the fields required for training, inference and containes `ModelArgs`.\n\n If the config is for training with a name like \"*Config\", then the model args are embeded in the\n config.model_args\n\n If the config is for the model with a name like \"*Args\", then we assign the directly.\n \"\"\"\n # don't use isintance not to import recursively\n if \"Config\" in config.__class__.__name__:\n config_num_chars = (\n self.config.model_args.num_chars if hasattr(self.config, \"model_args\") else self.config.num_chars\n )\n num_chars = config_num_chars if self.tokenizer is None else self.tokenizer.characters.num_chars\n if \"characters\" in config:\n self.config.num_chars = num_chars\n if hasattr(self.config, \"model_args\"):\n config.model_args.num_chars = num_chars\n self.args = self.config.model_args\n else:\n self.config = config\n self.args = config.model_args\n elif \"Args\" in config.__class__.__name__:\n self.args = config\n else:\n raise ValueError(\"config must be either a *Config or *Args\")\n\n def init_multispeaker(self, config: Coqpit, data: List = None):\n \"\"\"Initialize a 
speaker embedding layer if needen and define expected embedding channel size for defining\n `in_channels` size of the connected layers.\n\n This implementation yields 3 possible outcomes:\n\n 1. If `config.use_speaker_embedding` and `config.use_d_vector_file are False, do nothing.\n 2. If `config.use_d_vector_file` is True, set expected embedding channel size to `config.d_vector_dim` or 512.\n 3. If `config.use_speaker_embedding`, initialize a speaker embedding layer with channel size of\n `config.d_vector_dim` or 512.\n\n You can override this function for new models.\n\n Args:\n config (Coqpit): Model configuration.\n \"\"\"\n # set number of speakers\n if self.speaker_manager is not None:\n self.num_speakers = self.speaker_manager.num_speakers\n elif hasattr(config, \"num_speakers\"):\n self.num_speakers = config.num_speakers\n\n # set ultimate speaker embedding size\n if config.use_speaker_embedding or config.use_d_vector_file:\n self.embedded_speaker_dim = (\n config.d_vector_dim if \"d_vector_dim\" in config and config.d_vector_dim is not None else 512\n )\n # init speaker embedding layer\n if config.use_speaker_embedding and not config.use_d_vector_file:\n print(\" > Init speaker_embedding layer.\")\n self.speaker_embedding = nn.Embedding(self.num_speakers, self.embedded_speaker_dim)\n self.speaker_embedding.weight.data.normal_(0, 0.3)\n\n def get_aux_input(self, **kwargs) -> Dict:\n \"\"\"Prepare and return `aux_input` used by `forward()`\"\"\"\n return {\"speaker_id\": None, \"style_wav\": None, \"d_vector\": None, \"language_id\": None}\n\n def get_aux_input_from_test_sentences(self, sentence_info):\n if hasattr(self.config, \"model_args\"):\n config = self.config.model_args\n else:\n config = self.config\n\n # extract speaker and language info\n text, speaker_name, style_wav, language_name = None, None, None, None\n\n if isinstance(sentence_info, list):\n if len(sentence_info) == 1:\n text = sentence_info[0]\n elif len(sentence_info) == 2:\n text, speaker_name = sentence_info\n elif len(sentence_info) == 3:\n text, speaker_name, style_wav = sentence_info\n elif len(sentence_info) == 4:\n text, speaker_name, style_wav, language_name = sentence_info\n else:\n text = sentence_info\n\n # get speaker id/d_vector\n speaker_id, d_vector, language_id = None, None, None\n if self.speaker_manager is not None:\n if config.use_d_vector_file:\n if speaker_name is None:\n d_vector = self.speaker_manager.get_random_embedding()\n else:\n d_vector = self.speaker_manager.get_d_vector_by_name(speaker_name)\n elif config.use_speaker_embedding:\n if speaker_name is None:\n speaker_id = self.speaker_manager.get_random_id()\n else:\n speaker_id = self.speaker_manager.name_to_id[speaker_name]\n\n # get language id\n if self.language_manager is not None and config.use_language_embedding and language_name is not None:\n language_id = self.language_manager.name_to_id[language_name]\n\n return {\n \"text\": text,\n \"speaker_id\": speaker_id,\n \"style_wav\": style_wav,\n \"d_vector\": d_vector,\n \"language_id\": language_id,\n }\n\n def format_batch(self, batch: Dict) -> Dict:\n \"\"\"Generic batch formatting for `TTSDataset`.\n\n You must override this if you use a custom dataset.\n\n Args:\n batch (Dict): [description]\n\n Returns:\n Dict: [description]\n \"\"\"\n # setup input batch\n text_input = batch[\"token_id\"]\n text_lengths = batch[\"token_id_lengths\"]\n speaker_names = batch[\"speaker_names\"]\n linear_input = batch[\"linear\"]\n mel_input = batch[\"mel\"]\n mel_lengths = 
batch[\"mel_lengths\"]\n stop_targets = batch[\"stop_targets\"]\n item_idx = batch[\"item_idxs\"]\n d_vectors = batch[\"d_vectors\"]\n speaker_ids = batch[\"speaker_ids\"]\n attn_mask = batch[\"attns\"]\n waveform = batch[\"waveform\"]\n pitch = batch[\"pitch\"]\n energy = batch[\"energy\"]\n language_ids = batch[\"language_ids\"]\n max_text_length = torch.max(text_lengths.float())\n max_spec_length = torch.max(mel_lengths.float())\n\n # compute durations from attention masks\n durations = None\n if attn_mask is not None:\n durations = torch.zeros(attn_mask.shape[0], attn_mask.shape[2])\n for idx, am in enumerate(attn_mask):\n # compute raw durations\n c_idxs = am[:, : text_lengths[idx], : mel_lengths[idx]].max(1)[1]\n # c_idxs, counts = torch.unique_consecutive(c_idxs, return_counts=True)\n c_idxs, counts = torch.unique(c_idxs, return_counts=True)\n dur = torch.ones([text_lengths[idx]]).to(counts.dtype)\n dur[c_idxs] = counts\n # smooth the durations and set any 0 duration to 1\n # by cutting off from the largest duration indeces.\n extra_frames = dur.sum() - mel_lengths[idx]\n largest_idxs = torch.argsort(-dur)[:extra_frames]\n dur[largest_idxs] -= 1\n assert (\n dur.sum() == mel_lengths[idx]\n ), f\" [!] total duration {dur.sum()} vs spectrogram length {mel_lengths[idx]}\"\n durations[idx, : text_lengths[idx]] = dur\n\n # set stop targets wrt reduction factor\n stop_targets = stop_targets.view(text_input.shape[0], stop_targets.size(1) // self.config.r, -1)\n stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze(2)\n stop_target_lengths = torch.divide(mel_lengths, self.config.r).ceil_()\n\n return {\n \"text_input\": text_input,\n \"text_lengths\": text_lengths,\n \"speaker_names\": speaker_names,\n \"mel_input\": mel_input,\n \"mel_lengths\": mel_lengths,\n \"linear_input\": linear_input,\n \"stop_targets\": stop_targets,\n \"stop_target_lengths\": stop_target_lengths,\n \"attn_mask\": attn_mask,\n \"durations\": durations,\n \"speaker_ids\": speaker_ids,\n \"d_vectors\": d_vectors,\n \"max_text_length\": float(max_text_length),\n \"max_spec_length\": float(max_spec_length),\n \"item_idx\": item_idx,\n \"waveform\": waveform,\n \"pitch\": pitch,\n \"energy\": energy,\n \"language_ids\": language_ids,\n \"audio_unique_names\": batch[\"audio_unique_names\"],\n }\n\n def get_sampler(self, config: Coqpit, dataset: TTSDataset, num_gpus=1):\n weights = None\n data_items = dataset.samples\n\n if getattr(config, \"use_language_weighted_sampler\", False):\n alpha = getattr(config, \"language_weighted_sampler_alpha\", 1.0)\n print(\" > Using Language weighted sampler with alpha:\", alpha)\n weights = get_language_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_speaker_weighted_sampler\", False):\n alpha = getattr(config, \"speaker_weighted_sampler_alpha\", 1.0)\n print(\" > Using Speaker weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_speaker_balancer_weights(data_items) * alpha\n else:\n weights = get_speaker_balancer_weights(data_items) * alpha\n\n if getattr(config, \"use_length_weighted_sampler\", False):\n alpha = getattr(config, \"length_weighted_sampler_alpha\", 1.0)\n print(\" > Using Length weighted sampler with alpha:\", alpha)\n if weights is not None:\n weights += get_length_balancer_weights(data_items) * alpha\n else:\n weights = get_length_balancer_weights(data_items) * alpha\n\n if weights is not None:\n sampler = WeightedRandomSampler(weights, len(weights))\n else:\n sampler = None\n\n # sampler for DDP\n if 
sampler is None:\n sampler = DistributedSampler(dataset) if num_gpus > 1 else None\n else: # If a sampler is already defined use this sampler and DDP sampler together\n sampler = DistributedSamplerWrapper(sampler) if num_gpus > 1 else sampler\n\n return sampler\n\n def get_data_loader(\n self,\n config: Coqpit,\n assets: Dict,\n is_eval: bool,\n samples: Union[List[Dict], List[List]],\n verbose: bool,\n num_gpus: int,\n rank: int = None,\n ) -> \"DataLoader\":\n if is_eval and not config.run_eval:\n loader = None\n else:\n # setup multi-speaker attributes\n if self.speaker_manager is not None:\n if hasattr(config, \"model_args\"):\n speaker_id_mapping = (\n self.speaker_manager.name_to_id if config.model_args.use_speaker_embedding else None\n )\n d_vector_mapping = self.speaker_manager.embeddings if config.model_args.use_d_vector_file else None\n config.use_d_vector_file = config.model_args.use_d_vector_file\n else:\n speaker_id_mapping = self.speaker_manager.name_to_id if config.use_speaker_embedding else None\n d_vector_mapping = self.speaker_manager.embeddings if config.use_d_vector_file else None\n else:\n speaker_id_mapping = None\n d_vector_mapping = None\n\n # setup multi-lingual attributes\n if self.language_manager is not None:\n language_id_mapping = self.language_manager.name_to_id if self.args.use_language_embedding else None\n else:\n language_id_mapping = None\n\n # init dataloader\n dataset = TTSDataset(\n outputs_per_step=config.r if \"r\" in config else 1,\n compute_linear_spec=config.model.lower() == \"tacotron\" or config.compute_linear_spec,\n compute_f0=config.get(\"compute_f0\", False),\n f0_cache_path=config.get(\"f0_cache_path\", None),\n compute_energy=config.get(\"compute_energy\", False),\n energy_cache_path=config.get(\"energy_cache_path\", None),\n samples=samples,\n ap=self.ap,\n return_wav=config.return_wav if \"return_wav\" in config else False,\n batch_group_size=0 if is_eval else config.batch_group_size * config.batch_size,\n min_text_len=config.min_text_len,\n max_text_len=config.max_text_len,\n min_audio_len=config.min_audio_len,\n max_audio_len=config.max_audio_len,\n phoneme_cache_path=config.phoneme_cache_path,\n precompute_num_workers=config.precompute_num_workers,\n use_noise_augment=False if is_eval else config.use_noise_augment,\n verbose=verbose,\n speaker_id_mapping=speaker_id_mapping,\n d_vector_mapping=d_vector_mapping if config.use_d_vector_file else None,\n tokenizer=self.tokenizer,\n start_by_longest=config.start_by_longest,\n language_id_mapping=language_id_mapping,\n )\n\n # wait all the DDP process to be ready\n if num_gpus > 1:\n dist.barrier()\n\n # sort input sequences from short to long\n dataset.preprocess_samples()\n\n # get samplers\n sampler = self.get_sampler(config, dataset, num_gpus)\n\n loader = DataLoader(\n dataset,\n batch_size=config.eval_batch_size if is_eval else config.batch_size,\n shuffle=config.shuffle if sampler is None else False, # if there is no other sampler\n collate_fn=dataset.collate_fn,\n drop_last=config.drop_last, # setting this False might cause issues in AMP training.\n sampler=sampler,\n num_workers=config.num_eval_loader_workers if is_eval else config.num_loader_workers,\n pin_memory=False,\n )\n return loader\n\n def _get_test_aux_input(\n self,\n ) -> Dict:\n d_vector = None\n if self.config.use_d_vector_file:\n d_vector = [self.speaker_manager.embeddings[name][\"embedding\"] for name in self.speaker_manager.embeddings]\n d_vector = (random.sample(sorted(d_vector), 1),)\n\n aux_inputs = {\n 
\"speaker_id\": None\n if not self.config.use_speaker_embedding\n else random.sample(sorted(self.speaker_manager.name_to_id.values()), 1),\n \"d_vector\": d_vector,\n \"style_wav\": None, # TODO: handle GST style input\n }\n return aux_inputs\n\n def test_run(self, assets: Dict) -> Tuple[Dict, Dict]:\n \"\"\"Generic test run for `tts` models used by `Trainer`.\n\n You can override this for a different behaviour.\n\n Args:\n assets (dict): A dict of training assets. For `tts` models, it must include `{'audio_processor': ap}`.\n\n Returns:\n Tuple[Dict, Dict]: Test figures and audios to be projected to Tensorboard.\n \"\"\"\n print(\" | > Synthesizing test sentences.\")\n test_audios = {}\n test_figures = {}\n test_sentences = self.config.test_sentences\n aux_inputs = self._get_test_aux_input()\n for idx, sen in enumerate(test_sentences):\n if isinstance(sen, list):\n aux_inputs = self.get_aux_input_from_test_sentences(sen)\n sen = aux_inputs[\"text\"]\n outputs_dict = synthesis(\n self,\n sen,\n self.config,\n \"cuda\" in str(next(self.parameters()).device),\n speaker_id=aux_inputs[\"speaker_id\"],\n d_vector=aux_inputs[\"d_vector\"],\n style_wav=aux_inputs[\"style_wav\"],\n use_griffin_lim=True,\n do_trim_silence=False,\n )\n test_audios[\"{}-audio\".format(idx)] = outputs_dict[\"wav\"]\n test_figures[\"{}-prediction\".format(idx)] = plot_spectrogram(\n outputs_dict[\"outputs\"][\"model_outputs\"], self.ap, output_fig=False\n )\n test_figures[\"{}-alignment\".format(idx)] = plot_alignment(\n outputs_dict[\"outputs\"][\"alignments\"], output_fig=False\n )\n return test_figures, test_audios\n\n def on_init_start(self, trainer):\n \"\"\"Save the speaker.pth and language_ids.json at the beginning of the training. Also update both paths.\"\"\"\n if self.speaker_manager is not None:\n output_path = os.path.join(trainer.output_path, \"speakers.pth\")\n self.speaker_manager.save_ids_to_file(output_path)\n trainer.config.speakers_file = output_path\n # some models don't have `model_args` set\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.speakers_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `speakers.pth` is saved to {output_path}.\")\n print(\" > `speakers_file` is updated in the config.json.\")\n\n if self.language_manager is not None:\n output_path = os.path.join(trainer.output_path, \"language_ids.json\")\n self.language_manager.save_ids_to_file(output_path)\n trainer.config.language_ids_file = output_path\n if hasattr(trainer.config, \"model_args\"):\n trainer.config.model_args.language_ids_file = output_path\n trainer.config.save_json(os.path.join(trainer.output_path, \"config.json\"))\n print(f\" > `language_ids.json` is saved to {output_path}.\")\n print(\" > `language_ids_file` is updated in the config.json.\")" } ]
import os
import random
import torch
import torch.nn.functional as F
import torchaudio
from contextlib import contextmanager
from dataclasses import dataclass
from time import time
from coqpit import Coqpit
from tqdm import tqdm
from TTS.tts.layers.tortoise.arch_utils import TorchMelSpectrogram
from TTS.tts.layers.tortoise.audio_utils import denormalize_tacotron_mel, load_voice, wav_to_univnet_mel
from TTS.tts.layers.tortoise.autoregressive import UnifiedVoice
from TTS.tts.layers.tortoise.classifier import AudioMiniEncoderWithClassifierHead
from TTS.tts.layers.tortoise.clvp import CLVP
from TTS.tts.layers.tortoise.diffusion import SpacedDiffusion, get_named_beta_schedule, space_timesteps
from TTS.tts.layers.tortoise.diffusion_decoder import DiffusionTts
from TTS.tts.layers.tortoise.random_latent_generator import RandomLatentConverter
from TTS.tts.layers.tortoise.tokenizer import VoiceBpeTokenizer
from TTS.tts.layers.tortoise.vocoder import VocConf, VocType
from TTS.tts.layers.tortoise.wav2vec_alignment import Wav2VecAlignment
from TTS.tts.models.base_tts import BaseTTS
from math import ceil
19773
else: m = model.to(self.device) yield m m = model.cpu() def get_conditioning_latents( self, voice_samples, return_mels=False, latent_averaging_mode=0, original_tortoise=False, ): """ Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent). These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties. :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data. :param latent_averaging_mode: 0/1/2 for following modes: 0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples 1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks 2 - latents will be generated using (almost) entire voice samples, averaged per voice sample """ assert latent_averaging_mode in [ 0, 1, 2, ], "latent_averaging mode has to be one of (0, 1, 2)" with torch.no_grad(): voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples] auto_conds = [] for ls in voice_samples: auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path)) auto_conds = torch.stack(auto_conds, dim=1) with self.temporary_cuda(self.autoregressive) as ar: auto_latent = ar.get_conditioning(auto_conds) diffusion_conds = [] DURS_CONST = self.args.duration_const for ls in voice_samples: # The diffuser operates at a sample rate of 24000 (except for the latent inputs) sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1] if latent_averaging_mode == 0: sample = pad_or_truncate(sample, DURS_CONST) cond_mel = wav_to_univnet_mel( sample.to(self.device), do_normalization=False, device=self.device, ) diffusion_conds.append(cond_mel) else: if latent_averaging_mode == 2: temp_diffusion_conds = [] for chunk in range(ceil(sample.shape[1] / DURS_CONST)): current_sample = sample[:, chunk * DURS_CONST : (chunk + 1) * DURS_CONST] current_sample = pad_or_truncate(current_sample, DURS_CONST) cond_mel = wav_to_univnet_mel( current_sample.to(self.device), do_normalization=False, device=self.device, ) if latent_averaging_mode == 1: diffusion_conds.append(cond_mel) elif latent_averaging_mode == 2: temp_diffusion_conds.append(cond_mel) if latent_averaging_mode == 2: diffusion_conds.append(torch.stack(temp_diffusion_conds).mean(0)) diffusion_conds = torch.stack(diffusion_conds, dim=1) with self.temporary_cuda(self.diffusion) as diffusion: diffusion_latent = diffusion.get_conditioning(diffusion_conds) if return_mels: return auto_latent, diffusion_latent, auto_conds, diffusion_conds return auto_latent, diffusion_latent def get_random_conditioning_latents(self): # Lazy-load the RLG models. if self.rlg_auto is None: self.rlg_auto = RandomLatentConverter(1024).eval() self.rlg_auto.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_auto.pth"), map_location=torch.device("cpu"), ) ) self.rlg_diffusion = RandomLatentConverter(2048).eval() self.rlg_diffusion.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_diffuser.pth"), map_location=torch.device("cpu"), ) ) with torch.no_grad(): return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0])) def synthesize(self, text, config, speaker_id="random", voice_dirs=None, **kwargs): """Synthesize speech with the given input text. Args: text (str): Input text. 
config (TortoiseConfig): Config with inference parameters. speaker_id (str): One of the available speaker names. If `random`, it generates a random speaker. voice_dirs (List[str]): List of paths that host reference audio files for speakers. Defaults to None. **kwargs: Inference settings. See `inference()`. Returns: A dictionary of the output values with `wav` as output waveform, `deterministic_seed` as seed used at inference, `text_input` as text token IDs after tokenizer, `voice_samples` as samples used for cloning, `conditioning_latents` as latents used at inference. """ speaker_id = "random" if speaker_id is None else speaker_id if voice_dirs is not None: voice_dirs = [voice_dirs]
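Aside, not part of the dataset record: the cropped code above enumerates three `latent_averaging_mode` strategies for turning reference clips into conditioning latents. The toy sketch below mirrors only the chunking and averaging control flow; `toy_encode` is a hypothetical stand-in for `wav_to_univnet_mel` plus the diffusion conditioning step, and `CHUNK` plays the role of `args.duration_const`.

import math

import torch
import torch.nn.functional as F

CHUNK = 102400  # plays the role of args.duration_const


def pad_or_truncate(t: torch.Tensor, length: int) -> torch.Tensor:
    # Same utility as in the record: clip or zero-pad the last dim to `length`.
    if t.shape[-1] >= length:
        return t[..., :length]
    return F.pad(t, (0, length - t.shape[-1]))


def toy_encode(wav: torch.Tensor) -> torch.Tensor:
    # Stand-in "conditioning latent": an 8-dim summary of one fixed-length chunk.
    return wav.reshape(wav.shape[0], 8, -1).mean(dim=-1)


def conditioning_latents(samples, mode: int) -> torch.Tensor:
    conds = []
    for wav in samples:
        if mode == 0:
            # Mode 0: a single fixed-length chunk per reference clip.
            conds.append(toy_encode(pad_or_truncate(wav, CHUNK)))
        else:
            chunk_conds = []
            for i in range(math.ceil(wav.shape[1] / CHUNK)):
                chunk = pad_or_truncate(wav[:, i * CHUNK:(i + 1) * CHUNK], CHUNK)
                chunk_conds.append(toy_encode(chunk))
            if mode == 1:
                # Mode 1: keep one latent per chunk across all clips.
                conds.extend(chunk_conds)
            else:
                # Mode 2: average the chunks within each clip first.
                conds.append(torch.stack(chunk_conds).mean(0))
    return torch.stack(conds, dim=1)


if __name__ == "__main__":
    clips = [torch.randn(1, 250_000), torch.randn(1, 90_000)]
    for m in (0, 1, 2):
        print(m, conditioning_latents(clips, m).shape)

In the record itself the final averaging over the stacked conditionings happens inside `get_conditioning`, so this sketch stops at the stacking step.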
def pad_or_truncate(t, length): """ Utility function for forcing <t> to have the specified sequence length, whether by clipping it or padding it with 0s. """ tp = t[..., :length] if t.shape[-1] == length: tp = t elif t.shape[-1] < length: tp = F.pad(t, (0, length - t.shape[-1])) return tp def deterministic_state(seed=None): """ Sets the random seeds that tortoise uses to the current time() and returns that seed so results can be reproduced. """ seed = int(time()) if seed is None else seed torch.manual_seed(seed) random.seed(seed) # Can't currently set this because of CUBLAS. TODO: potentially enable it if necessary. # torch.use_deterministic_algorithms(True) return seed def load_discrete_vocoder_diffuser( trained_diffusion_steps=4000, desired_diffusion_steps=200, cond_free=True, cond_free_k=1, sampler="ddim", ): """ Helper function to load a GaussianDiffusion instance configured for use as a vocoder. """ return SpacedDiffusion( use_timesteps=space_timesteps(trained_diffusion_steps, [desired_diffusion_steps]), model_mean_type="epsilon", model_var_type="learned_range", loss_type="mse", betas=get_named_beta_schedule("linear", trained_diffusion_steps), conditioning_free=cond_free, conditioning_free_k=cond_free_k, sampler=sampler, ) def format_conditioning(clip, cond_length=132300, device="cuda", **kwargs): """ Converts the given conditioning signal to a MEL spectrogram and clips it as expected by the models. """ gap = clip.shape[-1] - cond_length if gap < 0: clip = F.pad(clip, pad=(0, abs(gap))) elif gap > 0: rand_start = random.randint(0, gap) clip = clip[:, rand_start : rand_start + cond_length] mel_clip = TorchMelSpectrogram(**kwargs)(clip.unsqueeze(0)).squeeze(0) return mel_clip.unsqueeze(0).to(device) def fix_autoregressive_output(codes, stop_token, complain=True): """ This function performs some padding on coded audio that fixes a mismatch issue between what the diffusion model was trained on and what the autoregressive code generator creates (which has no padding or end). This is highly specific to the DVAE being used, so this particular coding will not necessarily work if used with a different DVAE. This can be inferred by feeding a audio clip padded with lots of zeros on the end through the DVAE and copying out the last few codes. Failing to do this padding will produce speech with a harsh end that sounds like "BLAH" or similar. """ # Strip off the autoregressive stop token and add padding. stop_token_indices = (codes == stop_token).nonzero() if len(stop_token_indices) == 0: if complain: print( "No stop tokens found in one of the generated voice clips. This typically means the spoken audio is " "too long. In some cases, the output will still be good, though. Listen to it and if it is missing words, " "try breaking up your input text." ) return codes codes[stop_token_indices] = 83 stm = stop_token_indices.min().item() codes[stm:] = 83 if stm - 3 < codes.shape[0]: codes[-3] = 45 codes[-2] = 45 codes[-1] = 248 return codes def do_spectrogram_diffusion( diffusion_model, diffuser, latents, conditioning_latents, temperature=1, verbose=True, ): """ Uses the specified diffusion model to convert discrete codes into a spectrogram. """ with torch.no_grad(): output_seq_len = ( latents.shape[1] * 4 * 24000 // 22050 ) # This diffusion model converts from 22kHz spectrogram codes to a 24kHz spectrogram signal. 
output_shape = (latents.shape[0], 100, output_seq_len) precomputed_embeddings = diffusion_model.timestep_independent( latents, conditioning_latents, output_seq_len, False ) noise = torch.randn(output_shape, device=latents.device) * temperature mel = diffuser.sample_loop( diffusion_model, output_shape, noise=noise, model_kwargs={"precomputed_aligned_embeddings": precomputed_embeddings}, progress=verbose, ) return denormalize_tacotron_mel(mel)[:, :, :output_seq_len] def classify_audio_clip(clip, model_dir): """ Returns whether or not Tortoises' classifier thinks the given clip came from Tortoise. :param clip: torch tensor containing audio waveform data (get it from load_audio) :return: True if the clip was classified as coming from Tortoise and false if it was classified as real. """ classifier = AudioMiniEncoderWithClassifierHead( 2, spec_dim=1, embedding_dim=512, depth=5, downsample_factor=4, resnet_blocks=2, attn_blocks=4, num_attn_heads=4, base_channels=32, dropout=0, kernel_size=5, distribute_zero_label=False, ) classifier.load_state_dict(torch.load(os.path.join(model_dir, "classifier.pth"), map_location=torch.device("cpu"))) clip = clip.cpu().unsqueeze(0) results = F.softmax(classifier(clip), dim=-1) return results[0][0] def pick_best_batch_size_for_gpu(): """ Tries to pick a batch size that will fit in your GPU. These sizes aren't guaranteed to work, but they should give you a good shot. """ if torch.cuda.is_available(): _, available = torch.cuda.mem_get_info() availableGb = available / (1024**3) batch_size = 1 if availableGb > 14: batch_size = 16 elif availableGb > 10: batch_size = 8 elif availableGb > 7: batch_size = 4 return batch_size @dataclass class TortoiseAudioConfig(Coqpit): sample_rate: int = 22050 diffusion_sample_rate: int = 24000 output_sample_rate: int = 24000 @dataclass class TortoiseArgs(Coqpit): """A dataclass to represent Tortoise model arguments that define the model structure. Args: autoregressive_batch_size (int): The size of the auto-regressive batch. enable_redaction (bool, optional): Whether to enable redaction. Defaults to True. high_vram (bool, optional): Whether to use high VRAM. Defaults to False. kv_cache (bool, optional): Whether to use the kv_cache. Defaults to True. ar_checkpoint (str, optional): The checkpoint for the autoregressive model. Defaults to None. clvp_checkpoint (str, optional): The checkpoint for the ConditionalLatentVariablePerseq model. Defaults to None. diff_checkpoint (str, optional): The checkpoint for the DiffTTS model. Defaults to None. num_chars (int, optional): The maximum number of characters to generate. Defaults to 255. vocoder (VocType, optional): The vocoder to use for synthesis. Defaults to VocConf.Univnet. For UnifiedVoice model: ar_max_mel_tokens (int, optional): The maximum mel tokens for the autoregressive model. Defaults to 604. ar_max_text_tokens (int, optional): The maximum text tokens for the autoregressive model. Defaults to 402. ar_max_conditioning_inputs (int, optional): The maximum conditioning inputs for the autoregressive model. Defaults to 2. ar_layers (int, optional): The number of layers for the autoregressive model. Defaults to 30. ar_model_dim (int, optional): The model dimension for the autoregressive model. Defaults to 1024. ar_heads (int, optional): The number of heads for the autoregressive model. Defaults to 16. ar_number_text_tokens (int, optional): The number of text tokens for the autoregressive model. Defaults to 255. 
ar_start_text_token (int, optional): The start text token for the autoregressive model. Defaults to 255. ar_checkpointing (bool, optional): Whether to use checkpointing for the autoregressive model. Defaults to False. ar_train_solo_embeddings (bool, optional): Whether to train embeddings for the autoregressive model. Defaults to False. For DiffTTS model: diff_model_channels (int, optional): The number of channels for the DiffTTS model. Defaults to 1024. diff_num_layers (int, optional): The number of layers for the DiffTTS model. Defaults to 10. diff_in_channels (int, optional): The input channels for the DiffTTS model. Defaults to 100. diff_out_channels (int, optional): The output channels for the DiffTTS model. Defaults to 200. diff_in_latent_channels (int, optional): The input latent channels for the DiffTTS model. Defaults to 1024. diff_in_tokens (int, optional): The input tokens for the DiffTTS model. Defaults to 8193. diff_dropout (int, optional): The dropout percentage for the DiffTTS model. Defaults to 0. diff_use_fp16 (bool, optional): Whether to use fp16 for the DiffTTS model. Defaults to False. diff_num_heads (int, optional): The number of heads for the DiffTTS model. Defaults to 16. diff_layer_drop (int, optional): The layer dropout percentage for the DiffTTS model. Defaults to 0. diff_unconditioned_percentage (int, optional): The percentage of unconditioned inputs for the DiffTTS model. Defaults to 0. For ConditionalLatentVariablePerseq model: clvp_dim_text (int): The dimension of the text input for the CLVP module. Defaults to 768. clvp_dim_speech (int): The dimension of the speech input for the CLVP module. Defaults to 768. clvp_dim_latent (int): The dimension of the latent representation for the CLVP module. Defaults to 768. clvp_num_text_tokens (int): The number of text tokens used by the CLVP module. Defaults to 256. clvp_text_enc_depth (int): The depth of the text encoder in the CLVP module. Defaults to 20. clvp_text_seq_len (int): The maximum sequence length of the text input for the CLVP module. Defaults to 350. clvp_text_heads (int): The number of attention heads used by the text encoder in the CLVP module. Defaults to 12. clvp_num_speech_tokens (int): The number of speech tokens used by the CLVP module. Defaults to 8192. clvp_speech_enc_depth (int): The depth of the speech encoder in the CLVP module. Defaults to 20. clvp_speech_heads (int): The number of attention heads used by the speech encoder in the CLVP module. Defaults to 12. clvp_speech_seq_len (int): The maximum sequence length of the speech input for the CLVP module. Defaults to 430. clvp_use_xformers (bool): A flag indicating whether the model uses transformers in the CLVP module. Defaults to True. duration_const (int): A constant value used in the model. Defaults to 102400. 
""" autoregressive_batch_size: int = 1 enable_redaction: bool = False high_vram: bool = False kv_cache: bool = True ar_checkpoint: str = None clvp_checkpoint: str = None diff_checkpoint: str = None num_chars: int = 255 vocoder: VocType = VocConf.Univnet # UnifiedVoice params ar_max_mel_tokens: int = 604 ar_max_text_tokens: int = 402 ar_max_conditioning_inputs: int = 2 ar_layers: int = 30 ar_model_dim: int = 1024 ar_heads: int = 16 ar_number_text_tokens: int = 255 ar_start_text_token: int = 255 ar_checkpointing: bool = False ar_train_solo_embeddings: bool = False # DiffTTS params diff_model_channels: int = 1024 diff_num_layers: int = 10 diff_in_channels: int = 100 diff_out_channels: int = 200 diff_in_latent_channels: int = 1024 diff_in_tokens: int = 8193 diff_dropout: int = 0 diff_use_fp16: bool = False diff_num_heads: int = 16 diff_layer_drop: int = 0 diff_unconditioned_percentage: int = 0 # clvp params clvp_dim_text: int = 768 clvp_dim_speech: int = 768 clvp_dim_latent: int = 768 clvp_num_text_tokens: int = 256 clvp_text_enc_depth: int = 20 clvp_text_seq_len: int = 350 clvp_text_heads: int = 12 clvp_num_speech_tokens: int = 8192 clvp_speech_enc_depth: int = 20 clvp_speech_heads: int = 12 clvp_speech_seq_len: int = 430 clvp_use_xformers: bool = True # constants duration_const: int = 102400 class Tortoise(BaseTTS): """Tortoise model class. Currently only supports inference. Examples: >>> from TTS.tts.configs.tortoise_config import TortoiseConfig >>> from TTS.tts.models.tortoise import Tortoise >>> config = TortoiseConfig() >>> model = Tortoise.inif_from_config(config) >>> model.load_checkpoint(config, checkpoint_dir="paths/to/models_dir/", eval=True) """ def __init__(self, config: Coqpit): super().__init__(config, ap=None, tokenizer=None) self.mel_norm_path = None self.config = config self.ar_checkpoint = self.args.ar_checkpoint self.diff_checkpoint = self.args.diff_checkpoint # TODO: check if this is even needed self.models_dir = config.model_dir self.autoregressive_batch_size = ( pick_best_batch_size_for_gpu() if self.args.autoregressive_batch_size is None else self.args.autoregressive_batch_size ) self.enable_redaction = self.args.enable_redaction self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") if self.enable_redaction: self.aligner = Wav2VecAlignment() self.tokenizer = VoiceBpeTokenizer() self.autoregressive = UnifiedVoice( max_mel_tokens=self.args.ar_max_mel_tokens, max_text_tokens=self.args.ar_max_text_tokens, max_conditioning_inputs=self.args.ar_max_conditioning_inputs, layers=self.args.ar_layers, model_dim=self.args.ar_model_dim, heads=self.args.ar_heads, number_text_tokens=self.args.ar_number_text_tokens, start_text_token=self.args.ar_start_text_token, checkpointing=self.args.ar_checkpointing, train_solo_embeddings=self.args.ar_train_solo_embeddings, ).cpu() self.diffusion = DiffusionTts( model_channels=self.args.diff_model_channels, num_layers=self.args.diff_num_layers, in_channels=self.args.diff_in_channels, out_channels=self.args.diff_out_channels, in_latent_channels=self.args.diff_in_latent_channels, in_tokens=self.args.diff_in_tokens, dropout=self.args.diff_dropout, use_fp16=self.args.diff_use_fp16, num_heads=self.args.diff_num_heads, layer_drop=self.args.diff_layer_drop, unconditioned_percentage=self.args.diff_unconditioned_percentage, ).cpu() self.clvp = CLVP( dim_text=self.args.clvp_dim_text, dim_speech=self.args.clvp_dim_speech, dim_latent=self.args.clvp_dim_latent, num_text_tokens=self.args.clvp_num_text_tokens, 
text_enc_depth=self.args.clvp_text_enc_depth, text_seq_len=self.args.clvp_text_seq_len, text_heads=self.args.clvp_text_heads, num_speech_tokens=self.args.clvp_num_speech_tokens, speech_enc_depth=self.args.clvp_speech_enc_depth, speech_heads=self.args.clvp_speech_heads, speech_seq_len=self.args.clvp_speech_seq_len, use_xformers=self.args.clvp_use_xformers, ).cpu() self.vocoder = self.args.vocoder.value.constructor().cpu() # Random latent generators (RLGs) are loaded lazily. self.rlg_auto = None self.rlg_diffusion = None if self.args.high_vram: self.autoregressive = self.autoregressive.to(self.device) self.diffusion = self.diffusion.to(self.device) self.clvp = self.clvp.to(self.device) self.vocoder = self.vocoder.to(self.device) self.high_vram = self.args.high_vram @contextmanager def temporary_cuda(self, model): if self.high_vram: yield model else: m = model.to(self.device) yield m m = model.cpu() def get_conditioning_latents( self, voice_samples, return_mels=False, latent_averaging_mode=0, original_tortoise=False, ): """ Transforms one or more voice_samples into a tuple (autoregressive_conditioning_latent, diffusion_conditioning_latent). These are expressive learned latents that encode aspects of the provided clips like voice, intonation, and acoustic properties. :param voice_samples: List of arbitrary reference clips, which should be *pairs* of torch tensors containing arbitrary kHz waveform data. :param latent_averaging_mode: 0/1/2 for following modes: 0 - latents will be generated as in original tortoise, using ~4.27s from each voice sample, averaging latent across all samples 1 - latents will be generated using (almost) entire voice samples, averaged across all the ~4.27s chunks 2 - latents will be generated using (almost) entire voice samples, averaged per voice sample """ assert latent_averaging_mode in [ 0, 1, 2, ], "latent_averaging mode has to be one of (0, 1, 2)" with torch.no_grad(): voice_samples = [[v.to(self.device) for v in ls] for ls in voice_samples] auto_conds = [] for ls in voice_samples: auto_conds.append(format_conditioning(ls[0], device=self.device, mel_norm_file=self.mel_norm_path)) auto_conds = torch.stack(auto_conds, dim=1) with self.temporary_cuda(self.autoregressive) as ar: auto_latent = ar.get_conditioning(auto_conds) diffusion_conds = [] DURS_CONST = self.args.duration_const for ls in voice_samples: # The diffuser operates at a sample rate of 24000 (except for the latent inputs) sample = torchaudio.functional.resample(ls[0], 22050, 24000) if original_tortoise else ls[1] if latent_averaging_mode == 0: sample = pad_or_truncate(sample, DURS_CONST) cond_mel = wav_to_univnet_mel( sample.to(self.device), do_normalization=False, device=self.device, ) diffusion_conds.append(cond_mel) else: if latent_averaging_mode == 2: temp_diffusion_conds = [] for chunk in range(ceil(sample.shape[1] / DURS_CONST)): current_sample = sample[:, chunk * DURS_CONST : (chunk + 1) * DURS_CONST] current_sample = pad_or_truncate(current_sample, DURS_CONST) cond_mel = wav_to_univnet_mel( current_sample.to(self.device), do_normalization=False, device=self.device, ) if latent_averaging_mode == 1: diffusion_conds.append(cond_mel) elif latent_averaging_mode == 2: temp_diffusion_conds.append(cond_mel) if latent_averaging_mode == 2: diffusion_conds.append(torch.stack(temp_diffusion_conds).mean(0)) diffusion_conds = torch.stack(diffusion_conds, dim=1) with self.temporary_cuda(self.diffusion) as diffusion: diffusion_latent = diffusion.get_conditioning(diffusion_conds) if return_mels: return 
auto_latent, diffusion_latent, auto_conds, diffusion_conds return auto_latent, diffusion_latent def get_random_conditioning_latents(self): # Lazy-load the RLG models. if self.rlg_auto is None: self.rlg_auto = RandomLatentConverter(1024).eval() self.rlg_auto.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_auto.pth"), map_location=torch.device("cpu"), ) ) self.rlg_diffusion = RandomLatentConverter(2048).eval() self.rlg_diffusion.load_state_dict( torch.load( os.path.join(self.models_dir, "rlg_diffuser.pth"), map_location=torch.device("cpu"), ) ) with torch.no_grad(): return self.rlg_auto(torch.tensor([0.0])), self.rlg_diffusion(torch.tensor([0.0])) def synthesize(self, text, config, speaker_id="random", voice_dirs=None, **kwargs): """Synthesize speech with the given input text. Args: text (str): Input text. config (TortoiseConfig): Config with inference parameters. speaker_id (str): One of the available speaker names. If `random`, it generates a random speaker. voice_dirs (List[str]): List of paths that host reference audio files for speakers. Defaults to None. **kwargs: Inference settings. See `inference()`. Returns: A dictionary of the output values with `wav` as output waveform, `deterministic_seed` as seed used at inference, `text_input` as text token IDs after tokenizer, `voice_samples` as samples used for cloning, `conditioning_latents` as latents used at inference. """ speaker_id = "random" if speaker_id is None else speaker_id if voice_dirs is not None: voice_dirs = [voice_dirs]
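Aside, not part of the dataset record: both conditioning steps above wrap the heavy sub-models in `temporary_cuda`, a context manager that keeps them on the CPU and moves one onto the accelerator only for the duration of a single call. Below is a minimal sketch of that pattern, with a tiny `nn.Linear` standing in for the autoregressive or diffusion model; unlike the snippet it adds a `try/finally` so the model is moved back even if the block raises.

from contextlib import contextmanager

import torch
import torch.nn as nn

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


@contextmanager
def temporary_device(model: nn.Module, high_vram: bool = False):
    if high_vram:
        # High-VRAM mode: the model is assumed to live on the device permanently.
        yield model
    else:
        model.to(DEVICE)
        try:
            yield model
        finally:
            model.cpu()  # free accelerator memory once the block exits


if __name__ == "__main__":
    sub_model = nn.Linear(4, 4)            # stays on CPU between calls
    x = torch.randn(2, 4, device=DEVICE)
    with temporary_device(sub_model) as m:
        y = m(x)                           # runs on DEVICE
    print(y.shape, next(sub_model.parameters()).device)  # back on CPU afterwards

The pattern trades wall-clock time for peak VRAM, which is why the record's `high_vram` flag bypasses it and keeps every sub-model on the device.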
voice_samples, conditioning_latents = load_voice(speaker_id, voice_dirs)
2
2023-11-29 08:15:06+00:00
24k
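Aside, not part of the dump: taken together, the fields of the record above (the cropped context, the gold `next_line`, `gold_snippet_index`, and the `24k` level) describe a next-line completion example. The sketch below shows one way such a record could be scored; the field names follow the schema in this dump, but the metrics (exact match and a `difflib` ratio) are an illustrative assumption rather than the benchmark's official scoring.

from difflib import SequenceMatcher

# Values copied from the record above.
record = {
    "next_line": "voice_samples, conditioning_latents = load_voice(speaker_id, voice_dirs)",
    "gold_snippet_index": 2,
    "level": "24k",
}


def score_prediction(prediction: str, gold: str) -> dict:
    # Keep only the first non-empty line of the model output before comparing.
    pred = prediction.strip().splitlines()[0].strip() if prediction.strip() else ""
    gold = gold.strip()
    return {
        "exact_match": pred == gold,
        "similarity": SequenceMatcher(None, pred, gold).ratio(),
    }


if __name__ == "__main__":
    model_output = "voice_samples, conditioning_latents = load_voice(speaker_id, voice_dirs)\n"
    print(score_prediction(model_output, record["next_line"]))

Trimming the prediction to its first line keeps the comparison focused on the single gold line the record provides.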
magic-research/magic-animate
magicanimate/pipelines/pipeline_animation.py
[ { "identifier": "UNet3DConditionModel", "path": "magicanimate/models/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0, \n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n \n # Additional\n use_motion_module = False,\n motion_module_resolutions = ( 1,2,4,8 ),\n motion_module_mid_block = False,\n motion_module_decoder_only = False,\n motion_module_type = None,\n motion_module_kwargs = {},\n unet_use_cross_frame_attention = None,\n unet_use_temporal_attention = None,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n 
dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n \n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n \n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], 
out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size, encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n 
config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n \n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n \n return model" }, { "identifier": "ControlNetModel", "path": "magicanimate/models/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "magicanimate/models/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1, \n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks,\n batch_size=batch_size, \n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n uc_mask = (\n torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n .to(device)\n .bool()\n )\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n hidden_states_uc = self.attn1(norm_hidden_states, \n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "magicanimate/pipelines/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "magicanimate/pipelines/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "magicanimate/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math
import numpy as np
import torch
import torch.distributed as dist
from typing import Callable, List, Optional, Union
from dataclasses import dataclass
from PIL import Image
from tqdm import tqdm
from diffusers.utils import is_accelerate_available
from packaging import version
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.configuration_utils import FrozenDict
from diffusers.models import AutoencoderKL
from diffusers.pipeline_utils import DiffusionPipeline
from diffusers.schedulers import (
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    EulerAncestralDiscreteScheduler,
    EulerDiscreteScheduler,
    LMSDiscreteScheduler,
    PNDMScheduler,
)
from diffusers.utils import deprecate, logging, BaseOutput
from einops import rearrange
from magicanimate.models.unet_controlnet import UNet3DConditionModel
from magicanimate.models.controlnet import ControlNetModel
from magicanimate.models.mutual_self_attention import ReferenceAttentionControl
from magicanimate.pipelines.context import (
    get_context_scheduler,
    get_total_steps
)
from magicanimate.utils.util import get_tensor_interpolation_method
from accelerate import cpu_offload
16,750
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************

# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet3DConditionModel,
# *************************************************************************
# This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo-
# difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B-
# ytedance Inc..
# *************************************************************************

# Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py

# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
TODO:
1. support multi-controlnet
2. [DONE] support DDIM inversion
3. support Prompt-to-prompt
"""

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


@dataclass
class AnimationPipelineOutput(BaseOutput):
    videos: Union[torch.Tensor, np.ndarray]


class AnimationPipeline(DiffusionPipeline):
    _optional_components = []

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet3DConditionModel,
controlnet: ControlNetModel,
1
2023-11-21 08:33:54+00:00
24k
wenquanlu/HandRefiner
cldm/cldm.py
[ { "identifier": "conv_nd", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def conv_nd(dims, *args, **kwargs):\n \"\"\"\n Create a 1D, 2D, or 3D convolution module.\n \"\"\"\n if dims == 1:\n return nn.Conv1d(*args, **kwargs)\n elif dims == 2:\n return nn.Conv2d(*args, **kwargs)\n elif dims == 3:\n return nn.Conv3d(*args, **kwargs)\n raise ValueError(f\"unsupported dimensions: {dims}\")" }, { "identifier": "linear", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def linear(*args, **kwargs):\n \"\"\"\n Create a linear module.\n \"\"\"\n return nn.Linear(*args, **kwargs)" }, { "identifier": "zero_module", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def zero_module(module):\n \"\"\"\n Zero out the parameters of a module and return it.\n \"\"\"\n for p in module.parameters():\n p.detach().zero_()\n return module" }, { "identifier": "timestep_embedding", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False):\n \"\"\"\n Create sinusoidal timestep embeddings.\n :param timesteps: a 1-D Tensor of N indices, one per batch element.\n These may be fractional.\n :param dim: the dimension of the output.\n :param max_period: controls the minimum frequency of the embeddings.\n :return: an [N x dim] Tensor of positional embeddings.\n \"\"\"\n if not repeat_only:\n half = dim // 2\n freqs = torch.exp(\n -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half\n ).to(device=timesteps.device)\n args = timesteps[:, None].float() * freqs[None]\n embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)\n if dim % 2:\n embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)\n else:\n embedding = repeat(timesteps, 'b -> b d', d=dim)\n return embedding" }, { "identifier": "SpatialTransformer", "path": "ldm/modules/attention.py", "snippet": "class SpatialTransformer(nn.Module):\n \"\"\"\n Transformer block for image-like data.\n First, project the input (aka embedding)\n and reshape to b, t, d.\n Then apply standard transformer action.\n Finally, reshape to image\n NEW: use_linear for more efficiency instead of the 1x1 convs\n \"\"\"\n def __init__(self, in_channels, n_heads, d_head,\n depth=1, dropout=0., context_dim=None,\n disable_self_attn=False, use_linear=False,\n use_checkpoint=True):\n super().__init__()\n if exists(context_dim) and not isinstance(context_dim, list):\n context_dim = [context_dim]\n self.in_channels = in_channels\n inner_dim = n_heads * d_head\n self.norm = Normalize(in_channels)\n if not use_linear:\n self.proj_in = nn.Conv2d(in_channels,\n inner_dim,\n kernel_size=1,\n stride=1,\n padding=0)\n else:\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n self.transformer_blocks = nn.ModuleList(\n [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim[d],\n disable_self_attn=disable_self_attn, checkpoint=use_checkpoint)\n for d in range(depth)]\n )\n if not use_linear:\n self.proj_out = zero_module(nn.Conv2d(inner_dim,\n in_channels,\n kernel_size=1,\n stride=1,\n padding=0))\n else:\n self.proj_out = zero_module(nn.Linear(in_channels, inner_dim))\n self.use_linear = use_linear\n\n def forward(self, x, context=None):\n # note: if no context is given, cross-attention defaults to self-attention\n if not isinstance(context, list):\n context = [context]\n b, c, h, w = x.shape\n x_in = x\n x = self.norm(x)\n if not self.use_linear:\n x = self.proj_in(x)\n x = rearrange(x, 'b c h w 
-> b (h w) c').contiguous()\n if self.use_linear:\n x = self.proj_in(x)\n for i, block in enumerate(self.transformer_blocks):\n x = block(x, context=context[i])\n if self.use_linear:\n x = self.proj_out(x)\n x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w).contiguous()\n if not self.use_linear:\n x = self.proj_out(x)\n return x + x_in" }, { "identifier": "UNetModel", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class UNetModel(nn.Module):\n \"\"\"\n The full UNet model with attention and timestep embedding.\n :param in_channels: channels in the input Tensor.\n :param model_channels: base channel count for the model.\n :param out_channels: channels in the output Tensor.\n :param num_res_blocks: number of residual blocks per downsample.\n :param attention_resolutions: a collection of downsample rates at which\n attention will take place. May be a set, list, or tuple.\n For example, if this contains 4, then at 4x downsampling, attention\n will be used.\n :param dropout: the dropout probability.\n :param channel_mult: channel multiplier for each level of the UNet.\n :param conv_resample: if True, use learned convolutions for upsampling and\n downsampling.\n :param dims: determines if the signal is 1D, 2D, or 3D.\n :param num_classes: if specified (as an int), then this model will be\n class-conditional with `num_classes` classes.\n :param use_checkpoint: use gradient checkpointing to reduce memory usage.\n :param num_heads: the number of attention heads in each attention layer.\n :param num_heads_channels: if specified, ignore num_heads and instead use\n a fixed channel width per attention head.\n :param num_heads_upsample: works with num_heads to set a different number\n of heads for upsampling. Deprecated.\n :param use_scale_shift_norm: use a FiLM-like conditioning mechanism.\n :param resblock_updown: use residual blocks for up/downsampling.\n :param use_new_attention_order: use a different attention pattern for potentially\n increased efficiency.\n \"\"\"\n\n def __init__(\n self,\n image_size,\n in_channels,\n model_channels,\n out_channels,\n num_res_blocks,\n attention_resolutions,\n dropout=0,\n channel_mult=(1, 2, 4, 8),\n conv_resample=True,\n dims=2,\n num_classes=None,\n use_checkpoint=False,\n use_fp16=False,\n num_heads=-1,\n num_head_channels=-1,\n num_heads_upsample=-1,\n use_scale_shift_norm=False,\n resblock_updown=False,\n use_new_attention_order=False,\n use_spatial_transformer=False, # custom transformer support\n transformer_depth=1, # custom transformer support\n context_dim=None, # custom transformer support\n n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model\n legacy=True,\n disable_self_attentions=None,\n num_attention_blocks=None,\n disable_middle_self_attn=False,\n use_linear_in_transformer=False,\n ):\n super().__init__()\n if use_spatial_transformer:\n assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'\n\n if context_dim is not None:\n assert use_spatial_transformer, 'Fool!! 
You forgot to use the spatial transformer for your cross-attention conditioning...'\n from omegaconf.listconfig import ListConfig\n if type(context_dim) == ListConfig:\n context_dim = list(context_dim)\n\n if num_heads_upsample == -1:\n num_heads_upsample = num_heads\n\n if num_heads == -1:\n assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'\n\n if num_head_channels == -1:\n assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'\n\n self.image_size = image_size\n self.in_channels = in_channels\n self.model_channels = model_channels\n self.out_channels = out_channels\n if isinstance(num_res_blocks, int):\n self.num_res_blocks = len(channel_mult) * [num_res_blocks]\n else:\n if len(num_res_blocks) != len(channel_mult):\n raise ValueError(\"provide num_res_blocks either as an int (globally constant) or \"\n \"as a list/tuple (per-level) with the same length as channel_mult\")\n self.num_res_blocks = num_res_blocks\n if disable_self_attentions is not None:\n # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not\n assert len(disable_self_attentions) == len(channel_mult)\n if num_attention_blocks is not None:\n assert len(num_attention_blocks) == len(self.num_res_blocks)\n assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(len(num_attention_blocks))))\n print(f\"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. \"\n f\"This option has LESS priority than attention_resolutions {attention_resolutions}, \"\n f\"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, \"\n f\"attention will still not be set.\")\n\n self.attention_resolutions = attention_resolutions\n self.dropout = dropout\n self.channel_mult = channel_mult\n self.conv_resample = conv_resample\n self.num_classes = num_classes\n self.use_checkpoint = use_checkpoint\n self.dtype = th.float16 if use_fp16 else th.float32\n self.num_heads = num_heads\n self.num_head_channels = num_head_channels\n self.num_heads_upsample = num_heads_upsample\n self.predict_codebook_ids = n_embed is not None\n\n time_embed_dim = model_channels * 4\n self.time_embed = nn.Sequential(\n linear(model_channels, time_embed_dim),\n nn.SiLU(),\n linear(time_embed_dim, time_embed_dim),\n )\n\n if self.num_classes is not None:\n if isinstance(self.num_classes, int):\n self.label_emb = nn.Embedding(num_classes, time_embed_dim)\n elif self.num_classes == \"continuous\":\n print(\"setting up linear c_adm embedding layer\")\n self.label_emb = nn.Linear(1, time_embed_dim)\n else:\n raise ValueError()\n\n self.input_blocks = nn.ModuleList(\n [\n TimestepEmbedSequential(\n conv_nd(dims, in_channels, model_channels, 3, padding=1)\n )\n ]\n )\n self._feature_size = model_channels\n input_block_chans = [model_channels]\n ch = model_channels\n ds = 1\n for level, mult in enumerate(channel_mult):\n for nr in range(self.num_res_blocks[level]):\n layers = [\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=mult * model_channels,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = mult * model_channels\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n 
disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n self.input_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n input_block_chans.append(ch)\n if level != len(channel_mult) - 1:\n out_ch = ch\n self.input_blocks.append(\n TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n down=True,\n )\n if resblock_updown\n else Downsample(\n ch, conv_resample, dims=dims, out_channels=out_ch\n )\n )\n )\n ch = out_ch\n input_block_chans.append(ch)\n ds *= 2\n self._feature_size += ch\n\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n self.middle_block = TimestepEmbedSequential(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer( # always uses a self-attn\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n ),\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n ),\n )\n self._feature_size += ch\n\n self.output_blocks = nn.ModuleList([])\n for level, mult in list(enumerate(channel_mult))[::-1]:\n for i in range(self.num_res_blocks[level] + 1):\n ich = input_block_chans.pop()\n layers = [\n ResBlock(\n ch + ich,\n time_embed_dim,\n dropout,\n out_channels=model_channels * mult,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n )\n ]\n ch = model_channels * mult\n if ds in attention_resolutions:\n if num_head_channels == -1:\n dim_head = ch // num_heads\n else:\n num_heads = ch // num_head_channels\n dim_head = num_head_channels\n if legacy:\n #num_heads = 1\n dim_head = ch // num_heads if use_spatial_transformer else num_head_channels\n if exists(disable_self_attentions):\n disabled_sa = disable_self_attentions[level]\n else:\n disabled_sa = False\n\n if not exists(num_attention_blocks) or i < num_attention_blocks[level]:\n layers.append(\n AttentionBlock(\n ch,\n use_checkpoint=use_checkpoint,\n num_heads=num_heads_upsample,\n num_head_channels=dim_head,\n use_new_attention_order=use_new_attention_order,\n ) if not use_spatial_transformer else SpatialTransformer(\n ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,\n disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,\n use_checkpoint=use_checkpoint\n )\n )\n if level 
and i == self.num_res_blocks[level]:\n out_ch = ch\n layers.append(\n ResBlock(\n ch,\n time_embed_dim,\n dropout,\n out_channels=out_ch,\n dims=dims,\n use_checkpoint=use_checkpoint,\n use_scale_shift_norm=use_scale_shift_norm,\n up=True,\n )\n if resblock_updown\n else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch)\n )\n ds //= 2\n self.output_blocks.append(TimestepEmbedSequential(*layers))\n self._feature_size += ch\n\n self.out = nn.Sequential(\n normalization(ch),\n nn.SiLU(),\n zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)),\n )\n if self.predict_codebook_ids:\n self.id_predictor = nn.Sequential(\n normalization(ch),\n conv_nd(dims, model_channels, n_embed, 1),\n #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits\n )\n\n def convert_to_fp16(self):\n \"\"\"\n Convert the torso of the model to float16.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f16)\n self.middle_block.apply(convert_module_to_f16)\n self.output_blocks.apply(convert_module_to_f16)\n\n def convert_to_fp32(self):\n \"\"\"\n Convert the torso of the model to float32.\n \"\"\"\n self.input_blocks.apply(convert_module_to_f32)\n self.middle_block.apply(convert_module_to_f32)\n self.output_blocks.apply(convert_module_to_f32)\n\n def forward(self, x, timesteps=None, context=None, y=None,**kwargs):\n \"\"\"\n Apply the model to an input batch.\n :param x: an [N x C x ...] Tensor of inputs.\n :param timesteps: a 1-D batch of timesteps.\n :param context: conditioning plugged in via crossattn\n :param y: an [N] Tensor of labels, if class-conditional.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n assert (y is not None) == (\n self.num_classes is not None\n ), \"must specify y if and only if the model is class-conditional\"\n hs = []\n t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)\n emb = self.time_embed(t_emb)\n\n if self.num_classes is not None:\n assert y.shape[0] == x.shape[0]\n emb = emb + self.label_emb(y)\n\n h = x.type(self.dtype)\n for module in self.input_blocks:\n h = module(h, emb, context)\n hs.append(h)\n h = self.middle_block(h, emb, context)\n for module in self.output_blocks:\n h = th.cat([h, hs.pop()], dim=1)\n h = module(h, emb, context)\n h = h.type(x.dtype)\n if self.predict_codebook_ids:\n return self.id_predictor(h)\n else:\n return self.out(h)" }, { "identifier": "TimestepEmbedSequential", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class TimestepEmbedSequential(nn.Sequential, TimestepBlock):\n \"\"\"\n A sequential module that passes timestep embeddings to the children that\n support it as an extra input.\n \"\"\"\n\n def forward(self, x, emb, context=None):\n for layer in self:\n if isinstance(layer, TimestepBlock):\n x = layer(x, emb)\n elif isinstance(layer, SpatialTransformer):\n x = layer(x, context)\n else:\n x = layer(x)\n return x" }, { "identifier": "ResBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class ResBlock(TimestepBlock):\n \"\"\"\n A residual block that can optionally change the number of channels.\n :param channels: the number of input channels.\n :param emb_channels: the number of timestep embedding channels.\n :param dropout: the rate of dropout.\n :param out_channels: if specified, the number of out channels.\n :param use_conv: if True and out_channels is specified, use a spatial\n convolution instead of a smaller 1x1 convolution to change the\n channels in the skip connection.\n :param dims: determines if the signal 
is 1D, 2D, or 3D.\n :param use_checkpoint: if True, use gradient checkpointing on this module.\n :param up: if True, use this block for upsampling.\n :param down: if True, use this block for downsampling.\n \"\"\"\n\n def __init__(\n self,\n channels,\n emb_channels,\n dropout,\n out_channels=None,\n use_conv=False,\n use_scale_shift_norm=False,\n dims=2,\n use_checkpoint=False,\n up=False,\n down=False,\n ):\n super().__init__()\n self.channels = channels\n self.emb_channels = emb_channels\n self.dropout = dropout\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_checkpoint = use_checkpoint\n self.use_scale_shift_norm = use_scale_shift_norm\n\n self.in_layers = nn.Sequential(\n normalization(channels),\n nn.SiLU(),\n conv_nd(dims, channels, self.out_channels, 3, padding=1),\n )\n\n self.updown = up or down\n\n if up:\n self.h_upd = Upsample(channels, False, dims)\n self.x_upd = Upsample(channels, False, dims)\n elif down:\n self.h_upd = Downsample(channels, False, dims)\n self.x_upd = Downsample(channels, False, dims)\n else:\n self.h_upd = self.x_upd = nn.Identity()\n\n self.emb_layers = nn.Sequential(\n nn.SiLU(),\n linear(\n emb_channels,\n 2 * self.out_channels if use_scale_shift_norm else self.out_channels,\n ),\n )\n self.out_layers = nn.Sequential(\n normalization(self.out_channels),\n nn.SiLU(),\n nn.Dropout(p=dropout),\n zero_module(\n conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1)\n ),\n )\n\n if self.out_channels == channels:\n self.skip_connection = nn.Identity()\n elif use_conv:\n self.skip_connection = conv_nd(\n dims, channels, self.out_channels, 3, padding=1\n )\n else:\n self.skip_connection = conv_nd(dims, channels, self.out_channels, 1)\n\n def forward(self, x, emb):\n \"\"\"\n Apply the block to a Tensor, conditioned on a timestep embedding.\n :param x: an [N x C x ...] Tensor of features.\n :param emb: an [N x emb_channels] Tensor of timestep embeddings.\n :return: an [N x C x ...] Tensor of outputs.\n \"\"\"\n return checkpoint(\n self._forward, (x, emb), self.parameters(), self.use_checkpoint\n )\n\n\n def _forward(self, x, emb):\n if self.updown:\n in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1]\n h = in_rest(x)\n h = self.h_upd(h)\n x = self.x_upd(x)\n h = in_conv(h)\n else:\n h = self.in_layers(x)\n emb_out = self.emb_layers(emb).type(h.dtype)\n while len(emb_out.shape) < len(h.shape):\n emb_out = emb_out[..., None]\n if self.use_scale_shift_norm:\n out_norm, out_rest = self.out_layers[0], self.out_layers[1:]\n scale, shift = th.chunk(emb_out, 2, dim=1)\n h = out_norm(h) * (1 + scale) + shift\n h = out_rest(h)\n else:\n h = h + emb_out\n h = self.out_layers(h)\n return self.skip_connection(x) + h" }, { "identifier": "Downsample", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class Downsample(nn.Module):\n \"\"\"\n A downsampling layer with an optional convolution.\n :param channels: channels in the inputs and outputs.\n :param use_conv: a bool determining if a convolution is applied.\n :param dims: determines if the signal is 1D, 2D, or 3D. 
If 3D, then\n downsampling occurs in the inner-two dimensions.\n \"\"\"\n\n def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.dims = dims\n stride = 2 if dims != 3 else (1, 2, 2)\n if use_conv:\n self.op = conv_nd(\n dims, self.channels, self.out_channels, 3, stride=stride, padding=padding\n )\n else:\n assert self.channels == self.out_channels\n self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride)\n\n def forward(self, x):\n assert x.shape[1] == self.channels\n return self.op(x)" }, { "identifier": "AttentionBlock", "path": "ldm/modules/diffusionmodules/openaimodel.py", "snippet": "class AttentionBlock(nn.Module):\n \"\"\"\n An attention block that allows spatial positions to attend to each other.\n Originally ported from here, but adapted to the N-d case.\n https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66.\n \"\"\"\n\n def __init__(\n self,\n channels,\n num_heads=1,\n num_head_channels=-1,\n use_checkpoint=False,\n use_new_attention_order=False,\n ):\n super().__init__()\n self.channels = channels\n if num_head_channels == -1:\n self.num_heads = num_heads\n else:\n assert (\n channels % num_head_channels == 0\n ), f\"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}\"\n self.num_heads = channels // num_head_channels\n self.use_checkpoint = use_checkpoint\n self.norm = normalization(channels)\n self.qkv = conv_nd(1, channels, channels * 3, 1)\n if use_new_attention_order:\n # split qkv before split heads\n self.attention = QKVAttention(self.num_heads)\n else:\n # split heads before split qkv\n self.attention = QKVAttentionLegacy(self.num_heads)\n\n self.proj_out = zero_module(conv_nd(1, channels, channels, 1))\n\n def forward(self, x):\n return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!!\n #return pt_checkpoint(self._forward, x) # pytorch\n\n def _forward(self, x):\n b, c, *spatial = x.shape\n x = x.reshape(b, c, -1)\n qkv = self.qkv(self.norm(x))\n h = self.attention(qkv)\n h = self.proj_out(h)\n return (x + h).reshape(b, c, *spatial)" }, { "identifier": "LatentDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentDiffusion(DDPM):\n \"\"\"main class\"\"\"\n\n def __init__(self,\n first_stage_config,\n cond_stage_config,\n num_timesteps_cond=None,\n cond_stage_key=\"image\",\n cond_stage_trainable=False,\n concat_mode=True,\n cond_stage_forward=None,\n conditioning_key=None,\n scale_factor=1.0,\n scale_by_std=False,\n force_null_conditioning=False,\n *args, **kwargs):\n self.force_null_conditioning = force_null_conditioning\n self.num_timesteps_cond = default(num_timesteps_cond, 1)\n self.scale_by_std = scale_by_std\n assert self.num_timesteps_cond <= kwargs['timesteps']\n # for backwards compatibility after implementation of DiffusionWrapper\n if conditioning_key is None:\n conditioning_key = 'concat' if concat_mode else 'crossattn'\n if cond_stage_config == '__is_unconditional__' and not self.force_null_conditioning:\n conditioning_key = None\n ckpt_path = kwargs.pop(\"ckpt_path\", None)\n reset_ema = kwargs.pop(\"reset_ema\", False)\n reset_num_ema_updates = kwargs.pop(\"reset_num_ema_updates\", False)\n ignore_keys = kwargs.pop(\"ignore_keys\", [])\n super().__init__(conditioning_key=conditioning_key, *args, 
**kwargs)\n self.concat_mode = concat_mode\n self.cond_stage_trainable = cond_stage_trainable\n self.cond_stage_key = cond_stage_key\n try:\n self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1\n except:\n self.num_downs = 0\n if not scale_by_std:\n self.scale_factor = scale_factor\n else:\n self.register_buffer('scale_factor', torch.tensor(scale_factor))\n self.instantiate_first_stage(first_stage_config)\n self.instantiate_cond_stage(cond_stage_config)\n self.cond_stage_forward = cond_stage_forward\n self.clip_denoised = False\n self.bbox_tokenizer = None\n\n self.restarted_from_ckpt = False\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys)\n self.restarted_from_ckpt = True\n if reset_ema:\n assert self.use_ema\n print(\n f\"Resetting ema to pure model weights. This is useful when restoring from an ema-only checkpoint.\")\n self.model_ema = LitEma(self.model)\n if reset_num_ema_updates:\n print(\" +++++++++++ WARNING: RESETTING NUM_EMA UPDATES TO ZERO +++++++++++ \")\n assert self.use_ema\n self.model_ema.reset_num_updates()\n\n def make_cond_schedule(self, ):\n self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long)\n ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long()\n self.cond_ids[:self.num_timesteps_cond] = ids\n\n @rank_zero_only\n @torch.no_grad()\n def on_train_batch_start(self, batch, batch_idx, dataloader_idx):\n # only for very first batch\n if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt:\n assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously'\n # set rescale weight to 1./std of encodings\n print(\"### USING STD-RESCALING ###\")\n x = super().get_input(batch, self.first_stage_key)\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n del self.scale_factor\n self.register_buffer('scale_factor', 1. 
/ z.flatten().std())\n print(f\"setting self.scale_factor to {self.scale_factor}\")\n print(\"### USING STD-RESCALING ###\")\n\n def register_schedule(self,\n given_betas=None, beta_schedule=\"linear\", timesteps=1000,\n linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s)\n\n self.shorten_cond_schedule = self.num_timesteps_cond > 1\n if self.shorten_cond_schedule:\n self.make_cond_schedule()\n\n def instantiate_first_stage(self, config):\n model = instantiate_from_config(config)\n self.first_stage_model = model.eval()\n self.first_stage_model.train = disabled_train\n for param in self.first_stage_model.parameters():\n param.requires_grad = False\n\n def instantiate_cond_stage(self, config):\n if not self.cond_stage_trainable:\n if config == \"__is_first_stage__\":\n print(\"Using first stage also as cond stage.\")\n self.cond_stage_model = self.first_stage_model\n elif config == \"__is_unconditional__\":\n print(f\"Training {self.__class__.__name__} as an unconditional model.\")\n self.cond_stage_model = None\n # self.be_unconditional = True\n else:\n model = instantiate_from_config(config)\n self.cond_stage_model = model.eval()\n self.cond_stage_model.train = disabled_train\n for param in self.cond_stage_model.parameters():\n param.requires_grad = False\n else:\n assert config != '__is_first_stage__'\n assert config != '__is_unconditional__'\n model = instantiate_from_config(config)\n self.cond_stage_model = model\n\n def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False):\n denoise_row = []\n for zd in tqdm(samples, desc=desc):\n denoise_row.append(self.decode_first_stage(zd.to(self.device),\n force_not_quantize=force_no_decoder_quantization))\n n_imgs_per_row = len(denoise_row)\n denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W\n denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w')\n denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w')\n denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row)\n return denoise_grid\n\n def get_first_stage_encoding(self, encoder_posterior):\n if isinstance(encoder_posterior, DiagonalGaussianDistribution):\n z = encoder_posterior.sample()\n elif isinstance(encoder_posterior, torch.Tensor):\n z = encoder_posterior\n else:\n raise NotImplementedError(f\"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented\")\n return self.scale_factor * z\n\n def get_learned_conditioning(self, c):\n if self.cond_stage_forward is None:\n if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode):\n c = self.cond_stage_model.encode(c)\n if isinstance(c, DiagonalGaussianDistribution):\n c = c.mode()\n else:\n c = self.cond_stage_model(c)\n else:\n assert hasattr(self.cond_stage_model, self.cond_stage_forward)\n c = getattr(self.cond_stage_model, self.cond_stage_forward)(c)\n return c\n\n def meshgrid(self, h, w):\n y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1)\n x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1)\n\n arr = torch.cat([y, x], dim=-1)\n return arr\n\n def delta_border(self, h, w):\n \"\"\"\n :param h: height\n :param w: width\n :return: normalized distance to image border,\n wtith min distance = 0 at border and max dist = 0.5 at image center\n \"\"\"\n lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2)\n arr = self.meshgrid(h, w) / lower_right_corner\n dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0]\n 
dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0]\n edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0]\n return edge_dist\n\n def get_weighting(self, h, w, Ly, Lx, device):\n weighting = self.delta_border(h, w)\n weighting = torch.clip(weighting, self.split_input_params[\"clip_min_weight\"],\n self.split_input_params[\"clip_max_weight\"], )\n weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device)\n\n if self.split_input_params[\"tie_braker\"]:\n L_weighting = self.delta_border(Ly, Lx)\n L_weighting = torch.clip(L_weighting,\n self.split_input_params[\"clip_min_tie_weight\"],\n self.split_input_params[\"clip_max_tie_weight\"])\n\n L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device)\n weighting = weighting * L_weighting\n return weighting\n\n def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code\n \"\"\"\n :param x: img of size (bs, c, h, w)\n :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1])\n \"\"\"\n bs, nc, h, w = x.shape\n\n # number of crops in image\n Ly = (h - kernel_size[0]) // stride[0] + 1\n Lx = (w - kernel_size[1]) // stride[1] + 1\n\n if uf == 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params)\n\n weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx))\n\n elif uf > 1 and df == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf),\n dilation=1, padding=0,\n stride=(stride[0] * uf, stride[1] * uf))\n fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx))\n\n elif df > 1 and uf == 1:\n fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride)\n unfold = torch.nn.Unfold(**fold_params)\n\n fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df),\n dilation=1, padding=0,\n stride=(stride[0] // df, stride[1] // df))\n fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2)\n\n weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype)\n normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap\n weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx))\n\n else:\n raise NotImplementedError\n\n return fold, unfold, normalization, weighting\n\n @torch.no_grad()\n def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False,\n cond_key=None, return_original_cond=False, bs=None, return_x=False):\n x = super().get_input(batch, k)\n if bs is not None:\n x = x[:bs]\n x = x.to(self.device)\n encoder_posterior = self.encode_first_stage(x)\n z = self.get_first_stage_encoding(encoder_posterior).detach()\n\n if self.model.conditioning_key is not None and 
not self.force_null_conditioning:\n if cond_key is None:\n cond_key = self.cond_stage_key\n if cond_key != self.first_stage_key:\n if cond_key in ['caption', 'coordinates_bbox', \"txt\"]:\n xc = batch[cond_key]\n elif cond_key in ['class_label', 'cls']:\n xc = batch\n else:\n xc = super().get_input(batch, cond_key).to(self.device)\n else:\n xc = x\n if not self.cond_stage_trainable or force_c_encode:\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n c = self.get_learned_conditioning(xc.to(self.device))\n else:\n c = xc\n if bs is not None:\n c = c[:bs]\n\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n ckey = __conditioning_keys__[self.model.conditioning_key]\n c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y}\n\n else:\n c = None\n xc = None\n if self.use_positional_encodings:\n pos_x, pos_y = self.compute_latent_shifts(batch)\n c = {'pos_x': pos_x, 'pos_y': pos_y}\n out = [z, c]\n if return_first_stage_outputs:\n xrec = self.decode_first_stage(z)\n out.extend([x, xrec])\n if return_x:\n out.extend([x])\n if return_original_cond:\n out.append(xc)\n return out\n\n @torch.no_grad()\n def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False):\n if predict_cids:\n if z.dim() == 4:\n z = torch.argmax(z.exp(), dim=1).long()\n z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None)\n z = rearrange(z, 'b h w c -> b c h w').contiguous()\n\n z = 1. / self.scale_factor * z\n return self.first_stage_model.decode(z)\n\n @torch.no_grad()\n def encode_first_stage(self, x):\n return self.first_stage_model.encode(x)\n\n def shared_step(self, batch, **kwargs):\n x, c = self.get_input(batch, self.first_stage_key)\n loss = self(x, c)\n return loss\n\n def forward(self, x, c, *args, **kwargs):\n t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long()\n if self.model.conditioning_key is not None:\n assert c is not None\n if self.cond_stage_trainable:\n c = self.get_learned_conditioning(c)\n if self.shorten_cond_schedule: # TODO: drop this option\n tc = self.cond_ids[t].to(self.device)\n c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))\n return self.p_losses(x, c, t, *args, **kwargs)\n\n def apply_model(self, x_noisy, t, cond, return_ids=False):\n if isinstance(cond, dict):\n # hybrid case, cond is expected to be a dict\n pass\n else:\n if not isinstance(cond, list):\n cond = [cond]\n key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn'\n cond = {key: cond}\n\n x_recon = self.model(x_noisy, t, **cond)\n\n if isinstance(x_recon, tuple) and not return_ids:\n return x_recon[0]\n else:\n return x_recon\n\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \\\n extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape)\n\n def _prior_bpd(self, x_start):\n \"\"\"\n Get the prior KL term for the variational lower-bound, measured in\n bits-per-dim.\n This term can't be optimized, as it only depends on the encoder.\n :param x_start: the [N x C x ...] 
tensor of inputs.\n :return: a batch of [N] KL values (in bits), one per batch element.\n \"\"\"\n batch_size = x_start.shape[0]\n t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device)\n qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t)\n kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0)\n return mean_flat(kl_prior) / np.log(2.0)\n\n def p_losses(self, x_start, cond, t, noise=None):\n noise = default(noise, lambda: torch.randn_like(x_start))\n x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise)\n model_output = self.apply_model(x_noisy, t, cond)\n\n loss_dict = {}\n prefix = 'train' if self.training else 'val'\n\n if self.parameterization == \"x0\":\n target = x_start\n elif self.parameterization == \"eps\":\n target = noise\n elif self.parameterization == \"v\":\n target = self.get_v(x_start, noise, t)\n else:\n raise NotImplementedError()\n # change loss here\n #print(cond['c_concat'][0].shape)\n masks = cond['c_concat'][0][:,0,:,:]\n \n loss_simple = self.get_loss(model_output, target, masks, mean=False).sum([1, 2, 3])\n for i in range(len(loss_simple)):\n loss_simple[i] = loss_simple[i]/(torch.count_nonzero(masks[i]) * 4)\n loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()})\n\n logvar_t = self.logvar[t].to(self.device)\n loss = loss_simple / torch.exp(logvar_t) + logvar_t\n # loss = loss_simple / torch.exp(self.logvar) + self.logvar\n if self.learn_logvar:\n loss_dict.update({f'{prefix}/loss_gamma': loss.mean()})\n loss_dict.update({'logvar': self.logvar.data.mean()})\n\n loss = self.l_simple_weight * loss.mean()\n\n loss_vlb = self.get_loss(model_output, target, masks, mean=False).mean(dim=(1, 2, 3))\n loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean()\n loss_dict.update({f'{prefix}/loss_vlb': loss_vlb})\n loss += (self.original_elbo_weight * loss_vlb)\n loss_dict.update({f'{prefix}/loss': loss})\n\n return loss, loss_dict\n\n def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False,\n return_x0=False, score_corrector=None, corrector_kwargs=None):\n t_in = t\n model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids)\n\n if score_corrector is not None:\n assert self.parameterization == \"eps\"\n model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs)\n\n if return_codebook_ids:\n model_out, logits = model_out\n\n if self.parameterization == \"eps\":\n x_recon = self.predict_start_from_noise(x, t=t, noise=model_out)\n elif self.parameterization == \"x0\":\n x_recon = model_out\n else:\n raise NotImplementedError()\n\n if clip_denoised:\n x_recon.clamp_(-1., 1.)\n if quantize_denoised:\n x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon)\n model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t)\n if return_codebook_ids:\n return model_mean, posterior_variance, posterior_log_variance, logits\n elif return_x0:\n return model_mean, posterior_variance, posterior_log_variance, x_recon\n else:\n return model_mean, posterior_variance, posterior_log_variance\n\n @torch.no_grad()\n def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False,\n return_codebook_ids=False, quantize_denoised=False, return_x0=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None):\n b, *_, device = *x.shape, x.device\n outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised,\n 
return_codebook_ids=return_codebook_ids,\n quantize_denoised=quantize_denoised,\n return_x0=return_x0,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if return_codebook_ids:\n raise DeprecationWarning(\"Support dropped.\")\n model_mean, _, model_log_variance, logits = outputs\n elif return_x0:\n model_mean, _, model_log_variance, x0 = outputs\n else:\n model_mean, _, model_log_variance = outputs\n\n noise = noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n # no noise when t == 0\n nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1)))\n\n if return_codebook_ids:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1)\n if return_x0:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0\n else:\n return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise\n\n @torch.no_grad()\n def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False,\n img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0.,\n score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None,\n log_every_t=None):\n if not log_every_t:\n log_every_t = self.log_every_t\n timesteps = self.num_timesteps\n if batch_size is not None:\n b = batch_size if batch_size is not None else shape[0]\n shape = [batch_size] + list(shape)\n else:\n b = batch_size = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=self.device)\n else:\n img = x_T\n intermediates = []\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation',\n total=timesteps) if verbose else reversed(\n range(0, timesteps))\n if type(temperature) == float:\n temperature = [temperature] * timesteps\n\n for i in iterator:\n ts = torch.full((b,), i, device=self.device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img, x0_partial = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised, return_x0=True,\n temperature=temperature[i], noise_dropout=noise_dropout,\n score_corrector=score_corrector, corrector_kwargs=corrector_kwargs)\n if mask is not None:\n assert x0 is not None\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. 
- mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(x0_partial)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_loop(self, cond, shape, return_intermediates=False,\n x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, start_T=None,\n log_every_t=None):\n\n if not log_every_t:\n log_every_t = self.log_every_t\n device = self.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n intermediates = [img]\n if timesteps is None:\n timesteps = self.num_timesteps\n\n if start_T is not None:\n timesteps = min(timesteps, start_T)\n iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed(\n range(0, timesteps))\n\n if mask is not None:\n assert x0 is not None\n assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match\n\n for i in iterator:\n ts = torch.full((b,), i, device=device, dtype=torch.long)\n if self.shorten_cond_schedule:\n assert self.model.conditioning_key != 'hybrid'\n tc = self.cond_ids[ts].to(cond.device)\n cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond))\n\n img = self.p_sample(img, cond, ts,\n clip_denoised=self.clip_denoised,\n quantize_denoised=quantize_denoised)\n if mask is not None:\n img_orig = self.q_sample(x0, ts)\n img = img_orig * mask + (1. - mask) * img\n\n if i % log_every_t == 0 or i == timesteps - 1:\n intermediates.append(img)\n if callback: callback(i)\n if img_callback: img_callback(img, i)\n\n if return_intermediates:\n return img, intermediates\n return img\n\n @torch.no_grad()\n def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None,\n verbose=True, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, shape=None, **kwargs):\n if shape is None:\n shape = (batch_size, self.channels, self.image_size, self.image_size)\n if cond is not None:\n if isinstance(cond, dict):\n cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else\n list(map(lambda x: x[:batch_size], cond[key])) for key in cond}\n else:\n cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size]\n return self.p_sample_loop(cond,\n shape,\n return_intermediates=return_intermediates, x_T=x_T,\n verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised,\n mask=mask, x0=x0)\n\n @torch.no_grad()\n def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs):\n if ddim:\n ddim_sampler = DDIMSampler(self)\n shape = (self.channels, self.image_size, self.image_size)\n samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size,\n shape, cond, verbose=False, **kwargs)\n\n else:\n samples, intermediates = self.sample(cond=cond, batch_size=batch_size,\n return_intermediates=True, **kwargs)\n\n return samples, intermediates\n\n @torch.no_grad()\n def get_unconditional_conditioning(self, batch_size, null_label=None):\n if null_label is not None:\n xc = null_label\n if isinstance(xc, ListConfig):\n xc = list(xc)\n if isinstance(xc, dict) or isinstance(xc, list):\n c = self.get_learned_conditioning(xc)\n else:\n if hasattr(xc, \"to\"):\n xc = xc.to(self.device)\n c = self.get_learned_conditioning(xc)\n else:\n if self.cond_stage_key in [\"class_label\", \"cls\"]:\n xc = self.cond_stage_model.get_unconditional_conditioning(batch_size, device=self.device)\n return 
self.get_learned_conditioning(xc)\n else:\n raise NotImplementedError(\"todo\")\n if isinstance(c, list): # in case the encoder gives us a list\n for i in range(len(c)):\n c[i] = repeat(c[i], '1 ... -> b ...', b=batch_size).to(self.device)\n else:\n c = repeat(c, '1 ... -> b ...', b=batch_size).to(self.device)\n return c\n\n @torch.no_grad()\n def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=50, ddim_eta=0., return_keys=None,\n quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True,\n plot_diffusion_rows=True, unconditional_guidance_scale=1., unconditional_guidance_label=None,\n use_ema_scope=True,\n **kwargs):\n ema_scope = self.ema_scope\n use_ddim = ddim_steps is not None\n\n log = dict()\n z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=N)\n N = min(x.shape[0], N)\n n_row = min(x.shape[0], n_row)\n log[\"inputs\"] = x\n log[\"reconstruction\"] = xrec\n if self.model.conditioning_key is not None:\n if hasattr(self.cond_stage_model, \"decode\"):\n xc = self.cond_stage_model.decode(c)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in [\"caption\", \"txt\"]:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[self.cond_stage_key], size=x.shape[2] // 25)\n log[\"conditioning\"] = xc\n elif self.cond_stage_key in ['class_label', \"cls\"]:\n try:\n xc = log_txt_as_img((x.shape[2], x.shape[3]), batch[\"human_label\"], size=x.shape[2] // 25)\n log['conditioning'] = xc\n except KeyError:\n # probably no \"human_label\" in batch\n pass\n elif isimage(xc):\n log[\"conditioning\"] = xc\n if ismap(xc):\n log[\"original_conditioning\"] = self.to_rgb(xc)\n\n if plot_diffusion_rows:\n # get diffusion row\n diffusion_row = list()\n z_start = z[:n_row]\n for t in range(self.num_timesteps):\n if t % self.log_every_t == 0 or t == self.num_timesteps - 1:\n t = repeat(torch.tensor([t]), '1 -> b', b=n_row)\n t = t.to(self.device).long()\n noise = torch.randn_like(z_start)\n z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise)\n diffusion_row.append(self.decode_first_stage(z_noisy))\n\n diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W\n diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w')\n diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w')\n diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0])\n log[\"diffusion_row\"] = diffusion_grid\n\n if sample:\n # get denoise row\n with ema_scope(\"Sampling\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True)\n x_samples = self.decode_first_stage(samples)\n log[\"samples\"] = x_samples\n if plot_denoise_rows:\n denoise_grid = self._get_denoise_row_from_list(z_denoise_row)\n log[\"denoise_row\"] = denoise_grid\n\n if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance(\n self.first_stage_model, IdentityFirstStage):\n # also display when quantizing x0 while sampling\n with ema_scope(\"Plotting Quantized Denoised\"):\n samples, z_denoise_row = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n quantize_denoised=True)\n # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True,\n # quantize_denoised=True)\n x_samples = 
self.decode_first_stage(samples.to(self.device))\n log[\"samples_x0_quantized\"] = x_samples\n\n if unconditional_guidance_scale > 1.0:\n uc = self.get_unconditional_conditioning(N, unconditional_guidance_label)\n if self.model.conditioning_key == \"crossattn-adm\":\n uc = {\"c_crossattn\": [uc], \"c_adm\": c[\"c_adm\"]}\n with ema_scope(\"Sampling with classifier-free guidance\"):\n samples_cfg, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,\n ddim_steps=ddim_steps, eta=ddim_eta,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=uc,\n )\n x_samples_cfg = self.decode_first_stage(samples_cfg)\n log[f\"samples_cfg_scale_{unconditional_guidance_scale:.2f}\"] = x_samples_cfg\n\n if inpaint:\n # make a simple center square\n b, h, w = z.shape[0], z.shape[2], z.shape[3]\n mask = torch.ones(N, h, w).to(self.device)\n # zeros will be filled in\n mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.\n mask = mask[:, None, ...]\n with ema_scope(\"Plotting Inpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_inpainting\"] = x_samples\n log[\"mask\"] = mask\n\n # outpaint\n mask = 1. - mask\n with ema_scope(\"Plotting Outpaint\"):\n samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim, eta=ddim_eta,\n ddim_steps=ddim_steps, x0=z[:N], mask=mask)\n x_samples = self.decode_first_stage(samples.to(self.device))\n log[\"samples_outpainting\"] = x_samples\n\n if plot_progressive_rows:\n with ema_scope(\"Plotting Progressives\"):\n img, progressives = self.progressive_denoising(c,\n shape=(self.channels, self.image_size, self.image_size),\n batch_size=N)\n prog_row = self._get_denoise_row_from_list(progressives, desc=\"Progressive Generation\")\n log[\"progressive_row\"] = prog_row\n\n if return_keys:\n if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0:\n return log\n else:\n return {key: log[key] for key in return_keys}\n return log\n\n def configure_optimizers(self):\n lr = self.learning_rate\n params = list(self.model.parameters())\n if self.cond_stage_trainable:\n print(f\"{self.__class__.__name__}: Also optimizing conditioner params!\")\n params = params + list(self.cond_stage_model.parameters())\n if self.learn_logvar:\n print('Diffusion model optimizing logvar')\n params.append(self.logvar)\n opt = torch.optim.AdamW(params, lr=lr)\n if self.use_scheduler:\n assert 'target' in self.scheduler_config\n scheduler = instantiate_from_config(self.scheduler_config)\n\n print(\"Setting up LambdaLR scheduler...\")\n scheduler = [\n {\n 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule),\n 'interval': 'step',\n 'frequency': 1\n }]\n return [opt], scheduler\n return opt\n\n @torch.no_grad()\n def to_rgb(self, x):\n x = x.float()\n if not hasattr(self, \"colorize\"):\n self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x)\n x = nn.functional.conv2d(x, weight=self.colorize)\n x = 2. 
* (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "LatentInpaintDiffusion", "path": "ldm/models/diffusion/ddpm.py", "snippet": "class LatentInpaintDiffusion(LatentDiffusion):\n def __init__(\n self,\n concat_keys=(\"mask\", \"masked_image\"),\n masked_image_key=\"masked_image\",\n finetune_keys=None,\n *args,\n **kwargs,\n ):\n super().__init__(*args, **kwargs)\n self.masked_image_key = masked_image_key\n assert self.masked_image_key in concat_keys\n self.concat_keys = concat_keys\n\n\n @torch.no_grad()\n def get_input(\n self, batch, k, cond_key=None, bs=None, return_first_stage_outputs=False\n ):\n # note: restricted to non-trainable encoders currently\n assert (\n not self.cond_stage_trainable\n ), \"trainable cond stages not yet supported for inpainting\"\n z, c, x, xrec, xc = super().get_input(\n batch,\n self.first_stage_key,\n return_first_stage_outputs=True,\n force_c_encode=True,\n return_original_cond=True,\n bs=bs,\n )\n\n assert exists(self.concat_keys)\n c_cat = list()\n for ck in self.concat_keys:\n cc = (\n rearrange(batch[ck], \"b h w c -> b c h w\")\n .to(memory_format=torch.contiguous_format)\n .float()\n )\n if bs is not None:\n cc = cc[:bs]\n cc = cc.to(self.device)\n bchw = z.shape\n if ck != self.masked_image_key:\n cc = torch.nn.functional.interpolate(cc, size=bchw[-2:])\n else:\n cc = self.get_first_stage_encoding(self.encode_first_stage(cc))\n c_cat.append(cc)\n c_cat = torch.cat(c_cat, dim=1)\n all_conds = {\"c_concat\": [c_cat], \"c_crossattn\": [c]}\n if return_first_stage_outputs:\n return z, all_conds, x, xrec, xc\n return z, all_conds" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('font/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. 
Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None, # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n dynamic_threshold=None,\n ucg_schedule=None,\n **kwargs\n ):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n ctmp = conditioning[list(conditioning.keys())[0]]\n while isinstance(ctmp, list): ctmp = ctmp[0]\n cbs = ctmp.shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n elif isinstance(conditioning, list):\n for ctmp in conditioning:\n if ctmp.shape[0] != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold,\n ucg_schedule=ucg_schedule\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None, dynamic_threshold=None,\n ucg_schedule=None):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index 
= total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * (1. - mask) + mask * img\n\n if ucg_schedule is not None:\n assert len(ucg_schedule) == len(time_range)\n unconditional_guidance_scale = ucg_schedule[i]\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n dynamic_threshold=dynamic_threshold)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,\n dynamic_threshold=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [torch.cat([\n unconditional_conditioning[k][i],\n c[k][i]]) for i in range(len(c[k]))]\n else:\n c_in[k] = torch.cat([\n unconditional_conditioning[k],\n c[k]])\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n model_output = model_uncond + unconditional_guidance_scale * (model_t - model_uncond)\n\n if self.model.parameterization == \"v\":\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\", 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n if self.model.parameterization != \"v\":\n pred_x0 = (x - sqrt_one_minus_at * e_t) / 
a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def encode(self, x0, c, t_enc, use_original_steps=False, return_intermediates=None,\n unconditional_guidance_scale=1.0, unconditional_conditioning=None, callback=None):\n num_reference_steps = self.ddpm_num_timesteps if use_original_steps else self.ddim_timesteps.shape[0]\n\n assert t_enc <= num_reference_steps\n num_steps = t_enc\n\n if use_original_steps:\n alphas_next = self.alphas_cumprod[:num_steps]\n alphas = self.alphas_cumprod_prev[:num_steps]\n else:\n alphas_next = self.ddim_alphas[:num_steps]\n alphas = torch.tensor(self.ddim_alphas_prev[:num_steps])\n\n x_next = x0\n intermediates = []\n inter_steps = []\n for i in tqdm(range(num_steps), desc='Encoding Image'):\n t = torch.full((x0.shape[0],), i, device=self.model.device, dtype=torch.long)\n if unconditional_guidance_scale == 1.:\n noise_pred = self.model.apply_model(x_next, t, c)\n else:\n assert unconditional_conditioning is not None\n e_t_uncond, noise_pred = torch.chunk(\n self.model.apply_model(torch.cat((x_next, x_next)), torch.cat((t, t)),\n torch.cat((unconditional_conditioning, c))), 2)\n noise_pred = e_t_uncond + unconditional_guidance_scale * (noise_pred - e_t_uncond)\n\n xt_weighted = (alphas_next[i] / alphas[i]).sqrt() * x_next\n weighted_noise_pred = alphas_next[i].sqrt() * (\n (1 / alphas_next[i] - 1).sqrt() - (1 / alphas[i] - 1).sqrt()) * noise_pred\n x_next = xt_weighted + weighted_noise_pred\n if return_intermediates and i % (\n num_steps // return_intermediates) == 0 and i < num_steps - 1:\n intermediates.append(x_next)\n inter_steps.append(i)\n elif return_intermediates and i >= num_steps - 2:\n intermediates.append(x_next)\n inter_steps.append(i)\n if callback: callback(i)\n\n out = {'x_encoded': x_next, 'intermediate_steps': inter_steps}\n if return_intermediates:\n out.update({'intermediates': intermediates})\n return x_next, out\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 +\n extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise)\n\n @torch.no_grad()\n def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None,\n use_original_steps=False, callback=None):\n\n timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} 
timesteps\")\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n if callback: callback(i)\n return x_dec" } ]
import einops\nimport torch\nimport torch as th\nimport torch.nn as nn\nfrom ldm.modules.diffusionmodules.util import (\n    conv_nd,\n    linear,\n    zero_module,\n    timestep_embedding,\n)\nfrom einops import rearrange, repeat\nfrom torchvision.utils import make_grid\nfrom ldm.modules.attention import SpatialTransformer\nfrom ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock\nfrom ldm.models.diffusion.ddpm import LatentDiffusion, LatentInpaintDiffusion\nfrom ldm.util import log_txt_as_img, exists, instantiate_from_config\nfrom ldm.models.diffusion.ddim import DDIMSampler\nfrom omegaconf.listconfig import ListConfig
21272
class ControlledUnetModel(UNetModel):\n    def forward(self, x, timesteps=None, context=None, control=None, c_concat=None, only_mid_control=False, **kwargs):\n        hs = []\n        with torch.no_grad():
class ControlledUnetModel(UNetModel):\n    def forward(self, x, timesteps=None, context=None, control=None, c_concat=None, only_mid_control=False, **kwargs):\n        hs = []\n        with torch.no_grad():
t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False)
3
2023-11-24 10:19:23+00:00
24k
eth-sri/language-model-arithmetic
src/model_arithmetic/evaluation.py
[ { "identifier": "BaseClass", "path": "src/model_arithmetic/base.py", "snippet": "class BaseClass:\n \"\"\"\n Base class for providing a serialization and deserialization mechanism.\n \"\"\"\n def __init__(self, **kwargs):\n \"\"\"\n Instantiates the base class with keyword arguments\n \n Args:\n kwargs (dict): Keyword arguments\n \"\"\"\n self.kwargs = kwargs\n self.__dict__.update(kwargs)\n\n def generate_list_settings(self, list_):\n \"\"\"\n Converts provided list to a normalized list that can be stored as a json object to serialize.\n \n Args:\n list_ (List): List to be converted\n Returns\n Transformed normal list\n \"\"\"\n normal_list = []\n for item in list_:\n if isinstance(item, BaseClass):\n normal_list.append(item.generate_settings())\n elif isinstance(item, dict):\n normal_list.append(self.generate_kwarg_setting(item))\n elif isinstance(item, (tuple, list)):\n normal_list.append(self.generate_list_settings(item))\n else:\n normal_list.append(item)\n return normal_list\n\n def generate_kwarg_setting(self, kwargs):\n \"\"\"\n Converts provided keyword arguments to normal kwargs in terms of serialization.\n\n Args:\n kwargs (dict): kwargs to be converted.\n \"\"\"\n normal_kwargs = dict()\n for kwarg in kwargs:\n if isinstance(kwargs[kwarg], BaseClass):\n normal_kwargs[kwarg] = kwargs[kwarg].generate_settings()\n elif isinstance(kwargs[kwarg], (list, tuple)):\n normal_kwargs[kwarg] = self.generate_list_settings(kwargs[kwarg])\n elif isinstance(kwargs[kwarg], dict):\n normal_kwargs[kwarg] = self.generate_kwarg_setting(kwargs[kwarg])\n else:\n normal_kwargs[kwarg] = kwargs[kwarg]\n \n return normal_kwargs\n\n\n def generate_settings(self):\n \"\"\"\n Generates settings for the instance of the BaseClass.\n\n Returns\n Settings in dictionary format.\n \"\"\"\n settings = {\n \"class\": self.__class__.__name__, \n **self.generate_kwarg_setting({kwarg: self.__dict__[kwarg] for kwarg in self.kwargs}), \n }\n return settings\n \n def save(self, path):\n \"\"\"\n Saves the generated settings into a JSON file at a specified path.\n \n Args:\n path (string): The file path at which the settings have to be saved.\n \"\"\"\n settings = self.generate_settings()\n\n if os.path.dirname(path) != \"\":\n os.makedirs(os.path.dirname(path), exist_ok=True)\n \n with open(path, \"w\") as f:\n json.dump(settings, f, indent=2)\n\n @classmethod\n def get_all_subclasses(cls):\n \"\"\"\n Returns all subclasses of the BaseClass.\n \"\"\"\n all_subclasses = []\n\n for subclass in cls.__subclasses__():\n all_subclasses.append(subclass)\n all_subclasses.extend(subclass.get_all_subclasses())\n\n return all_subclasses\n\n @staticmethod\n def find_class(cls_name):\n \"\"\"\n Searches for a class that matches the given class name.\n\n Args:\n cls_name (string): Class name to be matched\n \"\"\"\n for possible_cls in BaseClass.get_all_subclasses():\n if possible_cls.__name__ == cls_name:\n return possible_cls\n return None\n\n @staticmethod\n def load_from_list_settings(list_):\n \"\"\"\n Deserializes the list saved settings to instantiate the objects.\n\n Args:\n list_ (List): List of saved settings\n \"\"\"\n output_list = []\n for item in list_:\n if isinstance(item, dict):\n output_list.append(BaseClass.load_from_dict(item))\n elif isinstance(item, (tuple, list)):\n output_list.append(BaseClass.load_from_list_settings(item))\n else:\n output_list.append(item)\n\n return output_list\n \n @staticmethod\n def load_from_dict(dict_):\n \"\"\"\n Deserializes the dictionary saved settings to instantiate the 
objects.\n\n Args:\n dict_ (dict): Dictionary containing saved settings\n \"\"\"\n other_class = BaseClass.find_class(dict_.get(\"class\", None))\n if other_class is not None:\n return other_class.load_from_settings(dict_)\n \n output_dict = dict()\n for key in dict_:\n if isinstance(dict_[key], dict):\n output_dict[key] = BaseClass.load_from_dict(dict_[key])\n elif isinstance(dict_[key], (tuple, list)):\n output_dict[key] = BaseClass.load_from_list_settings(dict_[key])\n else:\n output_dict[key] = dict_[key]\n\n return output_dict\n\n @staticmethod\n def load_from_settings(settings):\n \"\"\"\n Deserializes the saved settings to instantiate the object.\n\n Args:\n settings (dict): Saved settings\n \"\"\"\n cls = BaseClass.find_class(settings[\"class\"])\n\n if cls is None:\n logger.error(f\"Could not find class {settings['class']} when loading class.\")\n return None\n\n kwargs = dict()\n for kwarg in settings:\n if kwarg == \"class\":\n continue\n if isinstance(settings[kwarg], dict):\n kwargs[kwarg] = BaseClass.load_from_dict(settings[kwarg])\n elif isinstance(settings[kwarg], (tuple, list)):\n kwargs[kwarg] = BaseClass.load_from_list_settings(settings[kwarg])\n else:\n kwargs[kwarg] = settings[kwarg]\n\n return cls(**kwargs)\n\n @classmethod\n def _load(cls, path, **kwargs):\n \"\"\"\n Loads the settings from the JSON file at the specified path.\n \n Args:\n path (string): The file path from which the settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n for kwarg in kwargs:\n settings[kwarg] = kwargs[kwarg]\n return cls.load_from_settings(settings)\n\n @staticmethod\n def load(path, **kwargs):\n \"\"\"\n Loads the settings of the class from the JSON file.\n\n Args:\n path (string): The file path from which the class settings have to be loaded.\n kwargs (dict): Additional keywords arguments\n \"\"\"\n with open(path, \"r\") as f:\n settings = json.load(f)\n cls = BaseClass.find_class(settings[\"class\"])\n return cls._load(path, **kwargs)\n\n def __str__(self) -> str:\n \"\"\"\n Returns a string representation of the class object.\n \"\"\"\n return f\"{self.__class__.__name__}({self.kwargs})\"\n \n def __eq__(self, o: object) -> bool:\n \"\"\"\n Checks whether the provided object is equal to the current object.\n\n Args:\n o (object): Object to compare\n \"\"\"\n if not isinstance(o, BaseClass):\n return False\n \n other_settings = o.generate_settings()\n settings = self.generate_settings()\n\n return other_settings == settings" }, { "identifier": "CustomDataset", "path": "src/model_arithmetic/dataset.py", "snippet": "class CustomDataset(Dataset):\n \"\"\"\n A custom PyTorch Dataset class for tokenized sequence data.\n\n Uses a tokenizer to convert text data from a DataFrame to input_ids (tokens), \n and optionally attaches label data if present in the DataFrame.\n \"\"\"\n def __init__(self, tokenizer, df, max_tokens=128, min_tokens=1, random_cutoff=False):\n \"\"\"\n Initializes the CustomDataset object.\n\n Args:\n tokenizer (Tokenizer): The tokenizer to be used for the text data.\n df (pandas.DataFrame): DataFrame containing the text data, and optionally labels.\n max_tokens (int, optional): Maximum number of tokens per sequence. Defaults to 128.\n min_tokens (int, optional): Minimum number of tokens per sequence. Defaults to 1.\n random_cutoff (bool, optional): Whether to randomly cut off the number of tokens per sequence. 
Defaults to False.\n \"\"\"\n super().__init__()\n data = df.dropna()\n self.tokenized_dataset = [\n tokenizer(sentence, return_tensors=\"pt\", truncation=True, max_length=max_tokens).input_ids.view(-1) for sentence in tqdm(data[\"text\"].tolist())\n ]\n\n self.df = data\n self.has_labels = \"label\" in data.columns\n self.min_tokens = min_tokens\n self.labels = None\n if self.has_labels:\n self.labels = data[\"label\"].values\n \n self.random_cutoff = random_cutoff\n\n def __len__(self):\n \"\"\"\n Returns the length of the tokenized dataset, \n i.e., the number of tokenized sequences.\n \n Returns:\n int: Number of tokenized sequences.\n \"\"\"\n return len(self.tokenized_dataset)\n\n def __getitem__(self, idx):\n \"\"\"\n Fetches an item from the dataset at the given index.\n\n If labels are available, also fetches the associated label.\n If `random_cutoff` is true, may truncate sequence length randomly.\n\n Args:\n idx (int): Index of the required sequence.\n\n Returns:\n dict: A dictionary with the following structure-\n {\n \"input_ids\": torch.Tensor (Tokenized sequence),\n \"labels\": torch.Tensor (Associated label, if available)\n }\n \"\"\"\n cutoff = len(self.tokenized_dataset[idx])\n if self.random_cutoff:\n cutoff = torch.randint(min(cutoff, self.min_tokens), cutoff + 1, (1,)).item()\n \n if not self.has_labels:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff]}\n else:\n return {\"input_ids\": self.tokenized_dataset[idx][:cutoff], \"labels\": torch.tensor([self.labels[idx]], dtype=torch.long)}" }, { "identifier": "load_model", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_model(dir_or_model, classification=False, token_classification=False, return_tokenizer=False, dtype=torch.bfloat16, load_dtype=True, \n rl=False, peft_config=None):\n \"\"\"\n This function is used to load a model based on several parameters including the type of task it is targeted to perform.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n\n classification (bool): If True, loads the model for sequence classification.\n\n token_classification (bool): If True, loads the model for token classification.\n\n return_tokenizer (bool): If True, returns the tokenizer along with the model.\n\n dtype: The data type that PyTorch should use internally to store the model’s parameters and do the computation.\n\n load_dtype (bool): If False, sets dtype as torch.float32 regardless of the passed dtype value.\n\n rl (bool): If True, loads model specifically designed to be used in reinforcement learning environment.\n\n peft_config: Configuration details for Peft models. 
\n \n Returns:\n It returns a model for the required task along with its tokenizer, if specified.\n \"\"\"\n log(logger.debug, f\"Loading model for {dir_or_model} with {classification}, {dtype}, {load_dtype}\")\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if not load_dtype:\n dtype = torch.float32\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n\n original_model_name = model_name\n\n if classification:\n model = AutoModelForSequenceClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\") # to investigate: calling torch_dtype here fails.\n elif token_classification:\n model = AutoModelForTokenClassification.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n elif rl:\n model = AutoModelForCausalLMWithValueHead.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, \n peft_config=peft_config, device_map=\"auto\")\n else:\n if model_name.endswith(\"GPTQ\") or model_name.endswith(\"GGML\"):\n model = AutoGPTQForCausalLM.from_quantized(model_name,\n use_safetensors=True,\n trust_remote_code=True,\n # use_triton=True, # breaks currently, unfortunately generation time of the GPTQ model is quite slow\n quantize_config=None, device_map=\"auto\")\n else:\n model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True, torch_dtype=dtype, use_auth_token=True, device_map=\"auto\")\n\n if is_lora_dir:\n model = PeftModel.from_pretrained(model, dir_or_model)\n \n try:\n tokenizer = load_tokenizer(original_model_name)\n model.config.pad_token_id = tokenizer.pad_token_id\n except Exception:\n pass\n if return_tokenizer:\n return model, load_tokenizer(original_model_name)\n return model" }, { "identifier": "load_tokenizer", "path": "src/model_arithmetic/basic_model_loader.py", "snippet": "def load_tokenizer(dir_or_model):\n \"\"\"\n This function is used to load the tokenizer for a specific pre-trained model.\n \n Args:\n dir_or_model: It can be either a directory containing the pre-training model configuration details or a pretrained model.\n \n Returns:\n It returns a tokenizer that can convert text to tokens for the specific model input.\n \"\"\"\n log(logger.debug, f\"Loading tokenizer for {dir_or_model}\")\n\n is_lora_dir = os.path.isfile(os.path.join(dir_or_model, \"adapter_config.json\"))\n\n if is_lora_dir:\n loaded_json = json.load(open(os.path.join(dir_or_model, \"adapter_config.json\"), \"r\"))\n model_name = loaded_json[\"base_model_name_or_path\"]\n else:\n model_name = dir_or_model\n \n if os.path.isfile(os.path.join(dir_or_model, \"config.json\")):\n loaded_json = json.load(open(os.path.join(dir_or_model, \"config.json\"), \"r\"))\n model_name = loaded_json[\"_name_or_path\"]\n\n tokenizer = AutoTokenizer.from_pretrained(model_name)\n\n if tokenizer.pad_token is None:\n log(logger.debug, \"Setting pad token to eos token\")\n tokenizer.pad_token = tokenizer.eos_token\n tokenizer.pad_token_id = tokenizer.eos_token_id\n \n return tokenizer" }, { "identifier": "ModelArithmetic", "path": "src/model_arithmetic/model_arithmetic.py", "snippet": "class ModelArithmetic(PreTrainedModel):\n \"\"\"\n Main class for prompt arithmetic. 
Handles the generation of text based on the formula.\n \"\"\"\n SAVE_FILE = \"prompt_arithmetic.json\"\n _supports_sdpa = True\n\n def __init__(self, formula : Operator, default_model : str = None, dtype=torch.bfloat16, intermediate_argmax : bool = False, epsilon = 1e-12, \n retroactive_operators = [], calculate_statistics=True, needs_input_tokens_lm_eval=False, lm_eval_task=None, tokenizer=None):\n \"\"\"Initializes the prompt arithmetic model.\n\n Args:\n formula (Operator): The formula for which generations need to be made.\n default_model (str, optional): Default model for RunnableOperators that don't have a model associated with them. Defaults to None.\n dtype (torch.dtype, optional): Dtype of the models to load by default. Defaults to torch.bfloat16.\n intermediate_argmax (bool, optional): Something unimportant that was tried out, but now deprecated. Defaults to False.\n epsilon (float, optional): Just some small value. Defaults to 1e-12.\n retroactive_operators (list, optional): The retroactive operators that need to be applied. Defaults to [].\n calculate_statistics (bool, optional): Whether or not to calculate some statistics, can be a tad bit expensive. Defaults to True.\n needs_input_tokens_lm_eval (bool, optional): Whether or not lm eval is used and whether or not the task needs the input tokens. Defaults to False. Only set to true for an lm eval task.\n lm_eval_task (str, optional): Name of the lm eval task. Defaults to None.\n tokenizer (transformers.tokenization_utils_base.PreTrainedTokenizerBase, optional): Tokenizer to use. Defaults to None.\n \"\"\"\n self.formula = formula.clone()\n\n self.default_model = default_model\n self.loaded_models = dict()\n self.model_prediction_history = [] # keeps track of the RunnableOperators predictions for each token (that hasn't finished computing)\n self.logprobs_history = [] # keeps track of the current probability distribution for which each token has been drawn\n self.model_last_token_prediction = [] # keeps track of the last token that has been predicted for each RunnableOperator\n \n self.output_type = namedtuple(\"ModelArithmeticOutput\", [\"logits\", \"logprobs_per_model\"])\n self.intermediate_argmax = intermediate_argmax\n self.retroactive_operators = retroactive_operators\n self.calculate_statistics = calculate_statistics\n\n self.runnable_operators = []\n for runnable_operator in self.formula.runnable_operators():\n if not any([runnable_operator.same_operator(output) for output in self.runnable_operators]):\n self.runnable_operators.append(runnable_operator)\n \n\n # sort the prompts by speculative factor, putting the one with highest speculative factor first\n # => run model with highest speculative factor first, since otherwise the computation might be wasted for the first ones\n # however, we first need to sort by run_priority and then within that by speculative factor\n self.runnable_operators = sorted(self.runnable_operators, key=lambda runnable_operator: (runnable_operator.run_priority, runnable_operator.speculative_factor), reverse=True)\n \n self.load_all_models(dtype=dtype)\n if self.default_model not in self.loaded_models:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = runnable_operator.model\n break\n if self.default_model is None:\n raise ValueError(\"Default model must be specified if not specified in an llm prompt\")\n\n self.config = self.loaded_models[str(self.default_model)].config\n\n if 
tokenizer is None:\n self.tokenizer = load_tokenizer(self.default_model)\n else:\n self.tokenizer = tokenizer\n \n self.init_runnable_operators()\n \n self.model_input_tokens = {\n runnable_operator.id(): TokenizedInput(runnable_operator, \n runnable_operator.model, \n self.loaded_models[str(runnable_operator.model)].config,\n self.tokenizer) \n for runnable_operator in self.runnable_operators\n }\n \n self.init_monitor()\n \n self.epsilon = epsilon\n \n self.word_size = len(self.tokenizer)\n \n if Compatibility is not None:\n self.lm_eval_compatibility = Compatibility(\n task_name=lm_eval_task,\n needs_input_tokens_lm_eval=needs_input_tokens_lm_eval,\n tokenizer=self.tokenizer,\n device=self.device,\n max_length=get_max_length(self.config),\n )\n else:\n self.lm_eval_compatibility = None\n \n super().__init__(self.config)\n \n def init_monitor(self):\n \"\"\"\n Initializes the monitor for the prompt arithmetic model.\n \"\"\"\n self.monitor = Monitor(self.runnable_operators)\n \n def init_runnable_operators(self):\n \"\"\"Initializes the runnable operators. This is done after the models have been loaded, because the models are needed for the runnable operators.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n runnable_operator.model = self.default_model\n runnable_operator.initialize_after_model_set()\n\n def load_all_models(self, dtype=torch.bfloat16):\n \"\"\"Loads all the models that are needed for the runnable operators. Models are never loaded twice.\n\n Args:\n dtype (torch.dtype, optional): Default Dtype of the models. Defaults to torch.bfloat16.\n \"\"\"\n if self.default_model is None:\n for runnable_operator in self.runnable_operators:\n if isinstance(runnable_operator, PromptedLLM) and runnable_operator.model is not None:\n self.default_model = str(runnable_operator.model)\n break\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.model is None:\n assert self.default_model is not None, \"Default model must be specified if not specified in prompt\"\n runnable_operator.model = self.default_model\n if runnable_operator.model not in self.loaded_models:\n model = runnable_operator.load_model(dtype=dtype)\n model.eval()\n if model is not None:\n self.loaded_models[str(runnable_operator.model)] = model\n \n if len(self.loaded_models) == 0:\n assert self.default_model is not None, \"Required to at least have one model, for now\"\n self.loaded_models[str(self.default_model)] = load_model(self.default_model, dtype=dtype)\n \n @property\n def device(self):\n \"\"\"Device of the default model. Needed for compatibility with lm_eval\n\n Returns:\n torch.device: Device of the default model.\n \"\"\"\n return self.loaded_models[str(self.default_model)].device\n\n def save_pretrained(self, path : str):\n \"\"\"Saves the model to the specified path.\n\n Args:\n path (str): Path to which to save the model\n \"\"\"\n os.makedirs(path, exist_ok=True)\n all_settings = {\n \"formula\": self.formula.generate_settings(),\n \"default_model\": self.default_model,\n }\n\n with open(os.path.join(path, self.SAVE_FILE), \"w\") as f:\n json.dump(all_settings, f, indent=4, sort_keys=True)\n\n @classmethod\n def from_pretrained(cls, path : str, dtype=torch.bfloat16):\n \"\"\"Loads the model from the specified path.\n\n Args:\n path (str): Path from which to load the model\n dtype (torch.dtype, optional): Default dtype for the models. 
Defaults to torch.bfloat16.\n\n Returns:\n ModelArithmetic: model arithmetic model\n \"\"\"\n with open(os.path.join(path, cls.SAVE_FILE), \"r\") as f:\n all_settings = json.load(f)\n all_settings[\"formula\"] = Operator.load_from_settings(all_settings[\"formula\"])\n return cls(**all_settings, dtype=dtype)\n\n \n def forward_model(self, runnable_operator, continuation_tokens, model_new_tokens=None, use_cache=False, do_speculation=False):\n \"\"\"Runs a specifc runnable operator on the continuation tokens.\n\n Args:\n runnable_operator (RunnableOperator): The runnable operator to run.\n continuation_tokens (list[list[int]]): List of tokens that need to be continued. The prompt is not included in these tokens\n model_new_tokens (list[int], optional): New tokens for the model. Defaults to None.\n use_cache (bool, optional): Whether or not to allow the model to use cache (eg key-value storage for an LLM). Defaults to False.\n do_speculation (bool, optional): Whether or not to do speculation sampling. Defaults to False.\n\n Returns:\n torch.tensor: logprobs of the model, one logprob distribution for each new token in each sample\n \"\"\"\n start_time = time.time()\n \n tokenized_input_creator = self.model_input_tokens[runnable_operator.id()]\n tokenized_inputs = tokenized_input_creator.add_continuation_tokens(continuation_tokens)\n tokenized_only_input = tokenized_input_creator.get_only_input_tokens()\n \n was_none = model_new_tokens is None\n \n if was_none:\n model_new_tokens = torch.tensor([len(continuation_tokens[i]) + 1 for i in range(len(continuation_tokens))])\n \n if len(self.model_prediction_history) < len(continuation_tokens):\n new_prediction_history = [dict() for _ in range(len(continuation_tokens))]\n else:\n new_prediction_history = [self.model_prediction_history[i].get(self.max_index_prediction_history(i), dict()) for i in range(len(continuation_tokens))]\n \n logprobs = runnable_operator.run(\n loaded_models=self.loaded_models,\n tokenized_inputs=tokenized_inputs,\n model_new_tokens=model_new_tokens,\n new_prediction_history=new_prediction_history,\n other_tokenizer=self.tokenizer,\n tokenized_only_input=tokenized_only_input, \n use_cache=use_cache,\n do_speculation=do_speculation\n )\n \n logprobs = [logprob.to(self.device) for logprob in logprobs]\n \n if was_none:\n logprobs = torch.stack(logprobs, dim=0)\n\n self.monitor.add_result(element=time.time() - start_time, runnable_operator=runnable_operator)\n return logprobs\n \n def group_complete(self, model_history):\n \"\"\"Checks which groups of runnable operators have been completely calculated and which haven't.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict[bool]: Dict mapping the group to whether it has been completely calculated or not\n \"\"\"\n # everything that is a group needs to be either all calculated or all not calculated\n group_calculated = dict()\n groups = set([runnable_operator.group for runnable_operator in self.runnable_operators if runnable_operator.group is not None])\n completed_groups = {group: True for group in groups}\n \n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is not None:\n is_calculated = model_history.get(runnable_operator.id()) is not None\n if runnable_operator.group not in group_calculated:\n group_calculated[runnable_operator.group] = is_calculated\n elif group_calculated[runnable_operator.group] != is_calculated:\n completed_groups[runnable_operator.group] = False\n return 
completed_groups\n \n def group_model_history(self, model_history):\n \"\"\"Sets the model history on which to evaluate the formula based on the groups. Removes predictions if the group hasn't been completely calculated yet.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n dict: Adjusted dict mapping\n \"\"\"\n completed_groups = self.group_complete(model_history)\n grouped_model_history = dict()\n for runnable_operator in self.runnable_operators:\n if runnable_operator.group is None or completed_groups[runnable_operator.group]:\n grouped_model_history[runnable_operator.id()] = model_history[runnable_operator.id()]\n else:\n grouped_model_history[runnable_operator.id()] = None\n \n return grouped_model_history\n \n def create_sample_logprobs(self, logprobs, temperature, top_k, top_p):\n \"\"\"Creates the logprobs for each token in each sample.\n\n Args:\n logprobs (torch.tensor): Logprobs of the model\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n torch.tensor: Logprobs for each token in each sample\n \"\"\"\n if temperature == 0:\n logprobs_argmax = torch.argmax(logprobs, dim=-1)\n logprobs = torch.nn.functional.one_hot(logprobs_argmax, num_classes=logprobs.shape[-1]).float()\n return logprobs\n logprobs = logprobs / temperature\n logprobs = top_k_top_p_filtering(logprobs.unsqueeze(0), top_k=top_k, top_p=top_p)\n return torch.softmax(logprobs, dim=-1).squeeze()\n \n \n\n def process_logprobs(self, model_history):\n \"\"\"Processes model history to get the probability distribution for the token.\n\n Args:\n model_history (dict): Dict mapping the runnable operator id to the logprobs of the model\n\n Returns:\n _type_: _description_\n \"\"\"\n init_time = time.time()\n logprobs_normalized = self.formula.evaluate(model_history)\n self.monitor.add_result(element=time.time() - init_time, indicator=\"formula_evaluation\")\n if not torch.is_tensor(logprobs_normalized):\n return None\n # logprobs_normalized = logprobs_normalized / temperature\n # logprobs_normalized = top_k_top_p_filtering(logprobs_normalized.unsqueeze(0), top_k=top_k, top_p=top_p)\n return logprobs_normalized\n \n def run_retroactive_operators(self, index, tokenized_sentence, temperature, top_k, top_p):\n \"\"\"Runs the retroactive operators on the tokenized sentence. 
\n\n Args:\n index (int): Index of the sentence in the current batch\n tokenized_sentence (list[int]): Tokenized sentence\n temperature (float): temperature to use\n top_k (int): top_k to use\n top_p (float): top_p to use\n\n Returns:\n list[int]: Adjusted tokenized sentence based on the retroactive operators and whether they accepted it.\n \"\"\"\n for operator in self.retroactive_operators:\n accepted = operator.accept(tokenized_sentence, self.tokenizer)\n if accepted < 0:\n not_accepted_token = tokenized_sentence[accepted]\n self.clear_model_prediction_history(index, tokenized_sentence, from_=len(tokenized_sentence) + accepted)\n tokenized_sentence = tokenized_sentence[:len(tokenized_sentence) + accepted]\n \n self.logprobs_history[index][len(tokenized_sentence)][not_accepted_token] = -torch.inf\n \n if torch.all(self.logprobs_history[index][len(tokenized_sentence)] == -torch.inf):\n self.logprobs_history[index][len(tokenized_sentence)] = torch.zeros_like(self.logprobs_history[index][len(tokenized_sentence)])\n \n probs_to_sample = self.create_sample_logprobs(\n self.logprobs_history[index][len(tokenized_sentence)],\n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n )\n new_token = torch.multinomial(probs_to_sample, 1).item()\n \n tokenized_sentence.append(new_token)\n return self.run_retroactive_operators(index, tokenized_sentence, temperature, top_k, top_p)\n \n return tokenized_sentence\n \n def speculation_sample(self, token, previous_models_probs, new_models_probs):\n \"\"\"Sample a token based on the previous and new model probabilities in the speculative sampling way. Also returns whether the token was accepted or not.\n\n Args:\n token (int): Token that is currently selected\n previous_models_probs (torch.tensor): Model probabilities of the previous models\n new_models_probs (torch.tensor): Model probabilities of the new models\n\n Returns:\n (int, bool): New token and whether or not the input token was accepted\n \"\"\"\n acceptance_prob = torch.minimum(torch.tensor(1.0), new_models_probs[token] / (previous_models_probs[token] + torch.tensor(self.epsilon)))\n # TODO: the next line is taking an enormous amount of time because of asynchronous computing on gpu's and requiring it to be returned immediately\n # Therefore do batch processing\n acceptance_prob = float(acceptance_prob)\n self.monitor.add_result(element=float(acceptance_prob), indicator=\"acceptance_prob\")\n # self.monitor.add_result(element=self.entropy(previous_models_probs).item(), indicator=\"entropy_previous\")\n # self.monitor.add_result(element=previous_models_probs[token].item(), indicator=\"probability_previous\")\n\n if torch.rand(1) < acceptance_prob:\n return token, True\n else:\n new_proba_distrib = torch.relu(new_models_probs - previous_models_probs)\n new_proba_distrib /= torch.sum(new_proba_distrib)\n new_token = torch.multinomial(new_proba_distrib, 1).item()\n return new_token, False\n \n \n def add_new_result(self, generated_tokens, num_new_tokens, runnable_operator, new_model_logprobs, top_p, top_k, temperature):\n \"\"\"Adds a new run of a runnable operator to the model prediction history. 
Also does speculation sampling if needed.\n\n Args:\n generated_tokens (list[list[int]]): Currently generated tokens by the model\n num_new_tokens (list[int]): Number of new tokens for each sample in the batch\n runnable_operator (RunnableOperator): Runnable operator that was run\n new_model_logprobs (List[torch.tensor]): Output of the run function of the runnable operator\n top_p (flaot): top_p to use\n top_k (int): top_k to use\n temperature (float): temperature to use\n\n Returns:\n list[bool]: For each sample in the batch, whether all tokens in that sample were kept or not\n \"\"\"\n all_kept = []\n for i in range(len(generated_tokens)):\n n_generated_tokens = len(generated_tokens[i])\n kept = True\n for n_token in range(n_generated_tokens - num_new_tokens[i] + 1, n_generated_tokens + 1):\n # initialize the model prediction history\n self.model_prediction_history[i][n_token] = self.model_prediction_history[i].get(n_token, \n {runnable_operator.id(): None for runnable_operator in self.runnable_operators})\n # check if we need to do speculation sampling, only needed when a previous token was sampled\n do_speculation_sample = n_token < n_generated_tokens\n \n # speculation sampling not needed if the model was run before \n if self.model_prediction_history[i][n_token][runnable_operator.id()] is not None:\n do_speculation_sample = False\n \n # speculation sampling not needed if all models have not been run yet: this is the first model on this token\n if all([logprob is None for logprob in self.model_prediction_history[i][n_token].values()]):\n do_speculation_sample = False\n # This means that this token was already fully accepted, so we can just continue (can happen if batch_size > 1 or when end is triggered)\n if self.max_index_prediction_history(i) > n_token:\n continue\n \n # add the new model logprobs\n self.model_prediction_history[i][n_token][runnable_operator.id()] = new_model_logprobs[i][-n_generated_tokens + n_token + num_new_tokens[i] - 1]\n \n group_model_history = self.group_model_history(self.model_prediction_history[i][n_token])\n # group_model_history needs to be separately checked, since it could be that the group is not yet fully calculated\n # also allow no logprobs runnable operators (would lead to errors) if the formula is not finished yet (if it is finished, you need to)\n if all([logprob is None for logprob in group_model_history.values()]) or (not runnable_operator.outputs_logprobs and not self.formula.is_finished(group_model_history)):\n continue\n \n # process the logprobs\n new_model_probs = self.process_logprobs(group_model_history)\n \n if self.intermediate_argmax and not self.formula.is_finished(group_model_history):\n argmax_el = torch.argmax(new_model_probs)\n new_model_probs = torch.zeros_like(new_model_probs)\n new_model_probs[argmax_el] = 1.0\n \n if do_speculation_sample:\n if self.calculate_statistics:\n self.monitor.add_result(self.expected_acceptance_prob(self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n self.create_sample_logprobs(self.logprobs_history[i].get(n_token), temperature, top_k, top_p)), \n indicator=\"expected_acceptance_prob\", runnable_operator=runnable_operator)\n\n new_token, kept = self.speculation_sample(\n token = generated_tokens[i][n_token],\n previous_models_probs=self.create_sample_logprobs(self.logprobs_history[i][n_token], temperature, top_k, top_p),\n new_models_probs=self.create_sample_logprobs(new_model_probs, temperature, top_k, top_p), \n )\n if n_token in self.model_prediction_history[i]:\n 
self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n # if not kept, we change the generated tokens and remove the model prediction history after that token\n generated_tokens[i][n_token] = new_token\n generated_tokens[i] = generated_tokens[i][:n_token + 1]\n self.clear_model_prediction_history(i, generated_tokens[i], from_=n_token)\n self.trigger_end[i] = False\n \n elif n_token in self.model_prediction_history[i]:\n self.logprobs_history[i][n_token] = new_model_probs\n \n if not kept:\n break\n \n all_kept.append(kept)\n return all_kept\n \n\n def clear_model_prediction_history(self, index, generated_tokens_index, from_=-1):\n \"\"\"Clears the model prediction history for a specific sample in the batch. First deletes all history of finished tokens, then \n deletes history of tokens that were prediction, but then got removed because of speculation\n\n Args:\n index (int): index of the sample in the batch\n generated_tokens_index (list[int]): Generated tokens at the index\n from_ (int, optional): From which token to delete all the history. Defaults to -1.\n \"\"\"\n all_indices = list(self.model_prediction_history[index].keys())\n for token in all_indices:\n all_none = all([logprob is None for logprob in self.model_prediction_history[index][token].values()])\n finished = self.formula.is_finished(self.model_prediction_history[index][token])\n if all_none or finished or (from_ != -1 and token > from_):\n if finished and len(generated_tokens_index) > token and self.calculate_statistics:\n self.add_monitor_token_probs(generated_tokens_index[token], self.model_prediction_history[index][token], self.logprobs_history[index].get(token))\n \n if finished:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = max(token + 1, self.model_last_token_prediction[model_index][index])\n \n del self.model_prediction_history[index][token]\n \n if from_ > -1:\n for model_index in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[model_index][index] = min(from_ + 1, self.model_last_token_prediction[model_index][index])\n \n def max_index_prediction_history(self, index):\n \"\"\"Gets the max index of the model prediction history for a specific runnable operator \n\n Args:\n index (int): index of runnable operator in the list of runnable operators\n\n Returns:\n int: max index of its prediction\n \"\"\"\n keys = list(self.model_prediction_history[index].keys())\n if len(keys) == 0:\n return 0\n return max(self.model_prediction_history[index].keys())\n\n def normal_sample(self, probs):\n \"\"\"Samples from a probability distribution\n\n Args:\n probs (torch.tensor): Probability distribution\n\n Returns:\n int: Sampled token\n \"\"\"\n out = torch.multinomial(probs, 1)\n return out\n \n def KL_divergence(self, p, q):\n \"\"\"Compuates KL divergence between two probability distributions\n\n Args:\n p (torch.tensor): probability distribution\n q (torch.tensor): probability distribution\n\n Returns:\n float: KL divergence\n \"\"\"\n return torch.sum(p * torch.log((p + self.epsilon) / (q + self.epsilon)))\n \n def entropy(self, p):\n \"\"\"Computes entropy of a probability distribution\n\n Args:\n p (torch.tensor): probability distribution\n\n Returns:\n float: entropy\n \"\"\"\n return -torch.sum(p * torch.log(p + self.epsilon))\n \n def expected_acceptance_prob(self, p, q):\n \"\"\"\n Calculates the expected acceptance probability of speculative sampling.\n \n Args:\n p (torch.tensor): probability 
distribution\n q (torch.tensor): probability distribution\n \"\"\"\n return 1 - 1 / 2 * torch.sum(torch.abs(q - p)).item()\n \n def add_monitor_token_probs(self, token, history, history_logprobs):\n \"\"\"Adds some token probabilities to the monitor\n\n Args:\n token (int): Samples token\n history (dict): Model prediction history at the specific index where the token was drawn from\n history_logprobs (torch.tensor): LogProbability distribution from which the token was drawn.\n \"\"\"\n for runnable_operator in self.runnable_operators:\n if runnable_operator.is_finished(history) and runnable_operator.outputs_logprobs:\n evaluated = runnable_operator.evaluate(history)\n self.monitor.add_result(element=torch.softmax(evaluated, dim=-1)[token].item(), runnable_operator=runnable_operator, indicator=\"token_prob\")\n # add logprob as well\n self.monitor.add_result(element=max(evaluated[token].item(), np.log(self.epsilon)), runnable_operator=runnable_operator, indicator=\"token_logprob\")\n # add KL divergence\n if history_logprobs is not None:\n self.monitor.add_result(element=self.KL_divergence(torch.softmax(history_logprobs, dim=-1), torch.softmax(evaluated, dim=-1)).item(), \n runnable_operator=runnable_operator, indicator=\"KL_divergence\")\n \n self.monitor.add_result(element=self.entropy(torch.softmax(history_logprobs, dim=-1)).item(), indicator=\"entropy\")\n\n def next_token_speculative(self, continuation_tokens, \n top_p=1.0, top_k=0, temperature=1.0, speculation=True, use_cache=True):\n \"\"\"Continues one step in the generation process by running the runnable operators that need to be run and then sampling from the probability distribution.\n\n Args:\n continuation_tokens (list[list[int]]): Current continuation tokens\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n temperature (float, optional): temperature to use. Defaults to 1.0.\n speculation (bool, optional): Whether to use speculation. Defaults to True.\n use_cache (bool, optional): Whether to use cache. 
Defaults to True.\n\n Returns:\n _type_: _description_\n \"\"\"\n models_ran = []\n for i, runnable_operator in enumerate(self.runnable_operators):\n new_tokens = [len(continuation_tokens[j]) - self.model_last_token_prediction[i][j] + 1 for j in range(len(continuation_tokens))]\n if runnable_operator.run_condition(new_tokens, self.trigger_end) or not speculation:\n logprobs = self.forward_model(runnable_operator, continuation_tokens, model_new_tokens=new_tokens, use_cache=use_cache, do_speculation=speculation)\n all_kept = self.add_new_result(continuation_tokens, new_tokens, runnable_operator, logprobs, top_p, top_k, temperature)\n models_ran.append(i)\n \n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) + int(all_kept[j])\n for j in range(len(continuation_tokens))]\n \n if not all(all_kept):\n break\n \n to_sample_indices = [i for i in range(len(continuation_tokens)) if all_kept[i] and not self.trigger_end[i]]\n\n if len(to_sample_indices) > 0:\n # do batch sampling\n all_required_histories = torch.stack([\n self.create_sample_logprobs(\n self.logprobs_history[i][len(continuation_tokens[i])], \n temperature=temperature,\n top_k=top_k,\n top_p=top_p\n ) for i in to_sample_indices\n ])\n new_tokens = self.normal_sample(all_required_histories)\n for i in range(len(to_sample_indices)):\n continuation_tokens[to_sample_indices[i]].append(new_tokens[i].item())\n\n for i in models_ran:\n self.model_last_token_prediction[i] = [len(continuation_tokens[j]) for j in range(len(continuation_tokens))]\n return continuation_tokens\n\n def __call__(self, input_ids, **kwargs):\n \"\"\"Runs the forward pass of the model. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n return self.forward(input_ids, **kwargs)\n \n def forward(self, input_ids, normalize=True, **kwargs):\n \"\"\"Runs the foward pass. This is needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n normalize (bool, optional): Whether or not to normalize the output. 
Defaults to True.\n\n Returns:\n namedtuple: Named tuple of the ModelArithmetic model\n \"\"\"\n ### this is a bit cheeky, but in order to be compatible with lm-evaluation-harness, we need to implement this method\n logprobs_per_model = {runnable_operator.id(): None for runnable_operator in self.runnable_operators}\n if not isinstance(input_ids, list):\n input_shape = input_ids.shape\n continuation_tokens = self.lm_eval_compatibility.forward_preprocessing(input_ids, self.model_input_tokens)\n else:\n input_shape = None\n continuation_tokens = input_ids\n\n for runnable_operator in self.runnable_operators:\n logprobs = self.forward_model(runnable_operator, continuation_tokens)\n if input_shape is not None:\n logprobs = self.lm_eval_compatibility.forward_post_processing(logprobs, input_shape)\n logprobs_per_model[runnable_operator.id()] = logprobs\n\n output = self.formula.evaluate(logprobs_per_model, normalize=normalize)\n return [output]\n\n def get_decoded_tokens(self, next_tokens_batch):\n \"\"\"Gets decoded tokens from the next tokens\n\n Args:\n next_tokens_batch (list[list[int]]): New tokens for each sample in the batch\n\n Returns:\n list[str]: Decoded tokens\n \"\"\"\n # adding eos token for compatibility with sentencepiece tokenizer\n encoded_sentences = [[self.tokenizer.eos_token_id] + next_tokens for next_tokens in next_tokens_batch]\n decoded_sentences = [self.tokenizer.decode(encoded_sentence, add_special_tokens=False) for encoded_sentence in encoded_sentences]\n decoded_next_tokens = [decoded_sentence[len(self.tokenizer.eos_token):] for decoded_sentence in decoded_sentences]\n return decoded_next_tokens\n \n def clear_memory(self):\n \"\"\"Deletes all loaded models and clears the cache\n \"\"\"\n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache()\n self.loaded_models = dict()\n torch.cuda.empty_cache()\n\n def generate_text(self, sentences, max_length=1024, stop_texts=None, batch_size=None,\n temperature=1.0, top_p=1.0, top_k=0, num_return_sequences=1, do_speculation=False, use_cache=True, **kwargs):\n \"\"\"Generates text based on the input params\n\n Args:\n sentences (list[str]): List of input sentences\n max_length (int, optional): Max generation length. Defaults to 128.\n stop_texts (list[str], optional): Strings at which to stop generation. Defaults to None.\n batch_size (int, optional): Batch size. Defaults to None (all at once).\n temperature (float, optional): temperature to use. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n num_return_sequences (int, optional): Number of return sequences per sentence. Defaults to 1.\n do_speculation (bool, optional): Whether or not to do speculation. Defaults to True.\n use_cache (bool, optional): Whether or not to use cache. 
Defaults to True.\n\n Returns:\n list[str]: List of generated texts\n \"\"\"\n assert not do_speculation or any([runnable_operator.speculative_factor == 1 for runnable_operator in self.runnable_operators])\n if isinstance(sentences, str):\n sentences = [sentences]\n if batch_size is None:\n batch_size = len(sentences)\n \n # duplicate each sentence num_return_sequences times, but keep the same sentences next to each other\n sentences = [sentence for sentence in sentences for _ in range(num_return_sequences)]\n\n self.model_prediction_history = [dict() for _ in range(batch_size)]\n self.logprobs_history = [dict() for _ in range(batch_size)]\n self.model_last_token_prediction = [[0 for _ in range(batch_size)] for _ in range(len(self.runnable_operators))]\n self.trigger_end = [False for _ in range(batch_size)]\n self.init_monitor()\n \n if stop_texts is None:\n stop_texts = []\n stop_texts.append(self.tokenizer.eos_token)\n\n start_sentences = sentences[:]\n\n log(logger.debug, f\"Generating {len(sentences)} sentences\")\n\n generated_texts = [\"\" for _ in range(len(sentences))]\n generated_tokens = [[] for _ in range(len(sentences))]\n current_indices = [i for i in range(0, min(len(sentences), batch_size))]\n next_index = len(current_indices)\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n \n total_done = 0\n while len(current_indices) > 0:\n start_time = time.time()\n generated_tokens_batch = [generated_tokens[index] for index in current_indices]\n next_tokens = self.next_token_speculative(generated_tokens_batch, top_p, top_k, \n temperature, speculation=do_speculation, use_cache=use_cache)\n for i in range(len(next_tokens)):\n next_tokens[i] = self.run_retroactive_operators(i, next_tokens[i], temperature, top_k, top_p)\n self.clear_model_prediction_history(i, next_tokens[i])\n decoded_tokens = self.get_decoded_tokens(next_tokens)\n\n for i, index in enumerate(current_indices):\n generated_tokens[index] = next_tokens[i]\n generated_texts[index] = decoded_tokens[i]\n\n indices_to_remove = []\n for i in range(len(current_indices)):\n sentences[current_indices[i]] = start_sentences[current_indices[i]] + generated_texts[current_indices[i]]\n if any([stop_text in generated_texts[current_indices[i]] for stop_text in stop_texts]) or len(generated_tokens[current_indices[i]]) >= max_length:\n if len(self.model_prediction_history[i]) == 0:\n indices_to_remove.append(i)\n else:\n self.trigger_end[i] = True\n \n for i in indices_to_remove[::-1]:\n self.monitor.add_result(element=len(generated_tokens[current_indices[i]]), indicator=\"length\")\n del current_indices[i]\n self.model_prediction_history = self.model_prediction_history[:i] + self.model_prediction_history[i + 1:]\n self.logprobs_history = self.logprobs_history[:i] + self.logprobs_history[i + 1:]\n for j in range(len(self.model_last_token_prediction)):\n self.model_last_token_prediction[j] = self.model_last_token_prediction[j][:i] + self.model_last_token_prediction[j][i + 1:]\n self.trigger_end = self.trigger_end[:i] + self.trigger_end[i + 1:]\n \n for runnable_operator in self.runnable_operators:\n runnable_operator.delete_cache(index=i)\n\n if next_index < len(sentences):\n current_indices.append(next_index)\n self.model_prediction_history.append(dict())\n self.logprobs_history.append(dict())\n self.trigger_end.append(False)\n \n for j in range(len(self.model_last_token_prediction)):\n 
self.model_last_token_prediction[j].append(0)\n \n next_index += 1\n total_done += 1\n if total_done % 30 == 0:\n log(logger.debug, f\"Progress: {total_done / len(sentences):.3f}\")\n \n for runnable_operator_id in self.model_input_tokens:\n self.model_input_tokens[runnable_operator_id].set_inputs([start_sentences[index] for index in current_indices])\n\n self.monitor.add_result(element=time.time() - start_time)\n \n return generated_texts\n\n def generate(self, input_ids, attention_mask=None, do_sample=False, max_new_tokens=1024, \n stopping_criteria=None, temperature=1.0, top_p=1.0, top_k=0, use_cache=True, eos_token_id=None, pad_token_id=None, **kwargs):\n \"\"\"Generates text based on the input params. Needed for compatibility with lm-evaluation-harness\n\n Args:\n input_ids (torch.tensor): input ids\n attention_mask (torch.tensor, optional): attention mask. Defaults to None.\n do_sample (bool, optional): Whether or not to sample. Defaults to False.\n max_new_tokens (int, optional): Max new number of tokens. Defaults to 128.\n stopping_criteria (_type_, optional): Stopping criteria to use. Defaults to None.\n temperature (float, optional): Temperature to. Defaults to 1.0.\n top_p (float, optional): top_p to use. Defaults to 1.0.\n top_k (int, optional): top_k to use. Defaults to 0.\n use_cache (bool, optional): Whether or not to use cache. Defaults to True.\n eos_token_id (int, optional): eos token id. Defaults to None.\n pad_token_id (int, optional): pad token id. Defaults to None.\n\n Returns:\n list[str]: Generated texts\n \"\"\"\n if not do_sample:\n top_k = 1\n \n batch_size = input_ids.shape[0]\n input_texts = [self.tokenizer.decode(ids, skip_special_tokens=True) for ids in input_ids]\n stopping_sequences = [self.tokenizer.eos_token]\n if stopping_criteria is not None:\n stopping_sequences += [criteria.sequence for criteria in stopping_criteria]\n if eos_token_id is not None:\n stopping_sequences += [self.tokenizer.decode([eos_token_id])]\n \n texts = self.generate_text(input_texts, max_length=max_new_tokens, stop_texts=stopping_sequences,\n batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, use_cache=use_cache)\n encoded_texts = self.tokenizer.batch_encode_plus(texts, add_special_tokens=False, return_tensors=\"pt\").input_ids.to(self.device)\n # concatenate the input_ids with the encoded_texts\n all_encoded = torch.cat([input_ids, encoded_texts], dim=-1)\n return all_encoded" }, { "identifier": "ENABLE_LOGGING", "path": "src/model_arithmetic/utils.py", "snippet": "ENABLE_LOGGING = False" }, { "identifier": "log", "path": "src/model_arithmetic/utils.py", "snippet": "def log(function, message):\n \"\"\"\n Logs the given message using the provided function if logging is enabled.\n \n Parameters:\n function (callable): The logging function to use.\n message (str): The message to be logged.\n \"\"\"\n if ENABLE_LOGGING:\n function(message)" } ]
from .base import BaseClass from loguru import logger from transformers import set_seed, Trainer, TrainingArguments, DataCollatorWithPadding from .dataset import CustomDataset from sklearn.model_selection import train_test_split from .basic_model_loader import load_model, load_tokenizer from .model_arithmetic import ModelArithmetic from googleapiclient import discovery from dotenv import load_dotenv from torch.utils.data import DataLoader from .utils import ENABLE_LOGGING, log from lm_eval import evaluator import pandas as pd import numpy as np import torch import os import json import time
16,094
self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"]
load_dotenv()

class Evaluation(BaseClass):
    """
    This class is used for evaluating a model's performance on a given dataset.
    It includes methods for preparing the dataset, evaluating the model, generating samples, calculating perplexity and faithfulness of the model.
    """
    def __init__(self, generator=None, dataset_location=None, dataset=None, train_dataset=None, train_dataset_location=None, n_input_words=5,
                 bleurt_checkpoint="../models/BLEURT-20", **kwargs):
        """
        Initialize the Evaluation class with the given parameters.

        Args:
            generator (ModelArithmetic, optional): The model to be evaluated.
            dataset_location (string, optional): The location of the dataset to be used for evaluation. Either this or dataset should be provided. The dataset should contain the columns "text", "input", "output" and "label" ("label", "input", "output" optional)
            dataset (pd.DataFrame, optional): The dataset to be used for evaluation. Either this or dataset_location should be provided. The dataset should contain the columns "text", "input", "output" and "label" ("label", "input", "output" optional)
            train_dataset (pd.DataFrame, optional): The dataset to be used for training the model. Only used when calculating the faithfulness of the model and when the downstream model still needs to be finetuned.
            train_dataset_location (string, optional): The location of the dataset to be used for training the model.
            n_input_words (int, optional): The number of input words to be used in the generator. Only used if the dataset does not contain the column "input".
            bleurt_checkpoint (string, optional): The location of the BLEURT model checkpoint.
            **kwargs: Additional keyword arguments.
        """
        self.has_input_task = True
        self.dataset = None
        if dataset is not None:
            self.dataset = dataset.copy()
        elif dataset_location is not None:
            self.dataset = pd.read_csv(dataset_location, escapechar='\\', lineterminator="\n")

        if train_dataset is not None:
            self.train_dataset = train_dataset
        elif train_dataset_location is not None:
            self.train_dataset = pd.read_csv(train_dataset_location, escapechar='\\', lineterminator="\n")
        else:
            self.train_dataset = None

        if self.dataset is not None:
            self.prepare_dataset(n_input_words)

        super().__init__(**kwargs, dataset_location=dataset_location, generator=generator, has_input_task=self.has_input_task, output=dict(),
                         extra_kwargs=None, bleurt_checkpoint=bleurt_checkpoint, train_dataset_location=None)

        if isinstance(generator, ModelArithmetic):
            # If we don't do it this way, we can't store the evaluator because ModelArithmetic is not serializable
            del self.kwargs["generator"]
            self.kwargs["formula"] = generator.formula
            self.formula = generator.formula

    def prepare_dataset(self, n_input_words=5):
        """
        Prepares the dataset for evaluation. If the dataset does not have an input column, it assumes the input is the first n_input_words words of the output.
        If the dataset does not have a label column, it assumes all labels are 1.

        Args:
            n_input_words (int): The number of input words to be used.
        """
        log(logger.debug, "Preparing dataset")
        if "input" not in self.dataset.columns:
            log(logger.debug, f"No input column found, assuming input is the first {n_input_words} words of the output")
            self.dataset["input"] = self.dataset["text"].apply(lambda x: " ".join(x.split()[:n_input_words]))
            self.dataset["output"] = self.dataset["text"].apply(lambda x: " " + " ".join(x.split()[n_input_words:]))
            self.has_input_task = False
        if "label" not in self.dataset.columns:
            log(logger.debug, "No label column found, assuming all labels are 1")
            self.dataset["label"] = 1

    def evaluate_lm_eval(self, model, task_name, batch_size, num_fewshot, model_args, no_cache=False, limit=None, write_out=False, output_folder=None, **kwargs):
        """
        Evaluates the model using the lm_eval package.

        Args:
            model (PreTrainedModel): The model to be evaluated.
            task_name (string): The name of the task for evaluation.
            batch_size (int): The batch size to be used for evaluation.
            num_fewshot (int): The number of fewshot examples to be used for evaluation.
            model_args (dict): The arguments to be passed to the model.
            no_cache (bool, optional): Whether to use cached results or not.
            limit (int, optional): The maximum number of examples to be used for evaluation.
            write_out (bool, optional): Whether to write out the results or not.
            output_folder (string, optional): The folder to write out the results.
            **kwargs: Additional keyword arguments.
        """
        try:
            # fail with a clear message if the optional lm_eval dependency is missing
            from lm_eval import evaluator
        except ImportError:
            raise ImportError("Please install lm_eval to run this function")

        results = evaluator.simple_evaluate(
            model=model,
            model_args=model_args,
            tasks=[task_name],
            num_fewshot=num_fewshot,
            batch_size=batch_size,
            device="cuda" if torch.cuda.is_available() else "cpu",
            no_cache=no_cache,
            limit=limit,
            write_out=write_out,
            output_base_path=output_folder
        )
        if "lm_eval" in self.output:
            self.output["lm_eval"][task_name] = results
        else:
            self.output["lm_eval"] = {task_name: results}

    def evaluate(self, max_tokens=128, store_file=None, reload=True, dataset_file=None, reload_data=True, preserve_memory=False,
                 batch_size=1, do_perspective=True, speculation=False, only_faithfulness=False, **kwargs):
        """
        Evaluates the model on the dataset and calculates the perplexity and faithfulness.

        Args:
            max_tokens (int, optional): The maximum number of tokens to be used for evaluation.
            store_file (string, optional): The file to store the evaluation results.
            reload (bool, optional): Whether to reload the dataset or not if it was stored before.
            dataset_file (string, optional): The file containing the dataset. If the path exists, the dataset is loaded from it; if it does not exist, the generated dataset is saved to it.
            reload_data (bool, optional): Whether to reload the data or not if it was stored before.
            preserve_memory (bool, optional): Whether to preserve memory or not.
            batch_size (int, optional): The batch size to be used for evaluation.
            do_perspective (bool, optional): Whether to calculate the perspective score or not.
            speculation (bool, optional): Whether to use speculation or not.
            only_faithfulness (bool, optional): Whether to compute only the faithfulness metric and skip perplexity and perspective.
            **kwargs: Additional keyword arguments.
""" if store_file is not None: os.makedirs(os.path.dirname(store_file), exist_ok=True) if dataset_file is not None: os.makedirs(os.path.dirname(dataset_file), exist_ok=True) if (reload_data or reload) and dataset_file is not None and os.path.isfile(dataset_file): log(logger.debug, f"Reloading dataset from {dataset_file}") self.dataset = pd.read_csv(dataset_file, escapechar='\\', lineterminator="\n") self.dataset.fillna("", inplace=True) else: log(logger.debug,"Generating samples") self.generate_samples(max_tokens, batch_size=batch_size, speculation=speculation, **kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") if reload and store_file is not None and os.path.isfile(store_file): self.output = json.load(open(store_file))["output"] log(logger.info, f"Reloading output. Output is {self.output}") if not reload: self.output = dict() self.dataset.dropna(inplace=True) if preserve_memory and hasattr(self.generator, "clear_memory"): # can have weird side effects! self.generator.clear_memory() if not only_faithfulness: self.perplexity(self.dataset, **kwargs) else: del self.output['faithfulness'] self.faithfulness_multiple(self.dataset, **kwargs) if do_perspective and not only_faithfulness: self.perspective(**kwargs) if dataset_file is not None: log(logger.info, f"Saving dataset to {dataset_file}") self.dataset.to_csv(dataset_file, index=False, escapechar='\\', lineterminator="\n") self.extra_kwargs = {kwarg: str(kwargs[kwarg]) for kwarg in kwargs} self.extra_kwargs["max_tokens"] = max_tokens if store_file is not None: log(logger.info, f"Saving output to {store_file}") self.save(store_file) return self.output def perspective(self, classification_with_input=True, **kwargs): """ Calculates the perspective score of the generated sentences. Args: classification_with_input (bool, optional): Whether to use the input of the sentence for calculating the perspective score or not. **kwargs: Additional keyword arguments. """ retries = 0 if "perspective" in self.output: log(logger.info, f"Reloading perspective. 
Perspective is {self.output['perspective']}") return self.output["perspective"] log(logger.info, "Calculating perspective") service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=os.environ["PERSPECTIVE_API_KEY"], discoveryServiceUrl="https://commentanalyzer.googleapis.com/$discovery/rest?version=v1alpha1", static_discovery=False) scores = [] self.dataset["perspective"] = [None] * len(self.dataset) for index, sample in self.dataset.iterrows(): sentence = sample['generated'] if classification_with_input: sentence = sample['input'] + sentence analyze_request = { 'comment': { 'text': sentence }, 'languages': ["en"], 'requestedAttributes': {'TOXICITY': {}} } while retries < 10 ** 5: try: response = service.comments().analyze(body=analyze_request).execute() break except Exception as e: if "LANGUAGE_NOT_SUPPORTED_BY_ATTRIBUTE" in str(e): continue log(logger.warning, f"Exception {e} occurred, retrying...") retries += 1 time.sleep(10) if retries == 10 ** 5: log(logger.error, "Could not get all perspective scores") break value = response["attributeScores"]["TOXICITY"]["summaryScore"]["value"] self.dataset.at[index, "perspective"] = value scores.append(value) time.sleep(0.1) result = np.mean(scores) log(logger.info, f"Perspective is {result:.6f}") self.output["perspective"] = { "mean": result, "std": np.std(scores), } return result def generate_samples(self, max_tokens, batch_size=1, temperature=1.0, top_p=1.0, top_k=0, stop_texts=None, speculation=False, **kwargs): """ Generates samples from the model. Args: max_tokens (int): The maximum number of tokens to be used for evaluation. batch_size (int, optional): The batch size to be used for evaluation. temperature (float, optional): The temperature to be used for sampling. top_p (float, optional): The top-p value to be used for sampling. top_k (int, optional): The top-k value to be used for sampling. stop_texts (list, optional): The list of texts at which sampling should be stopped speculation (bool, optional): Whether to use speculation or not. **kwargs: Additional keyword arguments. """ start_time = time.time() if "generated" not in self.dataset.columns: texts = self.generator.generate_text(self.dataset["input"].tolist(), max_length=max_tokens, batch_size=batch_size, temperature=temperature, top_p=top_p, top_k=top_k, stop_texts=stop_texts, do_speculation=speculation) self.dataset["generated"] = texts end_time = time.time() self.output["time"] = { "total_time": end_time - start_time, "time_per_sample": (end_time - start_time) / len(self.dataset), "dataset_size": len(self.dataset), "max_tokens": max_tokens, "batch_size": batch_size } def save_generated(self, output_location): """ Saves the generated samples to the specified location. Args: output_location (string): The location to save the generated samples. """ log(logger.debug, f"Saving generated samples to {output_location}") self.dataset.to_csv(output_location) def get_perplexity(self, dataset, model, tokenizer, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model (PreTrainedModel): The model to be evaluated. tokenizer (Tokenizer): The tokenizer to be used for tokenizing the sentences. **kwargs: Additional keyword arguments. 
""" perplexities = [] sum_nllos = 0 n_tokens = 0 for index, sample in dataset.iterrows(): input_sentence = sample['input'] sentence = sample['generated'] if len(sentence) == 0: continue combined_sentence = input_sentence + sentence encodings = tokenizer(combined_sentence, return_tensors='pt') input_ids = encodings['input_ids'].to(model.device) attention_mask = encodings['attention_mask'].to(model.device) input_encodings = tokenizer(input_sentence, return_tensors='pt') input_ids_inputs = input_encodings['input_ids'] input_length = input_ids_inputs.size(1) with torch.no_grad(): output = model(input_ids, labels=input_ids, attention_mask=attention_mask) logprobs = output.logits[0, :].log_softmax(dim=-1) loss_func = torch.nn.NLLLoss(ignore_index=-100, reduction='sum') loss = loss_func(logprobs[..., input_length:-1, :].contiguous(), input_ids[0, :][..., input_length+1:].contiguous()) loss = loss.to(torch.float32).detach().cpu().numpy() n_tokens_here = input_ids.shape[-1] - input_length - 1 if n_tokens_here > 0: perplexity = np.exp(loss / n_tokens_here) sum_nllos += loss n_tokens += n_tokens_here if not np.isnan(perplexity): perplexities.append(perplexity) average = np.mean(perplexities) median = np.median(perplexities) real = np.exp(sum_nllos / n_tokens) return { "average": average, "median": median, "correct_perplexity": real } def perplexity(self, dataset, model_name_fluency="gpt2-xl", dtype=torch.float16, **kwargs): """ Calculates the perplexity of the generated sentences. Args: dataset (pd.DataFrame): The dataset to be used for evaluation. Has columns "input" (for input text), "generated" (for generated text). model_name_fluency (string, optional): The name of the model to be used for calculating fluency. dtype (torch.dtype, optional): The data type to be used for the model. **kwargs: Additional keyword arguments. """ log(logger.info, "Calculating fluency") if "perplexity" in self.output: log(logger.info, f"Reloading perplexity. Perplexity is {self.output['perplexity']}") return self.output["perplexity"]
tokenizer = load_tokenizer(model_name_fluency)
3
2023-11-21 20:01:08+00:00
24k
HeliosZhao/Animate124
dnerf/utils.py
[ { "identifier": "save_tensor2image", "path": "nerf/utils.py", "snippet": "def save_tensor2image(x: torch.Tensor, path, channel_last=False, quality=75, **kwargs):\n # assume the input x is channel last\n # ipdb.set_trace()\n # if x.ndim == 4:\n # if channel_last:\n # x = x.permute(0, 3, 1, 2) \n # TF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)).save(path, quality=quality)\n if x.ndim == 5:\n ## video\n # ipdb.set_trace()\n path = os.path.splitext(path)[0] + '.mp4' # convert image to mp4\n # B,F,C,H,W or B,F,H,W,C\n if channel_last: # B,F,H,W,C\n x = rearrange(x, \"b f h w c -> b f c h w\")\n save_videos_grid(x, path, **kwargs)\n else:\n if channel_last:\n x = x.permute(0, 3, 1, 2) \n TF.to_pil_image(make_grid(x, value_range=(0, 1), **kwargs)).save(path, quality=quality)" }, { "identifier": "nonzero_normalize_depth", "path": "nerf/utils.py", "snippet": "def nonzero_normalize_depth(depth, mask=None):\n if mask is not None:\n if (depth[mask]>0).sum() > 0:\n nonzero_depth_min = depth[mask][depth[mask]>0].min()\n else:\n nonzero_depth_min = 0\n else:\n if (depth>0).sum() > 0:\n nonzero_depth_min = depth[depth>0].min()\n else:\n nonzero_depth_min = 0\n if nonzero_depth_min == 0:\n return depth\n else:\n depth = (depth - nonzero_depth_min) \n depth = depth / (depth.max()+1e-6)\n return depth.clamp(0, 1)" }, { "identifier": "Trainer", "path": "nerf/utils.py", "snippet": "class Trainer(object):\n def __init__(self,\n\t\t argv, # command line args\n name, # name of this experiment\n opt, # extra conf\n model, # network\n guidance, # guidance network\n criterion=None, # loss function, if None, assume inline implementation in train_step\n optimizer=None, # optimizer\n ema_decay=None, # if use EMA, set the decay\n lr_scheduler=None, # scheduler\n metrics=[], # metrics for evaluation, if None, use val_loss to measure performance, else use the first metric.\n local_rank=0, # which GPU am I\n world_size=1, # total num of GPUs\n device=None, # device to use, usually setting to None is OK. 
(auto choose device)\n mute=False, # whether to mute all print\n fp16=False, # amp optimize level\n max_keep_ckpt=1, # max num of saved ckpts in disk\n workspace='workspace', # workspace to save logs & ckpts\n best_mode='min', # the smaller/larger result, the better\n use_loss_as_metric=True, # use loss as the first metric\n report_metric_at_train=False, # also report metrics at training\n use_checkpoint=\"latest\", # which ckpt to use at init time\n use_tensorboard=True, # whether to use tensorboard for logging\n scheduler_update_every_step=False, # whether to call scheduler.step() after every train step\n **kwargs\n ):\n\n self.argv = argv\n self.name = name\n self.opt = opt\n self.mute = mute\n self.metrics = metrics\n self.local_rank = local_rank\n self.world_size = world_size\n self.workspace = workspace\n self.ema_decay = ema_decay\n self.fp16 = fp16\n self.best_mode = best_mode\n self.use_loss_as_metric = use_loss_as_metric\n self.report_metric_at_train = report_metric_at_train\n self.max_keep_ckpt = opt.get(\"max_keep_ckpt\", max_keep_ckpt)\n self.use_checkpoint = use_checkpoint\n self.use_tensorboard = use_tensorboard\n self.time_stamp = time.strftime(\"%Y-%m-%d_%H-%M-%S\")\n self.scheduler_update_every_step = scheduler_update_every_step\n self.device = device if device is not None else torch.device(f'cuda:{local_rank}' if torch.cuda.is_available() else 'cpu')\n self.console = Console()\n\n model.to(self.device)\n if self.world_size > 1:\n model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)\n model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[local_rank])\n self.model = model\n\n # guide model\n self.guidance = guidance\n self.embeddings = {}\n\n # text prompt / images\n if self.guidance is not None:\n for key in self.guidance:\n for p in self.guidance[key].parameters():\n p.requires_grad = False\n self.embeddings[key] = {}\n self.prepare_embeddings()\n\n if isinstance(criterion, nn.Module):\n criterion.to(self.device)\n self.criterion = criterion\n\n if optimizer is None:\n self.optimizer = optim.Adam(self.model.parameters(), lr=0.001, weight_decay=5e-4) # naive adam\n else:\n self.optimizer = optimizer(self.model)\n\n if lr_scheduler is None: ## scheduler is all one, for nerf model other than vanilla nerf\n self.lr_scheduler = optim.lr_scheduler.LambdaLR(self.optimizer, lr_lambda=lambda epoch: 1) # fake scheduler\n else:\n self.lr_scheduler = lr_scheduler(self.optimizer) \n\n if ema_decay:\n self.ema = ExponentialMovingAverage(\n self.model.parameters(), decay=ema_decay)\n else:\n self.ema = None\n\n self.scaler = torch.cuda.amp.GradScaler(enabled=self.fp16)\n\n # variable init\n self.total_train_t = 0\n self.epoch = 0\n self.global_step = 0\n self.local_step = 0\n self.novel_view_step = 0\n self.stats = {\n \"loss\": [],\n \"valid_loss\": [],\n \"results\": [], # metrics[0], or valid_loss\n \"checkpoints\": [], # record path of saved ckpt, to automatically remove old ckpt\n \"best_result\": None,\n }\n self.loss_meter = AverageMeters()\n # auto fix\n if len(metrics) == 0 or self.use_loss_as_metric:\n self.best_mode = 'min'\n\n logger.info(f'[INFO] cmdline: {self.argv}')\n logger.info(f'args:\\n{self.opt}')\n logger.info(\n f'[INFO] Trainer: {self.name} | {self.time_stamp} | {self.device} | {\"fp16\" if self.fp16 else \"fp32\"} | {self.workspace}')\n logger.info(\n f'[INFO] #parameters: {sum([p.numel() for p in model.parameters() if p.requires_grad])}')\n logger.info(f'[INFO] #Optimizer: \\n{self.optimizer}')\n logger.info(f'[INFO] #Scheduler: 
\\n{self.lr_scheduler}')\n\n if self.workspace is not None:\n if self.use_checkpoint == \"scratch\":\n logger.info(\"[INFO] Training from scratch ...\")\n elif self.use_checkpoint == \"latest\":\n logger.info(\"[INFO] Loading latest checkpoint ...\")\n self.load_checkpoint()\n elif self.use_checkpoint == \"latest_model\":\n logger.info(\"[INFO] Loading latest checkpoint (model only)...\")\n self.load_checkpoint(model_only=True)\n elif self.use_checkpoint == \"best\":\n if os.path.exists(self.opt.best_path):\n logger.info(\"[INFO] Loading best checkpoint ...\")\n self.load_checkpoint(self.opt.best_path)\n else:\n logger.info(\n f\"[INFO] {self.opt.best_path} not found, loading latest ...\")\n self.load_checkpoint()\n else: # path to ckpt\n logger.info(f\"[INFO] Loading {self.use_checkpoint} ...\")\n self.load_checkpoint(self.use_checkpoint)\n\n # calculate the text embs.\n @torch.no_grad()\n def prepare_embeddings(self):\n\n # text embeddings (stable-diffusion)\n if self.opt.text is not None:\n assert not self.opt.text_emb_all\n dir_texts = ['front', 'side', 'back']\n if 'SD' in self.guidance:\n if self.opt.text_emb_all:\n self.embeddings['SD']['default'] = self.guidance['SD'].get_all_text_embeds([self.opt.text])\n neg_embedding = self.guidance['SD'].get_all_text_embeds([self.opt.negative])\n else:\n self.embeddings['SD']['default'] = self.guidance['SD'].get_text_embeds([self.opt.text])\n neg_embedding = self.guidance['SD'].get_text_embeds([self.opt.negative])\n \n self.embeddings['SD']['default'] = torch.cat((neg_embedding, self.embeddings['SD']['default']), dim=0)\n\n for idx, d in enumerate(dir_texts):\n text = f\"{self.opt.text}, {d} view\"\n if self.opt.text_emb_all:\n self.embeddings['SD'][d] = self.guidance['SD'].get_all_text_embeds([text])\n else:\n self.embeddings['SD'][d] = self.guidance['SD'].get_text_embeds([text])\n if self.opt.dir_texts_neg:\n text_neg = self.opt.negative + ', '.join([text+' view' for i, text in enumerate(dir_texts) if i != idx]) \n logger.info(f'dir_texts of {d}\\n postive text: {text},\\n negative text: {text_neg}')\n if self.opt.text_emb_all:\n neg_embedding= self.guidance['SD'].get_all_text_embeds([text_neg])\n else:\n neg_embedding= self.guidance['SD'].get_text_embeds([text_neg])\n self.embeddings['SD'][d] = torch.cat((neg_embedding, self.embeddings['SD'][d]), dim=0)\n\n\n if 'IF' in self.guidance:\n self.embeddings['IF']['default'] = self.guidance['IF'].get_text_embeds([self.opt.text])\n neg_embedding = self.guidance['IF'].get_text_embeds([self.opt.negative])\n\n for idx, d in enumerate(dir_texts):\n text = f\"{self.opt.text}, {d} view\"\n self.embeddings['IF'][d] = self.guidance['IF'].get_text_embeds([text])\n if self.opt.dir_texts_neg:\n text_neg = self.opt.negative + ', '.join([text+' view' for i, text in enumerate(dir_texts) if i != idx]) \n logger.info(f'dir_texts of {d}\\n postive text: {text},\\n negative text: {text_neg}')\n neg_embedding= self.guidance['IF'].get_text_embeds([text_neg])\n self.embeddings['IF'][d] = torch.cat((neg_embedding, self.embeddings['IF'][d]), dim=0)\n \n # if 'clip' in self.guidance:\n # self.embeddings['clip']['text'] = self.guidance['clip'].get_text_embeds(self.opt.text)\n\n if self.opt.cn_text is not None:\n # ipdb.set_trace()\n assert 'CN' in self.guidance\n dir_texts = ['front', 'side', 'back']\n self.embeddings['CN']['default'] = self.guidance['CN'].get_text_embeds([self.opt.cn_text])\n neg_embedding = self.guidance['CN'].get_text_embeds([self.opt.negative])\n self.embeddings['CN']['default'] = 
torch.cat((neg_embedding, self.embeddings['CN']['default']), dim=0)\n\n ## embedding for controlnet -> best quality\n self.embeddings['CN']['CN'] = self.guidance['CN'].get_text_embeds([self.opt.cn_cn_text])\n self.embeddings['CN']['CN'] = torch.cat((neg_embedding, self.embeddings['CN']['CN']), dim=0)\n\n for idx, d in enumerate(dir_texts):\n text = f\"{self.opt.cn_text}, {d} view\"\n self.embeddings['CN'][d] = self.guidance['CN'].get_text_embeds([text])\n if self.opt.dir_texts_neg:\n text_neg = self.opt.negative + ', '.join([text+' view' for i, text in enumerate(dir_texts) if i != idx]) \n logger.info(f'dir_texts of {d}\\n postive text: {text},\\n negative text: {text_neg}')\n neg_embedding= self.guidance['CN'].get_text_embeds([text_neg])\n self.embeddings['CN'][d] = torch.cat((neg_embedding, self.embeddings['CN'][d]), dim=0)\n\n\n if self.opt.images is not None:\n\n h = int(self.opt.known_view_scale * self.opt.h)\n w = int(self.opt.known_view_scale * self.opt.w)\n\n # load processed image and remove edges\n rgbas = []\n rgbas_hw = []\n mask_no_edges = []\n for image in self.opt.images:\n rgba = cv2.cvtColor(cv2.imread(image, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGRA2RGBA)\n rgbas.append(rgba)\n rgba_hw = cv2.resize(rgba, (w, h), interpolation=cv2.INTER_AREA).astype(np.float32) / 255\n rgbas_hw.append(rgba_hw)\n if self.opt.rm_edge:\n alpha = np.uint8(rgba_hw[..., 3] * 255.)\n dilate = cv2.dilate(alpha, np.ones((self.opt.edge_width, self.opt.edge_width), np.uint8))\n edge = cv2.absdiff(alpha, dilate).astype(np.float32) / 255\n mask_no_edge = rgba_hw[..., 3] > 0.5\n mask_no_edge[edge>self.opt.edge_threshold] = False\n mask_no_edges.append(mask_no_edge)\n rgba_hw = np.stack(rgbas_hw)\n mask = rgba_hw[..., 3] > 0.5\n if len(mask_no_edges) > 0:\n mask_no_edge = np.stack(mask_no_edges)\n else:\n mask_no_edge = mask\n \n # breakpoint() \n # rgb\n rgb_hw = rgba_hw[..., :3] * rgba_hw[..., 3:] + (1 - rgba_hw[..., 3:]) \n self.rgb = torch.from_numpy(rgb_hw).permute(0,3,1,2).contiguous().to(self.device)\n self.mask = torch.from_numpy(mask).to(self.device)\n self.opacity = torch.from_numpy(mask_no_edge).to(self.device).to(torch.float32).unsqueeze(0)\n print(f'[INFO] dataset: load image prompt {self.opt.images} {self.rgb.shape}')\n\n # load depth\n depth_paths = [image.replace('rgba', 'depth') for image in self.opt.images]\n if os.path.exists(depth_paths[0]):\n depths = [cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) for depth_path in depth_paths]\n depth = np.stack([cv2.resize(depth, (w, h), interpolation=cv2.INTER_AREA) for depth in depths])\n self.depth = 1 - torch.from_numpy(depth.astype(np.float32) / 255).to(self.device) ## why use inverse?? 
MiDas predict depth larger value, more close, while nerf should be small value more close\n # self.depth = torch.from_numpy(depth.astype(np.float32) / 255).to(self.device)\n # ipdb.set_trace()\n if len(self.depth.shape) == 4 and self.depth.shape[-1] > 1:\n self.depth = self.depth[..., 0]\n logger.info(f'[WARN] dataset: {depth_paths[0]} has more than one channel, only use the first channel')\n if self.opt.normalize_depth:\n self.depth = nonzero_normalize_depth(self.depth, self.mask)\n save_tensor2image(self.depth, os.path.join(self.workspace, 'depth_resized.jpg'))\n self.depth = self.depth[self.mask]\n print(f'[INFO] dataset: load depth prompt {depth_paths} {self.depth.shape}')\n else:\n self.depth = None\n logger.info(f'[WARN] dataset: {depth_paths[0]} is not found')\n \n # load normal\n normal_paths = [image.replace('rgba', 'normal') for image in self.opt.images]\n if os.path.exists(normal_paths[0]):\n normals = []\n for normal_path in normal_paths:\n normal = cv2.imread(normal_path, cv2.IMREAD_UNCHANGED)\n if normal.shape[-1] == 4:\n normal = cv2.cvtColor(normal, cv2.COLOR_BGRA2RGB)\n normals.append(normal)\n normal = np.stack([cv2.resize(normal, (w, h), interpolation=cv2.INTER_AREA) for normal in normals])\n self.normal = torch.from_numpy(normal.astype(np.float32) / 255).to(self.device)\n save_tensor2image(self.normal, os.path.join(self.workspace, 'normal_resized.jpg'), channel_last=True)\n print(f'[INFO] dataset: load normal prompt {normal_paths} {self.normal.shape}')\n self.normal = self.normal[self.mask]\n else:\n self.normal = None\n logger.info(f'[WARN] dataset: {normal_paths[0]} is not found')\n\n # save for debug\n save_tensor2image(self.rgb, os.path.join(self.workspace, 'rgb_resized.png'), channel_last=False)\n save_tensor2image(self.opacity, os.path.join(self.workspace, 'opacity_resized.png'), channel_last=False)\n\n # encode embeddings for zero123\n if 'zero123' in self.guidance:\n rgba_256 = np.stack([cv2.resize(rgba, (256, 256), interpolation=cv2.INTER_AREA).astype(np.float32) / 255 for rgba in rgbas])\n rgbs_256 = rgba_256[..., :3] * rgba_256[..., 3:] + (1 - rgba_256[..., 3:])\n rgb_256 = torch.from_numpy(rgbs_256).permute(0,3,1,2).contiguous().to(self.device)\n # import ipdb\n # ipdb.set_trace()\n guidance_embeds = self.guidance['zero123'].get_img_embeds(rgb_256)\n self.embeddings['zero123']['default'] = {\n 'zero123_ws' : self.opt.zero123_ws,\n 'c_crossattn' : guidance_embeds[0],\n 'c_concat' : guidance_embeds[1],\n 'ref_polars' : self.opt.ref_polars,\n 'ref_azimuths' : self.opt.ref_azimuths,\n 'ref_radii' : self.opt.ref_radii,\n }\n\n # if 'clip' in self.guidance:\n # self.embeddings['clip']['image'] = self.guidance['clip'].get_img_embeds(self.rgb)\n # encoder image for clip\n if self.opt.use_clip:\n self.rgb_clip_embed = self.guidance.get_clip_img_embeds(self.rgb)\n # debug.\n scaler = torch.cuda.amp.GradScaler()\n image = torch.randn((1,3,512,512), device=self.device, requires_grad=True)\n with torch.autocast(device_type='cuda', dtype=torch.float16):\n loss = self.guidance.clip_loss(self.rgb_clip_embed, image)\n scaler.scale(loss).backward()\n else:\n self.rgb_clip_embed = None\n\n\n # ------------------------------\n @torch.no_grad()\n def match_known(self, **kwargs):\n self.model.eval()\n data = self.default_view_data\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp'] # [B, 4, 4]\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n ambient_ratio = 1.0\n shading = self.opt.known_shading\n binarize = False\n 
bg_color = self.get_bg_color(\n self.opt.bg_color_known, B*N, rays_o.device)\n\n # add camera noise to avoid grid-like artifect\n # * (1 - self.global_step / self.opt.iters)\n noise_scale = self.opt.known_view_noise_scale\n rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale\n rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True,\n bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize)\n pred_rgb = outputs['image'].reshape(B, H, W, 3).permute(\n 0, 3, 1, 2).contiguous() # [1, 3, H, W]\n pred_mask = outputs['weights_sum'].reshape(B, 1, H, W)\n\n rgb_loss = self.opt.lambda_rgb * \\\n F.mse_loss(pred_rgb*self.opacity,\n self.rgb*self.opacity)\n mask_loss = self.opt.lambda_mask * \\\n F.mse_loss(pred_mask, self.mask.to(torch.float32).unsqueeze(0))\n return pred_rgb, pred_mask, rgb_loss, mask_loss\n\n def get_bg_color(self, bg_type, N, device):\n if bg_type is None:\n return None\n elif isinstance(bg_type, str):\n if bg_type == 'pixelnoise':\n bg_color = torch.rand((N, 3), device=device)\n elif bg_type == 'noise':\n bg_color = torch.rand((1, 3), device=device).repeat(N, 1)\n elif bg_type == 'white':\n bg_color = torch.ones((N, 3), device=device)\n return bg_color\n elif isinstance(bg_type, Tensor):\n bg_color = bg_color.to(device)\n return bg_color\n else:\n raise NotImplementedError(f\"{bg_type} is not implemented\")\n\n def train_step(self, data):\n # perform RGBD loss instead of SDS if is image-conditioned\n do_rgbd_loss = self.opt.images is not None and \\\n ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0))\n\n # override random camera with fixed known camera\n if do_rgbd_loss:\n data = self.default_view_data\n\n # progressively relaxing view range\n if self.opt.progressive_view:\n r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters))\n self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r,\n self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r]\n self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r,\n self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r]\n self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r,\n self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r]\n self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r,\n self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r]\n\n # progressively increase max_level\n if self.opt.progressive_level:\n self.model.max_level = min(1.0, 0.25 + self.global_step / (0.5 * self.opt.iters))\n\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp'] # [B, 4, 4]\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n # When ref_data has B images > opt.batch_size\n if B > self.opt.batch_size:\n # choose batch_size images out of those B images\n choice = torch.randperm(B)[:self.opt.batch_size]\n B = self.opt.batch_size\n rays_o = rays_o[choice]\n rays_d = rays_d[choice]\n mvp = mvp[choice]\n\n if do_rgbd_loss:\n ambient_ratio = 1.0\n shading = 'lambertian' # use lambertian instead of albedo to get normal\n as_latent = False\n binarize = False\n bg_color = self.get_bg_color(\n self.opt.bg_color_known, B*N, rays_o.device)\n\n # add camera noise to avoid grid-like artifact\n if 
self.opt.known_view_noise_scale > 0:\n noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters)\n rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale\n rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale\n\n elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0\n ambient_ratio = 1.0\n shading = 'normal'\n as_latent = True\n binarize = False\n bg_color = None\n\n else:\n if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2\n ambient_ratio = 1.0\n shading = 'normal'\n elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0\n ambient_ratio = 0.1 + 0.9 * random.random()\n shading = 'textureless'\n elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0\n ambient_ratio = 1.0\n shading = 'albedo'\n else:\n # random shading\n ambient_ratio = 0.1 + 0.9 * random.random()\n rand = random.random()\n if rand > 0.8:\n shading = 'textureless'\n else:\n shading = 'lambertian'\n\n as_latent = False\n\n # random weights binarization (like mobile-nerf) [NOT WORKING NOW]\n # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters)\n # binarize = random.random() < binarize_thresh\n binarize = False\n\n # random background\n rand = random.random()\n # ipdb.set_trace()\n if self.opt.bg_radius > 0 and rand > 0.5:\n bg_color = None # use bg_net\n else:\n bg_color = torch.rand(3).to(self.device) # single color random bg\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True, bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize)\n pred_depth = outputs['depth'].reshape(B, 1, H, W)\n if self.opt.normalize_depth: \n pred_depth = nonzero_normalize_depth(pred_depth)\n pred_mask = outputs['weights_sum'].reshape(B, 1, H, W)\n if 'normal_image' in outputs:\n pred_normal = outputs['normal_image'].reshape(B, H, W, 3)\n else:\n pred_normal = None \n\n if as_latent:\n # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)\n pred_rgb = torch.cat([outputs['image'], outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, H, W, 4).permute(0, 3, 1, 2).contiguous() # [B, 4, H, W]\n else:\n pred_rgb = outputs['image'].reshape(B, H, W, 3).permute(0, 3, 1, 2).contiguous() # [B, 3, H, W]\n \n # ipdb.set_trace()\n if 'image_wo_bg' in outputs:\n image_wo_bg = outputs['image_wo_bg'] + (1 - outputs['weights_sum']).unsqueeze(-1) * 1 # B,1,N,3\n if as_latent:\n # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)\n pred_rgb_wobg = torch.cat([image_wo_bg, outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, H, W, 4).permute(0, 3, 1, 2).contiguous() # [B, 4, H, W]\n else:\n pred_rgb_wobg = image_wo_bg.reshape(B, H, W, 3).permute(0, 3, 1, 2).contiguous() # [B, 3, H, W]\n\n out_dict = {\n 'rgb': pred_rgb,\n 'depth': pred_depth,\n 'mask': pred_mask,\n 'normal': pred_normal,\n 'pred_rgb_wobg': pred_rgb_wobg,\n }\n\n # Loss\n # known view loss\n loss_rgb, loss_mask, loss_normal, loss_depth, loss_sds, loss_if, loss_zero123, loss_clip, loss_entropy, loss_opacity, loss_orient, loss_smooth, loss_smooth2d, loss_smooth3d, loss_mesh_normal, loss_mesh_lap = torch.zeros(16, device=self.device)\n # known view loss\n if do_rgbd_loss:\n gt_mask = self.mask # [B, H, W]\n gt_rgb = self.rgb # [B, 3, H, W]\n gt_opacity = self.opacity # [B, 1, H, W]\n gt_normal = self.normal # [B, H, W, 3]\n gt_depth = self.depth # [B, H, W]\n\n if len(gt_rgb) > self.opt.batch_size:\n 
gt_mask = gt_mask[choice]\n gt_rgb = gt_rgb[choice]\n gt_opacity = gt_opacity[choice]\n gt_normal = gt_normal[choice]\n gt_depth = gt_depth[choice]\n\n # color loss\n loss_rgb = self.opt.lambda_rgb * \\\n F.mse_loss(pred_rgb*gt_opacity, gt_rgb*gt_opacity)\n\n # mask loss\n loss_mask = self.opt.lambda_mask * F.mse_loss(pred_mask, gt_mask.to(torch.float32).unsqueeze(0))\n\n # normal loss\n if self.opt.lambda_normal > 0 and 'normal_image' in outputs and self.normal is not None:\n pred_normal = pred_normal[self.mask]\n lambda_normal = self.opt.lambda_normal * \\\n min(1, self.global_step / self.opt.iters) \n loss_normal = lambda_normal * \\\n (1 - F.cosine_similarity(pred_normal, self.normal).mean())/2\n\n # relative depth loss\n if self.opt.lambda_depth > 0 and self.depth is not None:\n valid_pred_depth = pred_depth[:, 0][self.mask]\n loss_depth = self.opt.lambda_depth * (1 - pearson_corrcoef(valid_pred_depth, self.depth))/2\n \n loss = loss_rgb + loss_mask + loss_normal + loss_depth\n # novel view loss\n else:\n save_guidance_path = os.path.join(self.opt.workspace, 'guidance', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every ==0 else None\n if 'SD' in self.guidance:\n # interpolate text_z\n azimuth = data['azimuth'] # [-180, 180]\n\n # ENHANCE: remove loop to handle batch size > 1\n text_z = [] \n for b in range(azimuth.shape[0]):\n if azimuth[b] >= -90 and azimuth[b] < 90:\n if azimuth[b] >= 0:\n r = 1 - azimuth[b] / 90\n else:\n r = 1 + azimuth[b] / 90\n start_z = self.embeddings['SD']['front']\n end_z = self.embeddings['SD']['side']\n else:\n if azimuth[b] >= 0:\n r = 1 - (azimuth[b] - 90) / 90\n else:\n r = 1 + (azimuth[b] + 90) / 90\n start_z = self.embeddings['SD']['side']\n end_z = self.embeddings['SD']['back']\n text_z.append(r * start_z + (1 - r) * end_z)\n text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)\n # text_z_sds = text_z[:, :-1]\n text_z_sds = text_z \n loss_sds, _ = self.guidance['SD'].train_step(text_z_sds, pred_rgb, as_latent=as_latent, guidance_scale=self.opt.guidance_scale['SD'], grad_scale=self.opt.lambda_guidance['SD'],\n density=pred_mask if self.opt.gudiance_spatial_weighting else None, \n save_guidance_path=save_guidance_path\n )\n\n \n if 'IF' in self.guidance:\n # interpolate text_z\n azimuth = data['azimuth'] # [-180, 180]\n\n # ENHANCE: remove loop to handle batch size > 1\n # ENHANCE: remove loop to handle batch size > 1\n text_z = [] \n for b in range(azimuth.shape[0]):\n if azimuth[b] >= -90 and azimuth[b] < 90:\n if azimuth[b] >= 0:\n r = 1 - azimuth[b] / 90\n else:\n r = 1 + azimuth[b] / 90\n start_z = self.embeddings['IF']['front']\n end_z = self.embeddings['IF']['side']\n else:\n if azimuth[b] >= 0:\n r = 1 - (azimuth[b] - 90) / 90\n else:\n r = 1 + (azimuth[b] + 90) / 90\n start_z = self.embeddings['IF']['side']\n end_z = self.embeddings['IF']['back']\n text_z.append(r * start_z + (1 - r) * end_z)\n text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)\n text_z = torch.cat(text_z, dim=1).reshape(B, 2, start_z.shape[-2]-1, start_z.shape[-1]).transpose(0, 1).flatten(0, 1)\n loss_if = self.guidance['IF'].train_step(text_z, pred_rgb, guidance_scale=self.opt.guidance_scale['IF'], grad_scale=self.opt.lambda_guidance['IF'])\n\n if 'zero123' in self.guidance:\n\n polar = data['polar']\n azimuth = data['azimuth']\n radius = data['radius']\n\n # ipdb.set_trace()\n input_3dprior = pred_rgb\n loss_zero123 = 
self.guidance['zero123'].train_step(self.embeddings['zero123']['default'], input_3dprior, polar, azimuth, radius, guidance_scale=self.opt.guidance_scale['zero123'],\n as_latent=as_latent, grad_scale=self.opt.lambda_guidance['zero123'], save_guidance_path=save_guidance_path)\n\n if 'clip' in self.guidance:\n\n # empirical, far view should apply smaller CLIP loss\n lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance['clip']\n loss_clip = self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb, grad_scale=lambda_guidance)\n loss = loss_sds + loss_if + loss_zero123 + loss_clip\n\n # regularizations\n if not self.opt.dmtet:\n\n if self.opt.lambda_opacity > 0: # 0\n loss_opacity = self.opt.lambda_opacity * (outputs['weights_sum'] ** 2).mean()\n\n if self.opt.lambda_entropy > 0: # 1e-3\n lambda_entropy = self.opt.lambda_entropy * \\\n min(1, 2 * self.global_step / self.opt.iters)\n alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5)\n # alphas = alphas ** 2 # skewed entropy, favors 0 over 1\n loss_entropy = lambda_entropy * (- alphas * torch.log2(alphas) -\n (1 - alphas) * torch.log2(1 - alphas)).mean()\n\n if self.opt.lambda_normal_smooth > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0\n pred_vals = outputs['normal_image'].reshape(B, H, W, 3)\n # total-variation\n loss_smooth = (pred_vals[:, 1:, :, :] - pred_vals[:, :-1, :, :]).square().mean() + \\\n (pred_vals[:, :, 1:, :] -\n pred_vals[:, :, :-1, :]).square().mean()\n loss_smooth = self.opt.lambda_normal_smooth * loss_smooth\n\n if self.opt.lambda_normal_smooth2d > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0\n pred_vals = outputs['normal_image'].reshape(\n B, H, W, 3).permute(0, 3, 1, 2).contiguous()\n smoothed_vals = TF.gaussian_blur(pred_vals, kernel_size=9)\n loss_smooth2d = self.opt.lambda_normal_smooth2d * F.mse_loss(pred_vals, smoothed_vals)\n\n if self.opt.lambda_orient > 0 and 'loss_orient' in outputs: # 1e-2\n loss_orient = self.opt.lambda_orient * outputs['loss_orient']\n \n if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs: # 0\n loss_smooth3d = self.opt.lambda_3d_normal_smooth * outputs['loss_normal_perturb']\n\n loss += loss_opacity + loss_entropy + loss_smooth + loss_smooth2d + loss_orient + loss_smooth3d\n \n else:\n if self.opt.lambda_mesh_normal > 0:\n loss_mesh_normal = self.opt.lambda_mesh_normal * \\\n outputs['loss_normal']\n\n if self.opt.lambda_mesh_lap > 0:\n loss_mesh_lap = self.opt.lambda_mesh_lap * outputs['loss_lap']\n loss += loss_mesh_normal + loss_mesh_lap\n\n losses_dict = {\n 'loss': loss.item(),\n 'loss_sds': loss_sds.item(),\n 'loss_if': loss_if.item(),\n 'loss_zero123': loss_zero123.item(),\n 'loss_clip': loss_clip.item(),\n 'loss_rgb': loss_rgb.item(),\n 'loss_mask': loss_mask.item(),\n 'loss_normal': loss_normal.item(),\n 'loss_depth': loss_depth.item(),\n 'loss_opacity': loss_opacity.item(),\n 'loss_entropy': loss_entropy.item(),\n 'loss_smooth': loss_smooth.item(),\n 'loss_smooth2d': loss_smooth2d.item(),\n 'loss_smooth3d': loss_smooth3d.item(),\n 'loss_orient': loss_orient.item(),\n 'loss_mesh_normal': loss_mesh_normal.item(),\n 'loss_mesh_lap': loss_mesh_lap.item(),\n }\n\n \n if 'normal' in out_dict:\n out_dict['normal'] = out_dict['normal'].permute(0, 3, 1, 2).contiguous()\n\n # save for debug purpose\n if self.opt.save_train_every > 0 and self.global_step % self.opt.save_train_every == 0:\n image_save_path = os.path.join(self.workspace, 'train_debug',)\n 
os.makedirs(image_save_path, exist_ok=True)\n for key, value in out_dict.items():\n if value is not None:\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n try:\n save_tensor2image(value, os.path.join(image_save_path, f'train_{self.global_step:06d}_{key}.jpg'), channel_last=False) \n except:\n pass\n return loss, losses_dict, out_dict \n\n def post_train_step(self):\n\n # unscale grad before modifying it!\n # ref: https://pytorch.org/docs/stable/notes/amp_examples.html#gradient-clipping\n self.scaler.unscale_(self.optimizer)\n\n # clip grad\n if self.opt.grad_clip >= 0:\n torch.nn.utils.clip_grad_value_(self.model.parameters(), self.opt.grad_clip)\n\n if not self.opt.dmtet and self.opt.backbone == 'grid':\n\n if self.opt.lambda_tv > 0:\n lambda_tv = min(1.0, self.global_step / (0.5 * self.opt.iters)) * self.opt.lambda_tv\n self.model.encoder.grad_total_variation(lambda_tv, None, self.model.bound)\n if self.opt.lambda_wd > 0:\n self.model.encoder.grad_weight_decay(self.opt.lambda_wd)\n\n\n def eval_step(self, data):\n\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp']\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n shading = data['shading'] if 'shading' in data else 'lambertian' \n ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0\n light_d = data['light_d'] if 'light_d' in data else None\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading)\n pred_rgb = outputs['image'].reshape(B, H, W, 3)\n pred_depth = outputs['depth'].reshape(B, H, W, 1)\n if self.opt.normalize_depth: \n pred_depth = nonzero_normalize_depth(pred_depth)\n if 'normal_image' in outputs:\n pred_normal = outputs['normal_image'].reshape(B, H, W, 3)\n else:\n pred_normal = None \n out_dict = {\n shading: pred_rgb,\n 'depth': pred_depth,\n 'normal_image': pred_normal,\n }\n # dummy\n loss = torch.zeros([1], device=pred_rgb.device, dtype=pred_rgb.dtype)\n return out_dict, loss\n\n def test_step(self, data, bg_color=None, perturb=False, shading='lambertian'):\n rays_o = data['rays_o'] # [B, N, 3]\n rays_d = data['rays_d'] # [B, N, 3]\n mvp = data['mvp']\n\n B, N = rays_o.shape[:2]\n H, W = data['H'], data['W']\n\n bg_color = self.get_bg_color(bg_color, B*N, rays_o.device)\n\n shading = data['shading'] if 'shading' in data else shading \n ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0\n light_d = data['light_d'] if 'light_d' in data else None\n\n outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=perturb, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading, bg_color=bg_color)\n\n pred_rgb = outputs['image'].reshape(B, H, W, 3)\n pred_depth = outputs['depth'].reshape(B, H, W, 1)\n pred_mask = outputs['weights_sum'].reshape(B, H, W, 1)\n # if self.opt.normalize_depth: \n pred_depth = nonzero_normalize_depth(pred_depth)\n if 'normal_image' in outputs:\n pred_normal = outputs['normal_image'].reshape(B, H, W, 3)\n pred_normal = pred_normal * pred_mask + (1.0 - pred_mask) \n else:\n pred_normal = None \n out_dict = {\n shading: pred_rgb,\n 'depth': pred_depth,\n 'normal_image': pred_normal,\n 'mask': pred_mask,\n }\n return out_dict\n\n def save_mesh(self, loader=None, save_path=None):\n\n if save_path is None:\n save_path = os.path.join(self.workspace, 'mesh')\n\n logger.info(f\"==> Saving mesh to {save_path}\")\n\n 
os.makedirs(save_path, exist_ok=True)\n\n self.model.export_mesh(save_path, resolution=self.opt.mcubes_resolution, decimate_target=self.opt.decimate_target)\n\n logger.info(f\"==> Finished saving mesh.\")\n\n ### ------------------------------\n\n def train(self, train_loader, valid_loader, test_loader, max_epochs):\n\n if self.use_tensorboard and self.local_rank == 0:\n self.writer = SummaryWriter(\n os.path.join(self.workspace, \"run\", self.name))\n\n # init from nerf should be performed after Shap-E, since Shap-E will rescale dmtet\n if self.opt.dmtet and (self.opt.init_ckpt and os.path.exists(self.opt.init_ckpt)):\n reset_scale = False if self.opt.use_shape else True\n old_sdf = self.model.get_sdf_from_nerf(reset_scale)\n if not self.opt.tet_mlp:\n self.model.dmtet.init_tet_from_sdf(old_sdf)\n self.test(valid_loader, name=f'init_ckpt', write_video=False, save_each_frame=False, subfolder='check_init')\n else:\n old_sdf = None\n \n if self.opt.use_shape and self.opt.dmtet:\n os.makedirs(os.path.join(self.opt.workspace, 'shape'), exist_ok=True)\n best_loss = torch.inf\n best_idx = 0\n for idx, (sdf, color) in enumerate(zip(self.opt.rpsts, self.opt.colors)):\n self.model.init_tet_from_sdf_color(sdf)\n pred_rgb, pred_mask, rgb_loss, mask_loss = self.match_known()\n best_loss = min(best_loss, mask_loss)\n if best_loss == mask_loss:\n best_idx = idx\n logger.info(f\"==> Current best match shape known sdf idx: {best_idx}\")\n save_tensor2image(pred_mask, os.path.join(self.opt.workspace, 'shape', f\"match_shape_known_{idx}_rgb.jpg\"), channel_last=False)\n self.test(valid_loader, name=f'idx_{idx}', write_video=False, save_each_frame=False, subfolder='check_init')\n \n sdf = self.opt.rpsts[best_idx]\n self.model.init_tet_from_sdf_color(sdf, self.opt.colors[best_idx])\n self.test(valid_loader, name=f'shape_only', write_video=False, save_each_frame=False, subfolder='check_init')\n\n # Enable mixture model\n if self.opt.base_mesh:\n logger.info(f\"==> Enable mixture model with base mesh {self.opt.base_mesh}\")\n mesh_sdf = self.model.dmtet.get_sdf_from_mesh(self.opt.base_mesh)\n sdf = (mesh_sdf.clamp(0, 1) + sdf.clamp(0,1) ).clamp(0, 1)\n\n if old_sdf is not None:\n sdf = (sdf.clamp(0, 1) + old_sdf.clamp(0, 1)).clamp(0, 1)\n\n self.model.init_tet_from_sdf_color(sdf, self.opt.colors[best_idx])\n self.test(valid_loader, name=f'shape_merge', write_video=False, save_each_frame=False, subfolder='check_init')\n\n del best_loss, best_idx, pred_rgb, pred_mask, rgb_loss, mask_loss\n self.opt.rpsts = None\n gc.collect()\n torch.cuda.empty_cache()\n\n\n start_t = time.time()\n\n for epoch in range(self.epoch + 1, max_epochs + 1):\n self.epoch = epoch\n\n self.train_one_epoch(train_loader, max_epochs)\n\n if self.workspace is not None and self.local_rank == 0:\n if self.epoch % self.opt.save_interval == 0:\n self.save_checkpoint(full=True, best=False)\n\n if self.epoch % self.opt.eval_interval == 0:\n self.evaluate_one_epoch(valid_loader) \n # best_save = True if self.epoch % self.opt.save_interval else False\n self.save_checkpoint(full=False, best=True)\n\n if self.epoch % self.opt.test_interval == 0 or self.epoch == max_epochs:\n self.test(test_loader, img_folder='images' if self.epoch == max_epochs else f'images_ep{self.epoch:04d}')\n\n end_t = time.time()\n\n self.total_train_t = end_t - start_t + self.total_train_t\n\n logger.info(f\"[INFO] training takes {(self.total_train_t)/ 60:.4f} minutes.\")\n\n if self.use_tensorboard and self.local_rank == 0:\n self.writer.close()\n\n def evaluate(self, loader, 
name=None):\n self.use_tensorboard, use_tensorboard = False, self.use_tensorboard\n self.evaluate_one_epoch(loader, name)\n self.use_tensorboard = use_tensorboard\n\n def test(self, loader, save_path=None, name=None, \n write_video=True, save_each_frame=True, shading='lambertian', \n subfolder='results', img_folder='images'\n ):\n\n if save_path is None:\n save_path = os.path.join(self.workspace, subfolder)\n image_save_path = os.path.join(self.workspace, subfolder, img_folder)\n\n if name is None:\n name = f'{self.name}_ep{self.epoch:04d}'\n\n os.makedirs(save_path, exist_ok=True)\n os.makedirs(image_save_path, exist_ok=True)\n\n logger.info(f\"==> Start Test, saving {shading} results to {save_path}\")\n\n pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]')\n self.model.eval()\n\n all_outputs = {} \n with torch.no_grad():\n for i, data in enumerate(loader):\n # if i > 20:\n # break\n with torch.cuda.amp.autocast(enabled=self.fp16):\n outputs = self.test_step(data, bg_color=self.opt.bg_color_test, shading=shading)\n for key, value in outputs.items():\n if value is not None:\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n if save_each_frame:\n save_tensor2image(value, os.path.join(image_save_path, f'{name}_{i:04d}_{key}.jpg'), channel_last=True) \n if key not in all_outputs.keys():\n all_outputs[key] = []\n all_outputs[key].append(value)\n pbar.update(loader.batch_size)\n\n for key, value in all_outputs.items():\n all_outputs[key] = torch.cat(value, dim=0) # B,H,W,C, B is all the pose results\n # if video -> B,F,H,W,C \n \n if write_video:\n for key, value in all_outputs.items():\n # current version torchvision does not support writing a single-channel video\n # torchvision.io.write_video(os.path.join(save_path, f'{name}_{key}.mp4'), all_outputs[key].detach().cpu(), fps=25)\n # imageio.mimwrite(os.path.join(save_path, f'{name}_{key}.mp4'), all_outputs[key].detach().cpu().numpy(), fps=25, quality=8, macro_block_size=1)\n one_video_save(os.path.join(save_path, f'{name}_{key}.mp4'), all_outputs[key])\n\n for key, value in all_outputs.items():\n save_tensor2image(value, os.path.join(save_path, f'{name}_{key}_grid.jpg'), channel_last=True)\n logger.info(f\"==> Finished Test.\")\n\n # [GUI] train text step.\n def train_gui(self, train_loader, step=16):\n\n self.model.train()\n\n total_loss = torch.tensor([0], dtype=torch.float32, device=self.device)\n\n loader = iter(train_loader)\n\n for _ in range(step):\n\n # mimic an infinite loop dataloader (in case the total dataset is smaller than step)\n try:\n data = next(loader)\n except StopIteration:\n loader = iter(train_loader)\n data = next(loader)\n\n # update grid every 16 steps\n if self.model.cuda_ray and self.global_step % self.opt.update_extra_interval == 0:\n with torch.cuda.amp.autocast(enabled=self.fp16):\n self.model.update_extra_state()\n\n self.global_step += 1\n\n self.optimizer.zero_grad()\n\n with torch.cuda.amp.autocast(enabled=self.fp16):\n loss, loss_dicts, outputs = self.train_step(data)\n\n self.scaler.scale(loss).backward()\n self.post_train_step()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n if self.scheduler_update_every_step:\n self.lr_scheduler.step()\n\n self.loss_meter.update(loss_dicts)\n \n if self.ema is not None:\n self.ema.update()\n\n average_loss = self.loss_meter.meters['loss'].avg\n\n if not self.scheduler_update_every_step:\n if 
isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(average_loss)\n else:\n self.lr_scheduler.step()\n\n outputs = {\n 'loss': average_loss,\n 'lr': self.optimizer.param_groups[0]['lr'],\n }\n\n return outputs\n\n\n # [GUI] test on a single image\n def test_gui(self, pose, intrinsics, mvp, W, H, bg_color=None, spp=1, downscale=1, light_d=None, ambient_ratio=1.0, shading='albedo'):\n\n # render resolution (may need downscale to for better frame rate)\n rH = int(H * downscale)\n rW = int(W * downscale)\n intrinsics = intrinsics * downscale\n\n pose = torch.from_numpy(pose).unsqueeze(0).to(self.device)\n mvp = torch.from_numpy(mvp).unsqueeze(0).to(self.device)\n\n rays = get_rays(pose, intrinsics, rH, rW, -1)\n\n # from degree theta/phi to 3D normalized vec\n light_d = np.deg2rad(light_d)\n light_d = np.array([\n np.sin(light_d[0]) * np.sin(light_d[1]),\n np.cos(light_d[0]),\n np.sin(light_d[0]) * np.cos(light_d[1]),\n ], dtype=np.float32)\n light_d = torch.from_numpy(light_d).to(self.device)\n\n data = {\n 'rays_o': rays['rays_o'],\n 'rays_d': rays['rays_d'],\n 'mvp': mvp,\n 'H': rH,\n 'W': rW,\n 'light_d': light_d,\n 'ambient_ratio': ambient_ratio,\n 'shading': shading,\n }\n\n self.model.eval()\n\n if self.ema is not None:\n self.ema.store()\n self.ema.copy_to()\n\n with torch.no_grad():\n with torch.cuda.amp.autocast(enabled=self.fp16):\n # here spp is used as perturb random seed!\n outputs = self.test_step(\n data, bg_color=bg_color, perturb=False if spp == 1 else spp)\n\n if self.ema is not None:\n self.ema.restore()\n\n # interpolation to the original resolution\n if downscale != 1:\n # have to permute twice with torch...\n outputs[shading] = F.interpolate(outputs[shading].permute(0, 3, 1, 2), size=(\n H, W), mode='nearest').permute(0, 2, 3, 1).contiguous()\n outputs['depth'] = F.interpolate(outputs['depth'].unsqueeze(\n 1), size=(H, W), mode='nearest').squeeze(1)\n\n if outputs['normal_imagea'] is not None:\n outputs['normal_image'] = F.interpolate(outputs['normal_image'].unsqueeze(1), size=(H, W), mode='nearest').squeeze(1)\n\n return outputs\n\n def train_one_epoch(self, loader, max_epochs):\n logger.info(f\"==> [{time.strftime('%Y-%m-%d_%H-%M-%S')}] Start Training {self.workspace} Epoch {self.epoch}/{max_epochs}, lr={self.optimizer.param_groups[0]['lr']:.6f} ...\")\n\n if self.local_rank == 0 and self.report_metric_at_train:\n for metric in self.metrics:\n metric.clear()\n\n self.model.train()\n\n # distributedSampler: must call set_epoch() to shuffle indices across multiple epochs\n # ref: https://pytorch.org/docs/stable/data.html\n if self.world_size > 1:\n loader.sampler.set_epoch(self.epoch)\n\n self.local_step = 0\n\n for data in loader:\n\n # update grid every 16 steps\n if (self.model.cuda_ray or self.model.taichi_ray) and self.global_step % self.opt.update_extra_interval == 0:\n with torch.cuda.amp.autocast(enabled=self.fp16):\n self.model.update_extra_state()\n \n # Update grid\n if self.opt.grid_levels_mask > 0:\n if self.global_step > self.opt.grid_levels_mask_iters:\n self.model.grid_levels_mask = 0\n else:\n self.model.grid_levels_mask = self.opt.grid_levels_mask\n\n self.local_step += 1\n self.global_step += 1\n\n ## update optimizer\n if self.global_step == self.opt.lr_time_iter:\n # ipdb.set_trace()\n grad_vars = self.model.get_params(self.opt.lr, self.opt.lr_scale_time)\n self.optimizer = torch.optim.Adam(\n grad_vars, betas=(0.9, 0.99), eps=1e-15\n )\n self.lr_scheduler.optimizer = self.optimizer\n\n 
self.optimizer.zero_grad()\n # ipdb.set_trace()\n with torch.cuda.amp.autocast(enabled=self.fp16):\n loss, losses_dict, outputs = self.train_step(data)\n\n # hooked grad clipping for RGB space\n if self.opt.grad_clip_rgb >= 0:\n def _hook(grad):\n if self.opt.fp16:\n # correctly handle the scale\n grad_scale = self.scaler._get_scale_async()\n return grad.clamp(grad_scale * -self.opt.grad_clip_rgb, grad_scale * self.opt.grad_clip_rgb)\n else:\n return grad.clamp(-self.opt.grad_clip_rgb, self.opt.grad_clip_rgb)\n outputs['rgb'].register_hook(_hook)\n # if (self.global_step <= self.opt.known_iters or self.global_step % self.opt.known_view_interval == 0) and self.opt.image is not None and self.opt.joint_known_unknown and known_rgbs is not None:\n # known_rgbs.register_hook(_hook)\n # pred_rgbs.retain_grad()\n\n self.scaler.scale(loss).backward()\n # ipdb.set_trace()\n self.post_train_step()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n\n if self.scheduler_update_every_step:\n self.lr_scheduler.step()\n \n\n self.loss_meter.update(losses_dict)\n\n # last_losses_dict = losses_dict\n # last_grad = [layer.weight.grad.data.detach() for layer in self.model.deformation_net.net]\n\n if self.local_rank == 0:\n # if self.report_metric_at_train:\n # for metric in self.metrics:\n # metric.update(preds, truths)\n\n if self.use_tensorboard:\n\n for key, val in losses_dict.items():\n self.writer.add_scalar(\n f\"train/{key}\", val, self.global_step) \n\n self.writer.add_scalar(\n \"train/lr\", self.optimizer.param_groups[0]['lr'], self.global_step)\n\n if self.global_step % self.opt.log_every == 0:\n strings = f\"==> Train [Step] {self.global_step}/{self.opt.iters}\"\n for key, value in losses_dict.items():\n strings += f\", {key}={value:.4f}\"\n logger.info(strings)\n strings = f\"==> Train [Avg] {self.global_step}/{self.opt.iters}\"\n for key in self.loss_meter.meters.keys():\n strings += f\", {key}={self.loss_meter.meters[key].avg:.4f}\"\n logger.info(strings)\n\n if self.ema is not None:\n self.ema.update()\n \n average_loss = self.loss_meter.meters['loss'].avg\n self.stats[\"loss\"].append(average_loss)\n\n if self.local_rank == 0:\n # pbar.close()\n if self.report_metric_at_train:\n for metric in self.metrics:\n logger.info(metric.report(), style=\"red\")\n if self.use_tensorboard:\n metric.write(self.writer, self.epoch, prefix=\"train\")\n metric.clear()\n\n if not self.scheduler_update_every_step:\n if isinstance(self.lr_scheduler, torch.optim.lr_scheduler.ReduceLROnPlateau):\n self.lr_scheduler.step(average_loss)\n else:\n self.lr_scheduler.step()\n\n\n # Visualize Training\n if self.local_rank == 0:\n # save image\n save_path = os.path.join(\n self.workspace, 'training')\n os.makedirs(save_path, exist_ok=True)\n name = f'train_{self.name}_ep{self.epoch:04d}'\n for key, value in outputs.items():\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n save_tensor2image(value, os.path.join(save_path, f'{name}_{key}.jpg'), channel_last=False) \n gpu_mem = get_GPU_mem()[0]\n logger.info(f\"==> [Finished Epoch {self.epoch}/{max_epochs}. 
GPU={gpu_mem:.1f}GB.\")\n\n def evaluate_one_epoch(self, loader, name=None):\n logger.info(f\"++> Evaluate {self.workspace} at epoch {self.epoch} ...\")\n\n if name is None:\n name = f'{self.name}_ep{self.epoch:04d}'\n\n total_loss = 0\n if self.local_rank == 0:\n for metric in self.metrics:\n metric.clear()\n\n self.model.eval()\n \n if self.ema is not None:\n self.ema.store()\n self.ema.copy_to()\n\n if self.local_rank == 0:\n pbar = tqdm.tqdm(total=len(loader) * loader.batch_size, bar_format='{desc}: {percentage:3.0f}% {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]')\n\n with torch.no_grad():\n self.local_step = 0\n \n all_outputs = {} \n for data in loader:\n self.local_step += 1\n\n with torch.cuda.amp.autocast(enabled=self.fp16):\n outputs, loss = self.eval_step(data)\n\n # all_gather/reduce the statistics (NCCL only support all_*)\n if self.world_size > 1:\n dist.all_reduce(loss, op=dist.ReduceOp.SUM)\n loss = loss / self.world_size\n \n for key, value in outputs.items():\n if value is not None:\n dist.all_gather(outputs[key])\n outputs[key] = torch.cat(outputs[key], dim=0)\n \n loss_val = loss.item()\n total_loss += loss_val\n\n # only rank = 0 will perform evaluation.\n if self.local_rank == 0:\n\n # save image\n save_path = os.path.join(\n self.workspace, 'validation')\n\n # logger.info(f\"==> Saving validation image to {save_path}\")\n os.makedirs(save_path, exist_ok=True)\n\n for key, value in outputs.items():\n if value is not None:\n value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8)\n # save_tensor2image(value, os.path.join(save_path, f'{name}_{self.local_step:04d}_{key}.jpg')) \n if key not in all_outputs.keys():\n all_outputs[key] = []\n all_outputs[key].append(value)\n\n pbar.set_description(\n f\"loss={loss_val:.4f} ({total_loss/self.local_step:.4f})\")\n pbar.update(loader.batch_size)\n\n\n average_loss = total_loss / self.local_step\n self.stats[\"valid_loss\"].append(average_loss)\n\n if self.local_rank == 0:\n pbar.close()\n if not self.use_loss_as_metric and len(self.metrics) > 0:\n result = self.metrics[0].measure()\n self.stats[\"results\"].append(result if self.best_mode == 'min' else - result) # if max mode, use -result\n else:\n self.stats[\"results\"].append(average_loss) # if no metric, choose best by min loss\n\n for metric in self.metrics:\n logger.info(metric.report(), style=\"blue\")\n if self.use_tensorboard:\n metric.write(self.writer, self.epoch, prefix=\"evaluate\")\n metric.clear()\n \n for key, value in all_outputs.items():\n all_outputs[key] = torch.cat(value, dim=0)\n save_tensor2image(all_outputs[key], os.path.join(save_path, f'{name}_{key}.jpg'), channel_last=True)\n if self.ema is not None:\n self.ema.restore()\n\n logger.info(f\"++> Evaluate epoch {self.epoch} Finished.\")\n\n def save_checkpoint(self, name=None, full=False, best=False):\n\n if name is None:\n name = f'{self.name}_ep{self.epoch:04d}'\n\n state = {\n 'epoch': self.epoch,\n 'global_step': self.global_step,\n 'stats': self.stats,\n }\n\n if self.model.cuda_ray:\n state['mean_density'] = self.model.mean_density\n\n if self.opt.dmtet:\n state['tet_scale'] = self.model.dmtet.tet_scale.cpu().numpy()\n\n if full:\n state['optimizer'] = self.optimizer.state_dict()\n state['lr_scheduler'] = self.lr_scheduler.state_dict()\n state['scaler'] = self.scaler.state_dict()\n if self.ema is not None:\n state['ema'] = self.ema.state_dict()\n\n if not best:\n\n state['model'] = self.model.state_dict()\n\n file_path = f\"{name}.pth\"\n\n 
self.stats[\"checkpoints\"].append(file_path)\n\n if len(self.stats[\"checkpoints\"]) > self.max_keep_ckpt:\n old_ckpt = os.path.join(\n self.opt.ckpt_path, self.stats[\"checkpoints\"].pop(0))\n if os.path.exists(old_ckpt):\n os.remove(old_ckpt)\n\n torch.save(state, os.path.join(self.opt.ckpt_path, file_path))\n\n \n \n\n else:\n if len(self.stats[\"results\"]) > 0:\n # always save best since loss cannot reflect performance.\n if True:\n # logger.info(f\"[INFO] New best result: {self.stats['best_result']} --> {self.stats['results'][-1]}\")\n # self.stats[\"best_result\"] = self.stats[\"results\"][-1]\n\n # save ema results\n if self.ema is not None:\n self.ema.store()\n self.ema.copy_to()\n\n state['model'] = self.model.state_dict()\n\n if self.ema is not None:\n self.ema.restore()\n\n torch.save(state, self.opt.best_path)\n\n\n else:\n logger.info(\n f\"[WARN] no evaluated results found, skip saving best checkpoint.\")\n\n def load_checkpoint(self, checkpoint=None, model_only=False):\n if checkpoint is None:\n checkpoint_list = sorted(glob.glob(f'{self.opt.ckpt_path}/*.pth'))\n if checkpoint_list:\n checkpoint = checkpoint_list[-1]\n logger.info(f\"[INFO] Latest checkpoint is {checkpoint}\")\n else:\n logger.info(\n \"[WARN] No checkpoint found, model randomly initialized.\")\n return\n\n checkpoint_dict = torch.load(checkpoint, map_location=self.device)\n\n if 'model' not in checkpoint_dict:\n self.model.load_state_dict(checkpoint_dict)\n logger.info(\"[INFO] loaded model.\")\n return\n\n missing_keys, unexpected_keys = self.model.load_state_dict(checkpoint_dict['model'], strict=False)\n logger.info(\"[INFO] loaded model.\")\n if len(missing_keys) > 0:\n logger.info(f\"[WARN] missing keys: {missing_keys}\")\n if len(unexpected_keys) > 0:\n logger.info(f\"[WARN] unexpected keys: {unexpected_keys}\")\n\n if self.ema is not None and 'ema' in checkpoint_dict:\n try:\n self.ema.load_state_dict(checkpoint_dict['ema'])\n logger.info(\"[INFO] loaded EMA.\")\n except:\n logger.info(\"[WARN] failed to loaded EMA.\")\n\n if self.model.cuda_ray:\n if 'mean_density' in checkpoint_dict:\n self.model.mean_density = checkpoint_dict['mean_density']\n\n if self.opt.dmtet:\n if 'tet_scale' in checkpoint_dict:\n new_scale = torch.from_numpy(\n checkpoint_dict['tet_scale']).to(self.device)\n self.model.dmtet.verts *= new_scale / self.model.dmtet.tet_scale\n self.model.dmtet.tet_scale = new_scale\n # self.model.init_tet() \n if model_only:\n return\n\n self.stats = checkpoint_dict['stats']\n self.epoch = checkpoint_dict['epoch']\n self.global_step = checkpoint_dict['global_step']\n logger.info(\n f\"[INFO] load at epoch {self.epoch}, global step {self.global_step}\")\n\n if self.optimizer and 'optimizer' in checkpoint_dict:\n try:\n self.optimizer.load_state_dict(checkpoint_dict['optimizer'])\n logger.info(\"[INFO] loaded optimizer.\")\n except:\n logger.info(\"[WARN] Failed to load optimizer.\")\n\n if self.lr_scheduler and 'lr_scheduler' in checkpoint_dict:\n try:\n self.lr_scheduler.load_state_dict(checkpoint_dict['lr_scheduler'])\n logger.info(\"[INFO] loaded scheduler.\")\n except:\n logger.info(\"[WARN] Failed to load scheduler.\")\n\n if self.scaler and 'scaler' in checkpoint_dict:\n try:\n self.scaler.load_state_dict(checkpoint_dict['scaler'])\n logger.info(\"[INFO] loaded scaler.\")\n except:\n logger.info(\"[WARN] Failed to load scaler.\")" }, { "identifier": "custom_meshgrid", "path": "nerf/utils.py", "snippet": "def custom_meshgrid(*args):\n # ref: 
https://pytorch.org/docs/stable/generated/torch.meshgrid.html?highlight=meshgrid#torch.meshgrid\n if pver.parse(torch.__version__) < pver.parse('1.10'):\n return torch.meshgrid(*args)\n else:\n return torch.meshgrid(*args, indexing='ij')" }, { "identifier": "safe_normalize", "path": "nerf/utils.py", "snippet": "def safe_normalize(x, eps=1e-20):\n return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))" } ]
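The two utility snippets quoted above (custom_meshgrid and safe_normalize) are small enough to exercise on their own. Below is a minimal, self-contained sketch, assuming only PyTorch and the packaging library, that copies those helpers verbatim and runs them on a toy pixel grid; the demo shapes and values are illustrative, not from the repo.

    # Sketch: exercise the two context snippets quoted above on a toy grid.
    import torch
    from packaging import version as pver

    def custom_meshgrid(*args):
        # torch.meshgrid gained the `indexing` kwarg in 1.10
        if pver.parse(torch.__version__) < pver.parse('1.10'):
            return torch.meshgrid(*args)
        else:
            return torch.meshgrid(*args, indexing='ij')

    def safe_normalize(x, eps=1e-20):
        return x / torch.sqrt(torch.clamp(torch.sum(x * x, -1, keepdim=True), min=eps))

    if __name__ == '__main__':
        H, W = 4, 6
        i, j = custom_meshgrid(torch.arange(H), torch.arange(W))          # both [H, W]
        dirs = torch.stack([j.float(), i.float(), torch.ones(H, W)], -1)  # [H, W, 3]
        print(safe_normalize(dirs).norm(dim=-1))   # all ~1.0
        print(safe_normalize(torch.zeros(3)))      # zeros, no NaN thanks to the eps clamp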
import os
import glob
import tqdm
import random
import logging
import gc
import numpy as np
import imageio, imageio_ffmpeg
import time
import cv2
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.distributed as dist
import torchvision.transforms.functional as TF
import ipdb
import copy
from torch import Tensor
from torch.utils.tensorboard import SummaryWriter
from torchvision.utils import make_grid
from torchmetrics.functional import pearson_corrcoef
from nerf.utils import save_tensor2image, nonzero_normalize_depth, Trainer
from einops import rearrange
from nerf.utils import custom_meshgrid, safe_normalize
from dnerf.network_4dgrid import NeRFNetwork
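Among these imports, pearson_corrcoef is what the trainer's relative depth loss is built on: only the correlation between predicted and reference depth is penalized, so the supervision is invariant to depth scale and shift. A stand-alone sketch of that term under assumed toy inputs follows; relative_depth_loss and the lambda_depth value are illustrative names/values, not taken from the repo.

    # Sketch of the Pearson-correlation depth term, assuming toy 1D inputs.
    import torch
    from torchmetrics.functional import pearson_corrcoef

    def relative_depth_loss(pred_depth, ref_depth, lambda_depth=0.1):
        # Penalize (1 - correlation): an affine remapping of depth costs nothing.
        return lambda_depth * (1 - pearson_corrcoef(pred_depth, ref_depth)) / 2

    pred = torch.rand(1000)                               # masked predicted depths, flattened
    ref = 0.5 * pred + 0.1 + 0.01 * torch.randn(1000)     # affine-related reference depths
    print(relative_depth_loss(pred, ref))                 # near 0: correlation is ~1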
21,277
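Both the Trainer snippet above and the DTrainer code further below interpolate per-direction text embeddings by azimuth (front, side, back views). A compact sketch of that blending rule, with random tensors standing in for the real text-encoder output; the function name and shapes are illustrative, not from the repo.

    # Sketch of azimuth-based blending between directional text embeddings.
    import torch

    def interp_direction_embedding(azimuth_deg, front, side, back):
        # azimuth in [-180, 180]; blend front<->side for |az| < 90, side<->back otherwise,
        # mirroring the branching used in train_step.
        a = float(azimuth_deg)
        if -90 <= a < 90:
            r = 1 - a / 90 if a >= 0 else 1 + a / 90
            start, end = front, side
        else:
            r = 1 - (a - 90) / 90 if a >= 0 else 1 + (a + 90) / 90
            start, end = side, back
        return r * start + (1 - r) * end

    front, side, back = (torch.randn(77, 1024) for _ in range(3))  # placeholder embeddings
    emb = interp_direction_embedding(30.0, front, side, back)      # 2/3 front + 1/3 side
    print(emb.shape)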
save_guidance_zero123_path = os.path.join(self.opt.workspace, 'guidance_zero123', f'train_step{self.global_step}_guidance.jpg') if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every ==0 else None polar = data['polar'] azimuth = data['azimuth'] radius = data['radius'] # input_3dprior B,3,H,W # ipdb.set_trace() input_3dprior = pred_rgb[:,0] loss_zero123 = self.guidance['zero123'].train_step(self.embeddings['zero123']['default'], input_3dprior, polar, azimuth, radius, guidance_scale=self.opt.guidance_scale['zero123'], as_latent=as_latent, grad_scale=self.opt.lambda_guidance['zero123'], save_guidance_path=save_guidance_zero123_path) if 'clip' in self.guidance: # empirical, far view should apply smaller CLIP loss lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance['clip'] loss_clip = self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb, grad_scale=lambda_guidance) loss += loss_sds + loss_if + loss_zero123 + loss_clip + loss_sr + loss_cn # regularizations if not self.opt.dmtet: if self.opt.lambda_opacity > 0: # 0 loss_opacity = self.opt.lambda_opacity * (outputs['weights_sum'] ** 2).mean() reg_losses_dict['loss_opacity'] = loss_opacity.item() if self.opt.lambda_entropy > 0: # 1e-3 lambda_entropy = self.opt.lambda_entropy * \ min(1, 2 * self.global_step / self.opt.iters) alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5) # alphas = alphas ** 2 # skewed entropy, favors 0 over 1 loss_entropy = lambda_entropy * (- alphas * torch.log2(alphas) - (1 - alphas) * torch.log2(1 - alphas)).mean() reg_losses_dict['loss_entropy'] = loss_entropy.item() if self.opt.lambda_normal_smooth > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0 pred_vals = outputs['normal_image'].reshape(-1, H, W, 3) # BF,H,W,3 # total-variation loss_smooth = (pred_vals[:, 1:, :, :] - pred_vals[:, :-1, :, :]).square().mean() + \ (pred_vals[:, :, 1:, :] - pred_vals[:, :, :-1, :]).square().mean() loss_smooth = self.opt.lambda_normal_smooth * loss_smooth reg_losses_dict['loss_smooth'] = loss_smooth.item() if self.opt.lambda_normal_smooth2d > 0 and 'normal_image' in outputs: # 0.5 # no image in sd-dreamfusion should be 0 pred_vals = outputs['normal_image'].reshape( -1, H, W, 3).permute(0,3,1,2).contiguous() # BF,3,H,W smoothed_vals = TF.gaussian_blur(pred_vals, kernel_size=9) loss_smooth2d = self.opt.lambda_normal_smooth2d * F.mse_loss(pred_vals, smoothed_vals) reg_losses_dict['loss_smooth2d'] = loss_smooth2d.item() if self.opt.lambda_orient > 0 and 'loss_orient' in outputs: # 1e-2 loss_orient = self.opt.lambda_orient * outputs['loss_orient'].mean() reg_losses_dict['loss_orient'] = loss_orient.item() if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs: # 0 loss_smooth3d = self.opt.lambda_3d_normal_smooth * outputs['loss_normal_perturb'].mean() reg_losses_dict['loss_smooth3d'] = loss_smooth3d.item() if self.opt.lambda_time_tv > 0: if self.opt.backbone == 'grid4d': loss_time_tv = self.opt.lambda_time_tv * self.model.TV_loss() reg_losses_dict['loss_time_tv'] = loss_time_tv.item() loss += loss_opacity + loss_entropy + loss_smooth + loss_smooth2d + loss_orient + loss_smooth3d + loss_time_tv + loss_canonical else: if self.opt.lambda_mesh_normal > 0: loss_mesh_normal = self.opt.lambda_mesh_normal * \ outputs['loss_normal'].mean() reg_losses_dict['loss_mesh_normal'] = loss_mesh_normal.item() if self.opt.lambda_mesh_lap > 0: loss_mesh_lap = self.opt.lambda_mesh_lap * outputs['loss_lap'].mean() reg_losses_dict['loss_mesh_lap'] 
= loss_mesh_lap.item() loss += loss_mesh_normal + loss_mesh_lap losses_dict = { 'loss': loss.item(), 'loss_sds': loss_sds.item(), 'loss_sr': loss_sr.item(), 'loss_cn': loss_cn.item(), # 'loss_if': loss_if.item(), 'loss_zero123': loss_zero123.item(), # 'loss_clip': loss_clip.item(), 'loss_rgb': loss_rgb.item(), 'loss_mask': loss_mask.item(), 'loss_normal': loss_normal.item(), 'loss_depth': loss_depth.item(), # 'loss_opacity': loss_opacity.item(), # 'loss_entropy': loss_entropy.item(), # 'loss_smooth': loss_smooth.item(), # 'loss_smooth2d': loss_smooth2d.item(), # 'loss_smooth3d': loss_smooth3d.item(), # 'loss_orient': loss_orient.item(), # 'loss_mesh_normal': loss_mesh_normal.item(), # 'loss_mesh_lap': loss_mesh_lap.item(), } losses_dict.update(reg_losses_dict) # if loss_guidance_dict: # for key, val in loss_guidance_dict.items(): # losses_dict[key] = val.item() if isinstance(val, torch.Tensor) else val if 'normal' in out_dict: out_dict['normal'] = rearrange(out_dict['normal'], "b f h w c -> b f c h w").contiguous() # B,F,H,W,3 -> B,F,3,H,W if torch.isnan(loss): ipdb.set_trace() # save for debug purpose if self.opt.save_train_every > 0 and self.global_step % self.opt.save_train_every == 0: image_save_path = os.path.join(self.workspace, 'train_debug',) os.makedirs(image_save_path, exist_ok=True) for key, value in out_dict.items(): if value is not None: value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8) # 0-255 try:
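The cropped snippet above includes an entropy regularizer that pushes per-sample ray opacities toward 0 or 1, with its weight ramped in over training. A stand-alone sketch of that term with placeholder weights; the function name and the sample values are illustrative.

    # Sketch of the opacity entropy regularizer, assuming random placeholder weights.
    import torch

    def opacity_entropy(weights, lambda_entropy, step, iters):
        # Binary entropy of clamped opacities, scaled by min(1, 2*step/iters)
        # as in the lambda_entropy schedule above.
        scale = lambda_entropy * min(1, 2 * step / iters)
        alphas = weights.clamp(1e-5, 1 - 1e-5)
        return scale * (-alphas * torch.log2(alphas)
                        - (1 - alphas) * torch.log2(1 - alphas)).mean()

    weights = torch.rand(4096)   # placeholder for outputs['weights']
    print(opacity_entropy(weights, 1e-3, step=2000, iters=10000))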
logger = logging.getLogger(__name__) class DTrainer(Trainer): def __init__(self, argv, name, opt, model, guidance, criterion=None, optimizer=None, ema_decay=None, lr_scheduler=None, metrics=[], local_rank=0, world_size=1, device=None, mute=False, fp16=False, max_keep_ckpt=1, workspace='workspace', best_mode='min', use_loss_as_metric=True, report_metric_at_train=False, use_checkpoint="latest", use_tensorboard=True, scheduler_update_every_step=False, **kwargs): super().__init__(argv, name, opt, model, guidance, criterion, optimizer, ema_decay, lr_scheduler, metrics, local_rank, world_size, device, mute, fp16, max_keep_ckpt, workspace, best_mode, use_loss_as_metric, report_metric_at_train, use_checkpoint, use_tensorboard, scheduler_update_every_step, **kwargs) self.rgbd_scale = opt.get("rgbd_scale", 1.0) self.fix_dynamic = opt.fix_dynamic if self.fix_dynamic: assert opt.backbone == 'grid4d' self.dynamic_model = NeRFNetwork(opt) # ipdb.set_trace() model_state_dict = self.model.state_dict() self.dynamic_model.load_state_dict(model_state_dict) for p in self.dynamic_model.parameters(): p.requires_grad = False self.dynamic_model.train() self.dynamic_model.to(opt.device) @torch.no_grad() def eval_static_step(self, data, shading): rays_o = data['rays_o'] # [B, N, 3] / B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] / B,F,N,3 mvp = data['mvp'] # B,4,4 / B,F,4,4 if rays_o.ndim == 4: rays_o = rays_o[:, 0] rays_d = rays_d[:, 0] mvp = mvp[:, 0] B, N = rays_o.shape[:2] H, W = data['H'], data['W'] ambient_ratio = data['ambient_ratio'] if 'ambient_ratio' in data else 1.0 light_d = data['light_d'] if 'light_d' in data else None # ipdb.set_trace() outputs = self.static_model.render(rays_o, rays_d, mvp, H, W, staged=True, perturb=False, bg_color=None, light_d=light_d, ambient_ratio=ambient_ratio, shading=shading) pred_rgb = outputs['image'].reshape(B, H, W, 3) pred_depth = outputs['depth'].reshape(B, H, W, 1) if self.opt.normalize_depth: pred_depth = nonzero_normalize_depth(pred_depth) if 'normal_image' in outputs: # eval mode no normal image pred_normal = outputs['normal_image'].reshape(B, H, W, 3) else: pred_normal = None pred_mask = outputs['weights_sum'].reshape(B, H, W, 1) out_dict = { 'rgb': pred_rgb, 'depth': pred_depth, 'normal_image': pred_normal, 'mask': pred_mask, } return out_dict def train_step(self, data): # perform RGBD loss instead of SDS if is image-conditioned do_rgbd_loss = self.opt.images is not None and \ ((self.global_step < self.opt.known_iters) or (self.global_step % self.opt.known_view_interval == 0)) # ipdb.set_trace() # override random camera with fixed known camera if do_rgbd_loss: data = self.default_view_data # progressively relaxing view range if self.opt.progressive_view: r = min(1.0, 0.2 + self.global_step / (0.5 * self.opt.iters)) self.opt.phi_range = [self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[0] * r, self.opt.default_azimuth * (1 - r) + self.opt.full_phi_range[1] * r] self.opt.theta_range = [self.opt.default_polar * (1 - r) + self.opt.full_theta_range[0] * r, self.opt.default_polar * (1 - r) + self.opt.full_theta_range[1] * r] self.opt.radius_range = [self.opt.default_radius * (1 - r) + self.opt.full_radius_range[0] * r, self.opt.default_radius * (1 - r) + self.opt.full_radius_range[1] * r] self.opt.fovy_range = [self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[0] * r, self.opt.default_fovy * (1 - r) + self.opt.full_fovy_range[1] * r] # progressively increase max_level if self.opt.progressive_level: self.model.max_level = min(1.0, 0.25 + 
self.global_step / (0.5 * self.opt.iters)) rays_o = data['rays_o'] # [B, N, 3] # B,F,N,3 rays_d = data['rays_d'] # [B, N, 3] # B,F,N,3 mvp = data['mvp'] # [B, 4, 4] / [B,F,4,4] time = data['time'] # [B,T] use_dynamic_cam = (rays_o.ndim == 4) B = rays_o.size(0) # ipdb.set_trace() N = rays_o.size(1) if not use_dynamic_cam else rays_o.size(2) H, W = data['H'], data['W'] # ipdb.set_trace() start_from_zero = data.get('start_from_zero', True) if start_from_zero: assert time[0,0] == 0 # When ref_data has B images > opt.batch_size if B > self.opt.batch_size: # choose batch_size images out of those B images choice = torch.randperm(B)[:self.opt.batch_size] B = self.opt.batch_size rays_o = rays_o[choice] rays_d = rays_d[choice] mvp = mvp[choice] if do_rgbd_loss: ambient_ratio = 1.0 shading = 'lambertian' # use lambertian instead of albedo to get normal as_latent = False binarize = False bg_color = self.get_bg_color( self.opt.bg_color_known, B*N, rays_o.device) # add camera noise to avoid grid-like artifact if self.opt.known_view_noise_scale > 0: noise_scale = self.opt.known_view_noise_scale #* (1 - self.global_step / self.opt.iters) rays_o = rays_o + torch.randn(3, device=self.device) * noise_scale rays_d = rays_d + torch.randn(3, device=self.device) * noise_scale elif self.global_step < (self.opt.latent_iter_ratio * self.opt.iters): ## 0 ambient_ratio = 1.0 shading = 'normal' as_latent = True binarize = False bg_color = None else: if self.global_step < (self.opt.normal_iter_ratio * self.opt.iters): # 0.2 ambient_ratio = 1.0 shading = 'normal' elif self.global_step < (self.opt.textureless_iter_ratio * self.opt.iters): # 0 ambient_ratio = 0.1 + 0.9 * random.random() shading = 'textureless' elif self.global_step < (self.opt.albedo_iter_ratio * self.opt.iters): # 0 ambient_ratio = 1.0 shading = 'albedo' else: # random shading ambient_ratio = 0.1 + 0.9 * random.random() rand = random.random() if rand < self.opt.textureless_rate: # 0.2 shading = 'textureless' else: shading = 'lambertian' as_latent = False # random weights binarization (like mobile-nerf) [NOT WORKING NOW] # binarize_thresh = min(0.5, -0.5 + self.global_step / self.opt.iters) # binarize = random.random() < binarize_thresh binarize = False # random background rand = random.random() # ipdb.set_trace() if self.opt.bg_radius > 0 and rand > 0.5: bg_color = None # use bg_net else: bg_color = torch.rand(3).to(self.device) # single color random bg ## NOTE if bg_radius < 0 -> the way magic123 use # The bg color is always random video_outputs = [] num_frames = time.size(1) light_d = safe_normalize(rays_o + torch.randn(3, device=rays_o.device)) outputs = self.model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True, bg_color=bg_color, ambient_ratio=ambient_ratio, shading=shading, binarize=binarize, time=time, do_rgbd_loss=do_rgbd_loss, light_d=light_d) # ipdb.set_trace() pred_depth = outputs['depth'].reshape(B, num_frames, 1, H, W) if self.opt.normalize_depth: pred_depth = nonzero_normalize_depth(pred_depth) pred_mask = outputs['weights_sum'].reshape(B, num_frames, 1, H, W) if 'normal_image' in outputs: pred_normal = outputs['normal_image'].reshape(B, num_frames, H, W, 3) else: pred_normal = None if as_latent: # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D) pred_rgb = torch.cat([outputs['image'], outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, num_frames, H, W, 4).permute(0,1,4,2,3).contiguous() # [B, F, 4, H, W] else: pred_rgb = outputs['image'].reshape(B, num_frames, H, W, 
        # ipdb.set_trace()
        if 'image_wo_bg' in outputs:
            image_wo_bg = outputs['image_wo_bg'] + (1 - outputs['weights_sum']).unsqueeze(-1) * 1  # [B, F, N, 3]
            if as_latent:
                # abuse normal & mask as latent code for faster geometry initialization (ref: fantasia3D)
                pred_rgb_wobg = torch.cat([image_wo_bg, outputs['weights_sum'].unsqueeze(-1)], dim=-1).reshape(B, num_frames, H, W, 4).permute(0, 1, 4, 2, 3).contiguous()  # [B, F, 4, H, W]
            else:
                pred_rgb_wobg = image_wo_bg.reshape(B, num_frames, H, W, 3).permute(0, 1, 4, 2, 3).contiguous()  # [B, F, 3, H, W]

        out_dict = {
            'rgb': pred_rgb,                # [B, F, 3, H, W]
            'depth': pred_depth,            # [B, F, 1, H, W]
            'mask': pred_mask,              # [B, F, 1, H, W]
            'normal': pred_normal,          # [B, F, H, W, 3]
            'pred_rgb_wobg': pred_rgb_wobg,
        }

        # Loss
        (loss_rgb, loss_mask, loss_normal, loss_depth, loss_sds, loss_if, loss_zero123, loss_clip,
         loss_entropy, loss_opacity, loss_orient, loss_smooth, loss_smooth2d, loss_smooth3d,
         loss_mesh_normal, loss_mesh_lap, loss_time_tv, loss_canonical, loss_sr,
         loss_cn) = torch.zeros(20, device=self.device)
        reg_losses_dict = {}
        loss = 0

        # known view loss
        # assert not do_rgbd_loss
        if do_rgbd_loss:
            ## NOTE this is only applied to the first frame
            # ipdb.set_trace()
            gt_mask = self.mask        # [B, H, W], bool
            gt_rgb = self.rgb          # [B, 3, H, W]
            gt_opacity = self.opacity  # [B, 1, H, W], float version of the mask
            gt_normal = self.normal    # [B, H, W, 3], may be None
            gt_depth = self.depth      # [N], depth at masked (valid) pixels only
            if len(gt_rgb) > self.opt.batch_size:
                gt_mask = gt_mask[choice]
                gt_rgb = gt_rgb[choice]
                gt_opacity = gt_opacity[choice]
                gt_normal = gt_normal[choice]
                gt_depth = gt_depth[choice]

            # color loss
            loss_rgb = self.opt.lambda_rgb * self.rgbd_scale * \
                F.mse_loss(pred_rgb[:, 0] * gt_opacity, gt_rgb * gt_opacity)  # [B, 3, H, W]

            # mask loss
            loss_mask = self.opt.lambda_mask * self.rgbd_scale * \
                F.mse_loss(pred_mask[:, 0], gt_mask.to(torch.float32).unsqueeze(0))

            # normal loss
            if self.opt.lambda_normal > 0 and 'normal_image' in outputs and self.normal is not None:
                pred_normal = pred_normal[:, 0][self.mask]
                lambda_normal = self.opt.lambda_normal * \
                    min(1, self.global_step / self.opt.iters)
                loss_normal = lambda_normal * self.rgbd_scale * \
                    (1 - F.cosine_similarity(pred_normal, self.normal).mean()) / 2

            # relative depth loss
            if self.opt.lambda_depth > 0 and self.depth is not None:
                valid_pred_depth = pred_depth[:, 0, 0][self.mask]
                loss_depth = self.opt.lambda_depth * self.rgbd_scale * \
                    (1 - pearson_corrcoef(valid_pred_depth, self.depth)) / 2

            loss = (loss_rgb + loss_mask + loss_normal + loss_depth)

        # novel view loss
        else:
            # ipdb.set_trace()
            static_rgb = None
            save_guidance_path = os.path.join(self.opt.workspace, 'guidance', f'train_step{self.global_step}_guidance.jpg') \
                if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every == 0 else None
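            # View-dependent prompting: for each sampled azimuth the text embedding is linearly
            # interpolated between the 'front'/'side'/'back' prompt embeddings (front-to-side for
            # azimuth in [-90, 90), side-to-back otherwise), so the guidance below sees a prompt
            # matching the rendered viewpoint unless opt.no_view_text disables it for dynamic cameras.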
            if 'SD' in self.guidance:
                # interpolate text_z
                azimuth = data['azimuth']  # [-180, 180]
                # ipdb.set_trace()
                ## NOTE should the view information be removed here?
                ## use the mid-frame view information
                if 'frame_azimuth' in data and use_dynamic_cam:
                    idx = num_frames // 2
                    azimuth = data['frame_azimuth'][idx:idx + 1]  # [1, 3]

                # ENHANCE: remove loop to handle batch size > 1
                text_z = []
                for b in range(azimuth.shape[0]):
                    if self.opt.no_view_text and use_dynamic_cam:
                        text_z.append(self.embeddings['SD']['default'])
                        continue
                    if azimuth[b] >= -90 and azimuth[b] < 90:
                        if azimuth[b] >= 0:
                            r = 1 - azimuth[b] / 90
                        else:
                            r = 1 + azimuth[b] / 90
                        start_z = self.embeddings['SD']['front']
                        end_z = self.embeddings['SD']['side']
                    else:
                        if azimuth[b] >= 0:
                            r = 1 - (azimuth[b] - 90) / 90
                        else:
                            r = 1 + (azimuth[b] + 90) / 90
                        start_z = self.embeddings['SD']['side']
                        end_z = self.embeddings['SD']['back']
                    text_z.append(r * start_z + (1 - r) * end_z)
                text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)
                # text_z_sds = text_z[:, :-1]  # this is to remove the cls token...
                text_z_sds = text_z
                loss_sds, _ = self.guidance['SD'].train_step(
                    text_z_sds, pred_rgb, as_latent=as_latent,
                    guidance_scale=self.opt.guidance_scale['SD'],
                    grad_scale=self.opt.lambda_guidance['SD'],
                    density=pred_mask if self.opt.gudiance_spatial_weighting else None,
                    save_guidance_path=save_guidance_path, step=self.global_step,
                )

            if 'CN' in self.guidance:
                # ipdb.set_trace()
                save_guidance_CN_path = os.path.join(self.opt.workspace, 'guidance_CN', f'train_step{self.global_step}_guidance.jpg') \
                    if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every == 0 else None
                # ipdb.set_trace()
                ## NOTE text_z_sds should not be reused here if the SR model uses a different text encoder
                ## get the frame indices for partial-frame updates
                index = torch.arange(0, self.opt.num_frames, step=1)  # default: use all frames
                if self.opt.cn_frames < self.opt.num_frames:
                    if self.opt.cn_frame_method == 'even':
                        assert self.opt.num_frames % self.opt.cn_frames == 0
                        interval = self.opt.num_frames // self.opt.cn_frames
                        index = torch.arange(0, self.opt.num_frames, step=interval)
                    elif self.opt.cn_frame_method == 'random':
                        index = torch.randperm(self.opt.num_frames)[:self.opt.cn_frames]
                    else:
                        raise NotImplementedError

                azimuth = data['azimuth']  # [-180, 180]
                # ipdb.set_trace()
                ## NOTE should the view information be removed here?
                if 'frame_azimuth' in data and use_dynamic_cam:
                    azimuth = data['frame_azimuth'][index]  # [N, 3]

                # ENHANCE: remove loop to handle batch size > 1
                text_z = []
                for b in range(azimuth.shape[0]):
                    if self.opt.no_view_text and use_dynamic_cam:
                        text_z.append(self.embeddings['CN']['default'])
                        continue
                    if azimuth[b] >= -90 and azimuth[b] < 90:
                        if azimuth[b] >= 0:
                            r = 1 - azimuth[b] / 90
                        else:
                            r = 1 + azimuth[b] / 90
                        start_z = self.embeddings['CN']['front']
                        end_z = self.embeddings['CN']['side']
                    else:
                        if azimuth[b] >= 0:
                            r = 1 - (azimuth[b] - 90) / 90
                        else:
                            r = 1 + (azimuth[b] + 90) / 90
                        start_z = self.embeddings['CN']['side']
                        end_z = self.embeddings['CN']['back']
                    text_z.append(r * start_z + (1 - r) * end_z)
                text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)  # TODO check B,2,77,C -> 2B,77,C?
                # text_z_sds = text_z[:, :-1]  # this is to remove the cls token...
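                # text_cn_sds below drives the score-distillation prompt for the ControlNet guidance,
                # while text_cn_cn is presumably the prompt fed to the ControlNet branch itself; a
                # dedicated 'CN' embedding is used only when opt.cn_cn_text is set.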
                text_cn_sds = text_z
                text_cn_cn = self.embeddings['CN']['CN'] if self.opt.cn_cn_text else text_cn_sds

                ## NOTE we use the online prediction here; will this lead to error accumulation? -> yes
                ## get the condition images
                cn_cn_pred_rgb = pred_rgb.detach()
                cn_pred_rgb = pred_rgb  # pred_rgb: [B, F, 3, H, W]
                # ipdb.set_trace()
                if self.fix_dynamic:
                    ## NOTE the dynamic render is not applied to the inference model, so the renderer should be the training model
                    with torch.no_grad():
                        outputs_dyn = self.dynamic_model.render(rays_o, rays_d, mvp, H, W, staged=False, perturb=True,
                                                                bg_color=bg_color, ambient_ratio=ambient_ratio,
                                                                shading=shading, binarize=binarize, time=time,
                                                                do_rgbd_loss=do_rgbd_loss, light_d=light_d)
                        # ipdb.set_trace()
                        pred_depth_dyn = outputs_dyn['depth'].reshape(B, num_frames, 1, H, W)
                        if self.opt.normalize_depth:
                            pred_depth_dyn = nonzero_normalize_depth(pred_depth_dyn)
                        pred_rgb_dyn = outputs_dyn['image'].reshape(B, num_frames, H, W, 3).permute(0, 1, 4, 2, 3).contiguous()  # [B, F, 3, H, W]
                    ## use the dynamic render from the fixed model as the controlnet condition input
                    cn_cn_pred_rgb = pred_rgb_dyn

                ## select frames with index
                cn_pred_rgb = pred_rgb[:, index]
                cn_cn_pred_rgb = cn_cn_pred_rgb[:, index]

                loss_cn, _ = self.guidance['CN'].train_step(
                    text_cn_sds, text_cn_cn, cn_pred_rgb, cn_cn_pred_rgb, as_latent=as_latent,
                    guidance_scale=self.opt.guidance_scale['CN'],
                    grad_scale=self.opt.lambda_guidance['CN'],
                    density=None,
                    save_guidance_path=save_guidance_CN_path, step=self.global_step,
                )

            if 'IF' in self.guidance:
                # interpolate text_z
                azimuth = data['azimuth']  # [-180, 180]

                # ENHANCE: remove loop to handle batch size > 1
                text_z = []
                for b in range(azimuth.shape[0]):
                    if azimuth[b] >= -90 and azimuth[b] < 90:
                        if azimuth[b] >= 0:
                            r = 1 - azimuth[b] / 90
                        else:
                            r = 1 + azimuth[b] / 90
                        start_z = self.embeddings['IF']['front']
                        end_z = self.embeddings['IF']['side']
                    else:
                        if azimuth[b] >= 0:
                            r = 1 - (azimuth[b] - 90) / 90
                        else:
                            r = 1 + (azimuth[b] + 90) / 90
                        start_z = self.embeddings['IF']['side']
                        end_z = self.embeddings['IF']['back']
                    text_z.append(r * start_z + (1 - r) * end_z)
                text_z = torch.stack(text_z, dim=0).transpose(0, 1).flatten(0, 1)
                # NOTE: the next line appears to assume the pre-stack list format of text_z
                text_z = torch.cat(text_z, dim=1).reshape(B, 2, start_z.shape[-2] - 1, start_z.shape[-1]).transpose(0, 1).flatten(0, 1)
                loss_if = self.guidance['IF'].train_step(text_z, pred_rgb,
                                                         guidance_scale=self.opt.guidance_scale['IF'],
                                                         grad_scale=self.opt.lambda_guidance['IF'])

            if 'zero123' in self.guidance and start_from_zero:
                # raise NotImplementedError
                save_guidance_zero123_path = os.path.join(self.opt.workspace, 'guidance_zero123', f'train_step{self.global_step}_guidance.jpg') \
                    if self.opt.save_guidance_every > 0 and self.global_step % self.opt.save_guidance_every == 0 else None
                polar = data['polar']
                azimuth = data['azimuth']
                radius = data['radius']
                # input_3dprior: [B, 3, H, W]
                # ipdb.set_trace()
                input_3dprior = pred_rgb[:, 0]
                loss_zero123 = self.guidance['zero123'].train_step(
                    self.embeddings['zero123']['default'], input_3dprior, polar, azimuth, radius,
                    guidance_scale=self.opt.guidance_scale['zero123'], as_latent=as_latent,
                    grad_scale=self.opt.lambda_guidance['zero123'],
                    save_guidance_path=save_guidance_zero123_path)

            if 'clip' in self.guidance:
                # empirical: far views should use a smaller CLIP loss
                lambda_guidance = 10 * (1 - abs(azimuth) / 180) * self.opt.lambda_guidance['clip']
                loss_clip = self.guidance['clip'].train_step(self.embeddings['clip'], pred_rgb,
                                                             grad_scale=lambda_guidance)

            loss += loss_sds + loss_if + loss_zero123 + loss_clip + loss_sr + loss_cn
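        # Each guidance call above receives grad_scale=self.opt.lambda_guidance[...], so the
        # per-guidance weighting is expected to be applied inside the corresponding train_step;
        # the sum here therefore adds the already-scaled loss terms.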
        # regularizations
        if not self.opt.dmtet:
            if self.opt.lambda_opacity > 0:  # 0
                loss_opacity = self.opt.lambda_opacity * (outputs['weights_sum'] ** 2).mean()
                reg_losses_dict['loss_opacity'] = loss_opacity.item()

            if self.opt.lambda_entropy > 0:  # 1e-3
                lambda_entropy = self.opt.lambda_entropy * \
                    min(1, 2 * self.global_step / self.opt.iters)
                alphas = outputs['weights'].clamp(1e-5, 1 - 1e-5)
                # alphas = alphas ** 2  # skewed entropy, favors 0 over 1
                loss_entropy = lambda_entropy * (- alphas * torch.log2(alphas) -
                                                 (1 - alphas) * torch.log2(1 - alphas)).mean()
                reg_losses_dict['loss_entropy'] = loss_entropy.item()

            if self.opt.lambda_normal_smooth > 0 and 'normal_image' in outputs:  # 0.5; should be 0 when there is no normal image (sd-dreamfusion)
                pred_vals = outputs['normal_image'].reshape(-1, H, W, 3)  # [BF, H, W, 3]
                # total variation
                loss_smooth = (pred_vals[:, 1:, :, :] - pred_vals[:, :-1, :, :]).square().mean() + \
                              (pred_vals[:, :, 1:, :] - pred_vals[:, :, :-1, :]).square().mean()
                loss_smooth = self.opt.lambda_normal_smooth * loss_smooth
                reg_losses_dict['loss_smooth'] = loss_smooth.item()

            if self.opt.lambda_normal_smooth2d > 0 and 'normal_image' in outputs:  # 0.5; should be 0 when there is no normal image (sd-dreamfusion)
                pred_vals = outputs['normal_image'].reshape(-1, H, W, 3).permute(0, 3, 1, 2).contiguous()  # [BF, 3, H, W]
                smoothed_vals = TF.gaussian_blur(pred_vals, kernel_size=9)
                loss_smooth2d = self.opt.lambda_normal_smooth2d * F.mse_loss(pred_vals, smoothed_vals)
                reg_losses_dict['loss_smooth2d'] = loss_smooth2d.item()

            if self.opt.lambda_orient > 0 and 'loss_orient' in outputs:  # 1e-2
                loss_orient = self.opt.lambda_orient * outputs['loss_orient'].mean()
                reg_losses_dict['loss_orient'] = loss_orient.item()

            if self.opt.lambda_3d_normal_smooth > 0 and 'loss_normal_perturb' in outputs:  # 0
                loss_smooth3d = self.opt.lambda_3d_normal_smooth * outputs['loss_normal_perturb'].mean()
                reg_losses_dict['loss_smooth3d'] = loss_smooth3d.item()

            if self.opt.lambda_time_tv > 0:
                if self.opt.backbone == 'grid4d':
                    loss_time_tv = self.opt.lambda_time_tv * self.model.TV_loss()
                    reg_losses_dict['loss_time_tv'] = loss_time_tv.item()

            loss += loss_opacity + loss_entropy + loss_smooth + loss_smooth2d + \
                loss_orient + loss_smooth3d + loss_time_tv + loss_canonical

        else:
            if self.opt.lambda_mesh_normal > 0:
                loss_mesh_normal = self.opt.lambda_mesh_normal * \
                    outputs['loss_normal'].mean()
                reg_losses_dict['loss_mesh_normal'] = loss_mesh_normal.item()

            if self.opt.lambda_mesh_lap > 0:
                loss_mesh_lap = self.opt.lambda_mesh_lap * outputs['loss_lap'].mean()
                reg_losses_dict['loss_mesh_lap'] = loss_mesh_lap.item()

            loss += loss_mesh_normal + loss_mesh_lap

        losses_dict = {
            'loss': loss.item(),
            'loss_sds': loss_sds.item(),
            'loss_sr': loss_sr.item(),
            'loss_cn': loss_cn.item(),
            # 'loss_if': loss_if.item(),
            'loss_zero123': loss_zero123.item(),
            # 'loss_clip': loss_clip.item(),
            'loss_rgb': loss_rgb.item(),
            'loss_mask': loss_mask.item(),
            'loss_normal': loss_normal.item(),
            'loss_depth': loss_depth.item(),
            # 'loss_opacity': loss_opacity.item(),
            # 'loss_entropy': loss_entropy.item(),
            # 'loss_smooth': loss_smooth.item(),
            # 'loss_smooth2d': loss_smooth2d.item(),
            # 'loss_smooth3d': loss_smooth3d.item(),
            # 'loss_orient': loss_orient.item(),
            # 'loss_mesh_normal': loss_mesh_normal.item(),
            # 'loss_mesh_lap': loss_mesh_lap.item(),
        }
        losses_dict.update(reg_losses_dict)
        # if loss_guidance_dict:
        #     for key, val in loss_guidance_dict.items():
        #         losses_dict[key] = val.item() if isinstance(val, torch.Tensor) else val
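        # The renderer returns normals channel-last ([B, F, H, W, 3]); they are permuted to
        # channel-first below so the periodic train_debug dump can write them with
        # save_tensor2image(..., channel_last=False) like the other tensors in out_dict.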
w").contiguous() # B,F,H,W,3 -> B,F,3,H,W if torch.isnan(loss): ipdb.set_trace() # save for debug purpose if self.opt.save_train_every > 0 and self.global_step % self.opt.save_train_every == 0: image_save_path = os.path.join(self.workspace, 'train_debug',) os.makedirs(image_save_path, exist_ok=True) for key, value in out_dict.items(): if value is not None: value = ((value - value.min()) / (value.max() - value.min() + 1e-6)).detach().mul(255).to(torch.uint8) # 0-255 try:
save_tensor2image(value, os.path.join(image_save_path, f'train_{self.global_step:06d}_{key}.jpg'), channel_last=False)
0
2023-11-23 10:34:08+00:00
24k