inputs      stringlengths   312 – 52k
targets     stringlengths   1 – 3.1k
block_type  stringclasses   11 values
scenario    stringclasses   7 values
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import math import numpy as np from enum import IntEnum, unique from typing import List, Tuple, Union import torch from torch import device _RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] @unique class BoxMode(IntEnum): """ Enum of different ways to represent a box. """ XYXY_ABS = 0 """ (x0, y0, x1, y1) in absolute floating points coordinates. The coordinates in range [0, width or height]. """ XYWH_ABS = 1 """ (x0, y0, w, h) in absolute floating points coordinates. """ XYXY_REL = 2 """ Not yet supported! (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. """ XYWH_REL = 3 """ Not yet supported! (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. """ XYWHA_ABS = 4 """ (xc, yc, w, h, a) in absolute floating points coordinates. (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. """ @staticmethod def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: """ Args: box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 from_mode, to_mode (BoxMode) Returns: The converted box of the same type. """ if from_mode == to_mode: return box original_type = type(box) is_numpy = isinstance(box, np.ndarray) single_box = isinstance(box, (list, tuple)) if single_box: assert len(box) == 4 or len(box) == 5, ( "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," " where k == 4 or 5" ) arr = torch.tensor(box)[None, :] else: # avoid modifying the input box if is_numpy: arr = torch.from_numpy(np.asarray(box)).clone() else: arr = box.clone() assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ BoxMode.XYXY_REL, BoxMode.XYWH_REL, ], "Relative mode not yet supported!" if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: assert ( arr.shape[-1] == 5 ), "The last dimension of input shape must be 5 for XYWHA format" original_dtype = arr.dtype arr = arr.double() w = arr[:, 2] h = arr[:, 3] a = arr[:, 4] c = torch.abs(torch.cos(a * math.pi / 180.0)) s = torch.abs(torch.sin(a * math.pi / 180.0)) # This basically computes the horizontal bounding rectangle of the rotated box new_w = c * w + s * h new_h = c * h + s * w # convert center to top-left corner arr[:, 0] -= new_w / 2.0 arr[:, 1] -= new_h / 2.0 # bottom-right corner arr[:, 2] = arr[:, 0] + new_w arr[:, 3] = arr[:, 1] + new_h arr = arr[:, :4].to(dtype=original_dtype) elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: original_dtype = arr.dtype arr = arr.double() arr[:, 0] += arr[:, 2] / 2.0 arr[:, 1] += arr[:, 3] / 2.0 angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) else: if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: arr[:, 2] += arr[:, 0] arr[:, 3] += arr[:, 1] elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: arr[:, 2] -= arr[:, 0] arr[:, 3] -= arr[:, 1] else: raise NotImplementedError( "Conversion from BoxMode {} to {} is not supported yet".format( from_mode, to_mode ) ) if single_box: return original_type(arr.flatten().tolist()) if is_numpy: return arr.numpy() else: return arr class Boxes: """ This structure stores a list of boxes as a Nx4 torch.Tensor. 
It supports some common methods about boxes (`area`, `clip`, `nonempty`, etc), and also behaves like a Tensor (support indexing, `to(device)`, `.device`, and iteration over all boxes) Attributes: tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). """ def __init__(self, tensor: torch.Tensor): """ Args: tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) if tensor.numel() == 0: # Use reshape, so we don't end up creating a new tensor that does not depend on # the inputs (and consequently confuses jit) tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device) assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() self.tensor = tensor def clone(self) -> "Boxes": """ Clone the Boxes. Returns: Boxes """ return Boxes(self.tensor.clone()) def to(self, device: torch.device): # Boxes are assumed float32 and does not support to(dtype) return Boxes(self.tensor.to(device=device)) def area(self) -> torch.Tensor: """ Computes the area of all the boxes. Returns: torch.Tensor: a vector with areas of each box. """ box = self.tensor area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) return area def clip(self, box_size: Tuple[int, int]) -> None: """ Clip (in place) the boxes by limiting x coordinates to the range [0, width] and y coordinates to the range [0, height]. Args: box_size (height, width): The clipping box's size. """ assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size x1 = self.tensor[:, 0].clamp(min=0, max=w) y1 = self.tensor[:, 1].clamp(min=0, max=h) x2 = self.tensor[:, 2].clamp(min=0, max=w) y2 = self.tensor[:, 3].clamp(min=0, max=h) self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) def nonempty(self, threshold: float = 0.0) -> torch.Tensor: """ Find boxes that are non-empty. A box is considered empty, if either of its side is no larger than threshold. Returns: Tensor: a binary vector which represents whether each box is empty (False) or non-empty (True). """ box = self.tensor widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def __getitem__(self, item) -> "Boxes": """ Args: item: int, slice, or a BoolTensor Returns: Boxes: Create a new :class:`Boxes` by indexing. The following usage are allowed: 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. 2. `new_boxes = boxes[2:10]`: return a slice of boxes. 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor with `length = len(boxes)`. Nonzero elements in the vector will be selected. Note that the returned Boxes might share storage with this Boxes, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return Boxes(self.tensor[item].view(1, -1)) b = self.tensor[item] assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) return Boxes(b) def __len__(self) -> int: return self.tensor.shape[0] def __repr__(self) -> str: return "Boxes(" + str(self.tensor) + ")" def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: """ Args: box_size (height, width): Size of the reference box. boundary_threshold (int): Boxes that extend beyond the reference box boundary by more than boundary_threshold are considered "outside". Returns: a binary vector, indicating whether each box is inside the reference box. 
""" height, width = box_size inds_inside = ( (self.tensor[..., 0] >= -boundary_threshold) & (self.tensor[..., 1] >= -boundary_threshold) & (self.tensor[..., 2] < width + boundary_threshold) & (self.tensor[..., 3] < height + boundary_threshold) ) return inds_inside def get_centers(self) -> torch.Tensor: """ Returns: The box centers in a Nx2 array of (x, y). """ return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 def scale(self, scale_x: float, scale_y: float) -> None: """ Scale the box with horizontal and vertical scaling factors """ self.tensor[:, 0::2] *= scale_x self.tensor[:, 1::2] *= scale_y @classmethod def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": """ Concatenates a list of Boxes into a single Boxes Arguments: boxes_list (list[Boxes]) Returns: Boxes: the concatenated Boxes """ assert isinstance(boxes_list, (list, tuple)) if len(boxes_list) == 0: return cls(torch.empty(0)) assert all([isinstance(box, Boxes) for box in boxes_list]) # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) return cat_boxes @property def device(self) -> device: return self.tensor.device # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript # https://github.com/pytorch/pytorch/issues/18627 @torch.jit.unused def __iter__(self): """ Yield a box as a Tensor of shape (4,) at a time. """ yield from self.tensor def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the intersection area between __all__ N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax) Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: intersection, sized [N,M]. """ boxes1, boxes2 = boxes1.tensor, boxes2.tensor width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( boxes1[:, None, :2], boxes2[:, :2] ) # [N,M,2] width_height.clamp_(min=0) # [N,M,2] intersection = width_height.prod(dim=2) # [N,M] return intersection # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the IoU (intersection over union) between **all** N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoU, sized [N,M]. """ area1 = boxes1.area() # [N] area2 = boxes2.area() # [<fim_suffix>M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes iou = torch.where( inter > 0, inter / (area1[:, None] + area2 - inter), torch.zeros(1, dtype=inter.dtype, device=inter.device), ) return iou def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoA, sized [N,M]. """ area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes ioa = torch.where( inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device) ) return ioa def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes): """ Pairwise distance between N points and M boxes. The distance between a point and a box is represented by the distance from the point to 4 edges of the box. 
Distances are all positive when the point is inside the box. Args: points: Nx2 coordinates. Each row is (x, y) boxes: M boxes Returns: Tensor: distances of size (N, M, 4). The 4 values are distances from the point to the left, top, right, bottom of the box. """ x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1) x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M) return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2) def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Compute pairwise intersection over union (IOU) of two sets of matched boxes that have the same number of boxes. Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix. Args: boxes1 (Boxes): bounding boxes, sized [N,4]. boxes2 (Boxes): same length as boxes1 Returns: Tensor: iou, sized [N]. """ assert len(boxes1) == len( boxes2 ), "boxlists should have the same" "number of entries, got {}, {}".format( len(boxes1), len(boxes2) ) area1 = boxes1.area() # [N] area2 = boxes2.area() # [N] box1, box2 = boxes1.tensor, boxes2.tensor lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] wh = (rb - lt).clamp(min=0) # [N,2] inter = wh[:, 0] * wh[:, 1] # [N] iou = inter / (area1 + area2 - inter) # [N] return iou <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
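The record above is cut inside `pairwise_iou`. As a companion illustration, here is a minimal plain-torch sketch of the all-pairs IoU arithmetic that `pairwise_intersection` and `pairwise_iou` implement, written against raw Nx4 (x0, y0, x1, y1) tensors so it runs without importing detectron2. The function name `pairwise_iou_xyxy` and the sample boxes are illustrative, not part of the source.

```python
import torch

def pairwise_iou_xyxy(boxes1: torch.Tensor, boxes2: torch.Tensor) -> torch.Tensor:
    """IoU between all N x M pairs of (x0, y0, x1, y1) boxes."""
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])  # [N]
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])  # [M]
    # clamped overlap widths/heights for every pair, as in pairwise_intersection
    wh = (torch.min(boxes1[:, None, 2:], boxes2[:, 2:])
          - torch.max(boxes1[:, None, :2], boxes2[:, :2])).clamp(min=0)   # [N,M,2]
    inter = wh.prod(dim=2)                                                 # [N,M]
    return torch.where(inter > 0,
                       inter / (area1[:, None] + area2 - inter),
                       torch.zeros_like(inter))

a = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
b = torch.tensor([[5.0, 5.0, 15.0, 15.0], [20.0, 20.0, 30.0, 30.0]])
print(pairwise_iou_xyxy(a, b))  # ~tensor([[0.1429, 0.0000]])
```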
<filename>UniRef/detectron2/structures/boxes.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import math import numpy as np from enum import IntEnum, unique from typing import List, Tuple, Union import torch from torch import device _RawBoxType = Union[List[float], Tuple[float, ...], torch.Tensor, np.ndarray] @unique class BoxMode(IntEnum): """ Enum of different ways to represent a box. """ XYXY_ABS = 0 """ (x0, y0, x1, y1) in absolute floating points coordinates. The coordinates in range [0, width or height]. """ XYWH_ABS = 1 """ (x0, y0, w, h) in absolute floating points coordinates. """ XYXY_REL = 2 """ Not yet supported! (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image. """ XYWH_REL = 3 """ Not yet supported! (x0, y0, w, h) in range [0, 1]. They are relative to the size of the image. """ XYWHA_ABS = 4 """ (xc, yc, w, h, a) in absolute floating points coordinates. (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw. """ @staticmethod def convert(box: _RawBoxType, from_mode: "BoxMode", to_mode: "BoxMode") -> _RawBoxType: """ Args: box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5 from_mode, to_mode (BoxMode) Returns: The converted box of the same type. """ if from_mode == to_mode: return box original_type = type(box) is_numpy = isinstance(box, np.ndarray) single_box = isinstance(box, (list, tuple)) if single_box: assert len(box) == 4 or len(box) == 5, ( "BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor," " where k == 4 or 5" ) arr = torch.tensor(box)[None, :] else: # avoid modifying the input box if is_numpy: arr = torch.from_numpy(np.asarray(box)).clone() else: arr = box.clone() assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [ BoxMode.XYXY_REL, BoxMode.XYWH_REL, ], "Relative mode not yet supported!" if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS: assert ( arr.shape[-1] == 5 ), "The last dimension of input shape must be 5 for XYWHA format" original_dtype = arr.dtype arr = arr.double() w = arr[:, 2] h = arr[:, 3] a = arr[:, 4] c = torch.abs(torch.cos(a * math.pi / 180.0)) s = torch.abs(torch.sin(a * math.pi / 180.0)) # This basically computes the horizontal bounding rectangle of the rotated box new_w = c * w + s * h new_h = c * h + s * w # convert center to top-left corner arr[:, 0] -= new_w / 2.0 arr[:, 1] -= new_h / 2.0 # bottom-right corner arr[:, 2] = arr[:, 0] + new_w arr[:, 3] = arr[:, 1] + new_h arr = arr[:, :4].to(dtype=original_dtype) elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS: original_dtype = arr.dtype arr = arr.double() arr[:, 0] += arr[:, 2] / 2.0 arr[:, 1] += arr[:, 3] / 2.0 angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype) arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype) else: if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS: arr[:, 2] += arr[:, 0] arr[:, 3] += arr[:, 1] elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS: arr[:, 2] -= arr[:, 0] arr[:, 3] -= arr[:, 1] else: raise NotImplementedError( "Conversion from BoxMode {} to {} is not supported yet".format( from_mode, to_mode ) ) if single_box: return original_type(arr.flatten().tolist()) if is_numpy: return arr.numpy() else: return arr class Boxes: """ This structure stores a list of boxes as a Nx4 torch.Tensor. 
It supports some common methods about boxes (`area`, `clip`, `nonempty`, etc), and also behaves like a Tensor (support indexing, `to(device)`, `.device`, and iteration over all boxes) Attributes: tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2). """ def __init__(self, tensor: torch.Tensor): """ Args: tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2). """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) if tensor.numel() == 0: # Use reshape, so we don't end up creating a new tensor that does not depend on # the inputs (and consequently confuses j<fim_suffix>it) tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device) assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size() self.tensor = tensor def clone(self) -> "Boxes": """ Clone the Boxes. Returns: Boxes """ return Boxes(self.tensor.clone()) def to(self, device: torch.device): # Boxes are assumed float32 and does not support to(dtype) return Boxes(self.tensor.to(device=device)) def area(self) -> torch.Tensor: """ Computes the area of all the boxes. Returns: torch.Tensor: a vector with areas of each box. """ box = self.tensor area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1]) return area def clip(self, box_size: Tuple[int, int]) -> None: """ Clip (in place) the boxes by limiting x coordinates to the range [0, width] and y coordinates to the range [0, height]. Args: box_size (height, width): The clipping box's size. """ assert torch.isfinite(self.tensor).all(), "Box tensor contains infinite or NaN!" h, w = box_size x1 = self.tensor[:, 0].clamp(min=0, max=w) y1 = self.tensor[:, 1].clamp(min=0, max=h) x2 = self.tensor[:, 2].clamp(min=0, max=w) y2 = self.tensor[:, 3].clamp(min=0, max=h) self.tensor = torch.stack((x1, y1, x2, y2), dim=-1) def nonempty(self, threshold: float = 0.0) -> torch.Tensor: """ Find boxes that are non-empty. A box is considered empty, if either of its side is no larger than threshold. Returns: Tensor: a binary vector which represents whether each box is empty (False) or non-empty (True). """ box = self.tensor widths = box[:, 2] - box[:, 0] heights = box[:, 3] - box[:, 1] keep = (widths > threshold) & (heights > threshold) return keep def __getitem__(self, item) -> "Boxes": """ Args: item: int, slice, or a BoolTensor Returns: Boxes: Create a new :class:`Boxes` by indexing. The following usage are allowed: 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box. 2. `new_boxes = boxes[2:10]`: return a slice of boxes. 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor with `length = len(boxes)`. Nonzero elements in the vector will be selected. Note that the returned Boxes might share storage with this Boxes, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return Boxes(self.tensor[item].view(1, -1)) b = self.tensor[item] assert b.dim() == 2, "Indexing on Boxes with {} failed to return a matrix!".format(item) return Boxes(b) def __len__(self) -> int: return self.tensor.shape[0] def __repr__(self) -> str: return "Boxes(" + str(self.tensor) + ")" def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor: """ Args: box_size (height, width): Size of the reference box. boundary_threshold (int): Boxes that extend beyond the reference box boundary by more than boundary_threshold are considered "outside". 
Returns: a binary vector, indicating whether each box is inside the reference box. """ height, width = box_size inds_inside = ( (self.tensor[..., 0] >= -boundary_threshold) & (self.tensor[..., 1] >= -boundary_threshold) & (self.tensor[..., 2] < width + boundary_threshold) & (self.tensor[..., 3] < height + boundary_threshold) ) return inds_inside def get_centers(self) -> torch.Tensor: """ Returns: The box centers in a Nx2 array of (x, y). """ return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2 def scale(self, scale_x: float, scale_y: float) -> None: """ Scale the box with horizontal and vertical scaling factors """ self.tensor[:, 0::2] *= scale_x self.tensor[:, 1::2] *= scale_y @classmethod def cat(cls, boxes_list: List["Boxes"]) -> "Boxes": """ Concatenates a list of Boxes into a single Boxes Arguments: boxes_list (list[Boxes]) Returns: Boxes: the concatenated Boxes """ assert isinstance(boxes_list, (list, tuple)) if len(boxes_list) == 0: return cls(torch.empty(0)) assert all([isinstance(box, Boxes) for box in boxes_list]) # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0)) return cat_boxes @property def device(self) -> device: return self.tensor.device # type "Iterator[torch.Tensor]", yield, and iter() not supported by torchscript # https://github.com/pytorch/pytorch/issues/18627 @torch.jit.unused def __iter__(self): """ Yield a box as a Tensor of shape (4,) at a time. """ yield from self.tensor def pairwise_intersection(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the intersection area between __all__ N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax) Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: intersection, sized [N,M]. """ boxes1, boxes2 = boxes1.tensor, boxes2.tensor width_height = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) - torch.max( boxes1[:, None, :2], boxes2[:, :2] ) # [N,M,2] width_height.clamp_(min=0) # [N,M,2] intersection = width_height.prod(dim=2) # [N,M] return intersection # implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py # with slight modifications def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Given two lists of boxes of size N and M, compute the IoU (intersection over union) between **all** N x M pairs of boxes. The box order must be (xmin, ymin, xmax, ymax). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoU, sized [N,M]. """ area1 = boxes1.area() # [N] area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes iou = torch.where( inter > 0, inter / (area1[:, None] + area2 - inter), torch.zeros(1, dtype=inter.dtype, device=inter.device), ) return iou def pairwise_ioa(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Similar to :func:`pariwise_iou` but compute the IoA (intersection over boxes2 area). Args: boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively. Returns: Tensor: IoA, sized [N,M]. """ area2 = boxes2.area() # [M] inter = pairwise_intersection(boxes1, boxes2) # handle empty boxes ioa = torch.where( inter > 0, inter / area2, torch.zeros(1, dtype=inter.dtype, device=inter.device) ) return ioa def pairwise_point_box_distance(points: torch.Tensor, boxes: Boxes): """ Pairwise distance between N points and M boxes. 
The distance between a point and a box is represented by the distance from the point to 4 edges of the box. Distances are all positive when the point is inside the box. Args: points: Nx2 coordinates. Each row is (x, y) boxes: M boxes Returns: Tensor: distances of size (N, M, 4). The 4 values are distances from the point to the left, top, right, bottom of the box. """ x, y = points.unsqueeze(dim=2).unbind(dim=1) # (N, 1) x0, y0, x1, y1 = boxes.tensor.unsqueeze(dim=0).unbind(dim=2) # (1, M) return torch.stack([x - x0, y - y0, x1 - x, y1 - y], dim=2) def matched_pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor: """ Compute pairwise intersection over union (IOU) of two sets of matched boxes that have the same number of boxes. Similar to :func:`pairwise_iou`, but computes only diagonal elements of the matrix. Args: boxes1 (Boxes): bounding boxes, sized [N,4]. boxes2 (Boxes): same length as boxes1 Returns: Tensor: iou, sized [N]. """ assert len(boxes1) == len( boxes2 ), "boxlists should have the same" "number of entries, got {}, {}".format( len(boxes1), len(boxes2) ) area1 = boxes1.area() # [N] area2 = boxes2.area() # [N] box1, box2 = boxes1.tensor, boxes2.tensor lt = torch.max(box1[:, :2], box2[:, :2]) # [N,2] rb = torch.min(box1[:, 2:], box2[:, 2:]) # [N,2] wh = (rb - lt).clamp(min=0) # [N,2] inter = wh[:, 0] * wh[:, 1] # [N] iou = inter / (area1 + area2 - inter) # [N] return iou <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
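For reference, the XYWH_ABS to XYXY_ABS branch of `BoxMode.convert` in the file above reduces to two in-place additions. A minimal plain-torch sketch of that arithmetic; the helper name `xywh_to_xyxy` and the sample box are assumptions for illustration only.

```python
import torch

def xywh_to_xyxy(boxes: torch.Tensor) -> torch.Tensor:
    """boxes: Nx4 (x0, y0, w, h) -> Nx4 (x0, y0, x1, y1)."""
    out = boxes.clone()        # avoid modifying the input, like BoxMode.convert
    out[:, 2] += out[:, 0]     # x1 = x0 + w
    out[:, 3] += out[:, 1]     # y1 = y0 + h
    return out

boxes = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
print(xywh_to_xyxy(boxes))     # tensor([[10., 20., 40., 60.]])
```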
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidd<fim_suffix>en representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
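The `cls_score.` branch of `convert_c2_detectron_names` above moves the background-class row from index 0 to the last index with a single `torch.cat`. A toy sketch of that reordering; the 1-D tensor stands in for a real classification weight matrix and is purely illustrative.

```python
import torch

# Caffe2 puts the background class at index 0; detectron2 expects it last.
c2_cls_weight = torch.arange(5.0)                              # classes 0..4, bg at index 0
d2_cls_weight = torch.cat([c2_cls_weight[1:], c2_cls_weight[:1]])
print(d2_cls_weight)                                           # tensor([1., 2., 3., 4., 0.])
```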
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math import numpy as np import cv2 def db_eval_iou(annotation, segmentation, void_pixels=None): """ Compute region similarity as the Jaccard Index. Arguments: annotation (ndarray): binary annotation map. segmentation (ndarray): binary segmentation map. void_pixels (ndarray): optional mask with void pixels Return: jaccard (float): region similarity """ assert annotation.shape == segmentation.shape, \ f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' annotation = annotation.astype(np.bool) segmentation = segmentation.astype(np.bool) if void_pixels is not None: assert annotation.shape == void_pixels.shape, \ f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(segmentation) # Intersection between all sets inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) j = inters / union if j.ndim == 0: j = 1 if np.isclose(union, 0) else j else: j[np.isclose(union, 0)] = 1 return j def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): assert annotation.shape == segmentation.shape if void_pixels is not None: assert annotation.shape == void_pixels.shape if annotation.ndim == 3: n_frames = annotation.shape[0] f_res = np.zeros(n_frames) for frame_id in range(n_frames): void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) elif annotation.ndim == 2: f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) else: raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') return f_res def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. 
void_pixels (ndarray): optional mask with void pixels Returns: F (float): boundaries F-measure """ assert np.atleast_3d(foreground_mask).shape[2] == 1 if void_pixels is not None: void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(foreground_mask).astype(np.bool) bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of bot<fim_suffix>h masks fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels)) gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels)) from skimage.morphology import disk # fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if precision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def _seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin <[email protected]> January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not ( width > w | height > h | abs(ar1 - ar2) > 0.01 ), "Can" "t convert %dx%d seg to %dx%d bmap." 
% (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in range(h): if b[y, x]: j = 1 + math.floor((y - 1) + height / h) i = 1 + math.floor((x - 1) + width / h) bmap[j, i] = 1 return bmap if __name__ == '__main__': from davis2017.davis import DAVIS from davis2017.results import Results dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics') results = Results(root_dir='examples/osvos') # Test timing F measure for seq in dataset.get_sequences(): all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True) all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1] all_res_masks = results.read_masks(seq, all_masks_id) f_metrics_res = np.zeros(all_gt_masks.shape[:2]) for ii in range(all_gt_masks.shape[0]): f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...]) # Run using to profile code: python -m cProfile -o f_measure.prof metrics.py # snakeviz f_measure.prof <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
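`db_eval_iou` above is the standard Jaccard index with void-pixel masking and a convention of returning 1 when the union is empty. A minimal NumPy sketch without the void-pixel handling; the function name and the toy masks are illustrative.

```python
import numpy as np

def jaccard(annotation: np.ndarray, segmentation: np.ndarray) -> float:
    """Region similarity: |A & S| / |A | S|, 1.0 when both masks are empty."""
    annotation = annotation.astype(bool)
    segmentation = segmentation.astype(bool)
    union = np.sum(annotation | segmentation)
    if np.isclose(union, 0):
        return 1.0
    return float(np.sum(annotation & segmentation) / union)

gt = np.zeros((4, 4), dtype=bool); gt[:2, :2] = True     # 4 foreground pixels
pred = np.zeros((4, 4), dtype=bool); pred[:2, :] = True  # 8 foreground pixels
print(jaccard(gt, pred))                                 # 4 / 8 = 0.5
```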
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_k<fim_suffix>ey string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/iou_weighted_hungarian_bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. from typing import List import numpy as np from .base_tracker import TRACKER_HEADS_REGISTRY from .vanilla_hungarian_bbox_iou_tracker import VanillaHungarianBBoxIOUTracker from detectron2.config import configurable, CfgNode as CfgNode_ @TRACKER_HEADS_REGISTRY.register() class IOUWeightedHungarianBBoxIOUTracker(VanillaHungarianBBoxIOUTracker): """ A tracker using IoU as weight in Hungarian algorithm, also known as Munkres or Kuhn-Munkres algorithm """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__( video_height=video_height, video_width=video_width, max_num_instances=max_num_instances, max_lost_frame_count=max_lost_frame_count, min_box_rel_dim=min_box_rel_dim, min_instance_period=min_instance_period, track_iou_threshold=track_iou_threshold ) @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.iou_weighted_hungarian_bbox_iou_tracker.IOUWeightedHungarianBBoxIOUTracker", # noqa "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def assign_cost_matrix_values(self, cost_matrix: np.ndarray, bbox_pairs: List) -> np.ndarray: """ Based on IoU for each pair of bbox, assign the associated value in cost matrix Args: cost_matrix: np.ndarray, initialized 2D array with target dimensions bbox_pairs: list of bbox pair, in each pair, iou value is stored Return: np.ndarray, cost_matrix with assigned values """ for pair in bbox_pairs: # assign (-1 * IoU) for above threshold pairs, algorithms will minimize <fim_suffix>cost cost_matrix[pair["idx"]][pair["prev_idx"]] = -1 * pair["IoU"] return cost_matrix <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math import numpy as np import cv2 def db_eval_iou(annotation, segmentation, void_pixels=None): """ Compute region similarity as the Jaccard Index. Arguments: annotation (ndarray): binary annotation map. segmentation (ndarray): binary segmentation map. void_pixels (ndarray): optional mask with void pixels Return: jaccard (float): region similarity """ assert annotation.shape == segmentation.shape, \ f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' annotation = annotation.astype(np.bool) segmentation = segmentation.astype(np.bool) if void_pixels is not None: assert annotation.shape == void_pixels.shape, \ f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(segmentation) # Inters<fim_suffix>ection between all sets inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) j = inters / union if j.ndim == 0: j = 1 if np.isclose(union, 0) else j else: j[np.isclose(union, 0)] = 1 return j def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): assert annotation.shape == segmentation.shape if void_pixels is not None: assert annotation.shape == void_pixels.shape if annotation.ndim == 3: n_frames = annotation.shape[0] f_res = np.zeros(n_frames) for frame_id in range(n_frames): void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) elif annotation.ndim == 2: f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) else: raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') return f_res def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. 
void_pixels (ndarray): optional mask with void pixels Returns: F (float): boundaries F-measure """ assert np.atleast_3d(foreground_mask).shape[2] == 1 if void_pixels is not None: void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(foreground_mask).astype(np.bool) bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of both masks fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels)) gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels)) from skimage.morphology import disk # fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if precision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def _seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin <[email protected]> January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not ( width > w | height > h | abs(ar1 - ar2) > 0.01 ), "Can" "t convert %dx%d seg to %dx%d bmap." 
% (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in range(h): if b[y, x]: j = 1 + math.floor((y - 1) + height / h) i = 1 + math.floor((x - 1) + width / h) bmap[j, i] = 1 return bmap if __name__ == '__main__': from davis2017.davis import DAVIS from davis2017.results import Results dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics') results = Results(root_dir='examples/osvos') # Test timing F measure for seq in dataset.get_sequences(): all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True) all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1] all_res_masks = results.read_masks(seq, all_masks_id) f_metrics_res = np.zeros(all_gt_masks.shape[:2]) for ii in range(all_gt_masks.shape[0]): f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...]) # Run using to profile code: python -m cProfile -o f_measure.prof metrics.py # snakeviz f_measure.prof <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weigh<fim_suffix>t for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if <fim_suffix>don't know what to do <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # ----------------------------<fim_suffix>---------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_insta<fim_suffix>nce in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model,<fim_suffix> idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx <fim_suffix>in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/utils.py<fim_prefix>#!/usr/bin/env python3 from detectron2.structures import Instances import numpy as np from typing import List def create_prediction_pairs( instances: Instances, prev_instances: Instances, iou_all: np.ndarray, threshold: float = 0.5, ) -> List: """ Args: instances: predictions from current frame prev_instances: predictions from previous frame iou_all: 2D numpy array containing iou for each bbox pair threshold: below the threshold, doesn't consider the pair of bbox is valid Return: List of bbox pairs """ bbox_pairs = [] for i in range(len(instances)): for j in ra<fim_suffix>nge(len(prev_instances)): if iou_all[i, j] < threshold: continue bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": prev_instances.ID_period[j], } ) return bbox_pairs LARGE_COST_VALUE = 100000 <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in ra<fim_suffix>nge(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pai<fim_suffix>rs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in 
self._matched_idx \ or prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy import numpy as np import torch from detectron2.structures import Boxes, Instances from .base_tracker import BaseTracker from scipy.optimize import linear_sum_assignment from ..config.config import CfgNode as CfgNode_ from typing import Dict from detectron2.config import configurable class BaseHungarianTracker(BaseTracker): """ A base class for all Hungarian trackers """ @configurable def __init__( self, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period @classmethod def from_config(cls, cfg: CfgNode_) -> Dict: raise NotImplementedError("Calling HungarianTracker::from_config") def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: raise NotImplementedError("Calling HungarianTracker::build_matrix") def update(self, instances: Instances) -> Instances: if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: self._untracked_prev_idx = set(range(len(self._prev_instances))) cost_matrix = self.build_cost_matrix(instances, self._prev_instances) matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) instances = self._process_unmatched_idx(instances, matched_idx) instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) self._prev_instances = copy.deepcopy(instances) return instances def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _process_matched_idx( self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray ) -> Instances: assert matched_idx.size == matched_prev_idx.size for i in range(matched_idx.size)<fim_suffix>: instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] instances.ID_period[matched_idx[i]] = \ self._prev_instances.ID_period[matched_prev_idx[i]] + 1 instances.lost_frame_count[matched_idx[i]] = 0 return instances def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: untracked_idx = set(range(len(instances))).difference(set(matched_idx)) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _process_unmatched_prev_idx( self, instances: Instances, matched_prev_idx: np.ndarray ) -> Instances: untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) for idx in untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
targets: null | block_type: FOR | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/bbox_iou_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy from typing import List import numpy as np import torch from detectron2.config import configurable from detectron2.structures import Boxes, Instances from detectron2.structures.boxes import pairwise_iou from ..config.config import CfgNode as CfgNode_ from .base_tracker import BaseTracker, TRACKER_HEADS_REGISTRY @TRACKER_HEADS_REGISTRY.register() class BBoxIOUTracker(BaseTracker): """ A bounding box tracker to assign ID based on IoU between current and previous instances """ @configurable def __init__( self, *, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, track_iou_threshold: float = 0.5, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video track_iou_threshold: iou threshold, below this number a bbox pair is removed from tracking """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period self._track_iou_threshold = track_iou_threshold @classmethod def from_config(cls, cfg: CfgNode_): """ Old style initialization using CfgNode Args: cfg: D2 CfgNode, config file Return: dictionary storing arguments for __init__ method """ assert "VIDEO_HEIGHT" in cfg.TRACKER_HEADS assert "VIDEO_WIDTH" in cfg.TRACKER_HEADS video_height = cfg.TRACKER_HEADS.get("VIDEO_HEIGHT") video_width = cfg.TRACKER_HEADS.get("VIDEO_WIDTH") max_num_instances = cfg.TRACKER_HEADS.get("MAX_NUM_INSTANCES", 200) max_lost_frame_count = cfg.TRACKER_HEADS.get("MAX_LOST_FRAME_COUNT", 0) min_box_rel_dim = cfg.TRACKER_HEADS.get("MIN_BOX_REL_DIM", 0.02) min_instance_period = cfg.TRACKER_HEADS.get("MIN_INSTANCE_PERIOD", 1) track_iou_threshold = cfg.TRACKER_HEADS.get("TRACK_IOU_THRESHOLD", 0.5) return { "_target_": "detectron2.tracking.bbox_iou_tracker.BBoxIOUTracker", "video_height": video_height, "video_width": video_width, "max_num_instances": max_num_instances, "max_lost_frame_count": max_lost_frame_count, "min_box_rel_dim": min_box_rel_dim, "min_instance_period": min_instance_period, "track_iou_threshold": track_iou_threshold } def update(self, instances: Instances) -> Instances: """ See BaseTracker description """ if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: # calculate IoU of all bbox pairs iou_all = pairwise_iou( boxes1=instances.pred_boxes, boxes2=self._prev_instances.pred_boxes, ) # sort IoU in descending order bbox_pairs = self._create_prediction_pairs(instances, iou_all) # assign previous ID to current bbox if IoU > track_iou_threshold self._reset_fields() for bbox_pair in bbox_pairs: idx = bbox_pair["idx"] prev_id = bbox_pair["prev_id"] if idx in self._matched_idx \ or 
prev_id in self._matched_ID \ or bbox_pair["IoU"] < self._track_iou_threshold: continue instances.ID[idx] = prev_id instances.ID_period[idx] = bbox_pair["prev_period"] + 1 instances.lost_frame_count[idx] = 0 self._matched_idx.add(idx) self._matched_ID.add(prev_id) self._untracked_prev_idx.remove(bbox_pair["prev_idx"]) instances = self._assign_new_id(instances) instances = self._merge_untracked_instances(instances) self._prev_instances = copy.deepcopy(instances) return instances def _create_prediction_pairs( self, instances: Instances, iou_all: np.ndarray ) -> List: """ For all instances in previous and current frames, create pairs. For each pair, store index of the instance in current frame predcitions, index in previous predictions, ID in previous predictions, IoU of the bboxes in this pair, period in previous predictions. Args: instances: D2 Instances, for predictions of the current frame iou_all: IoU for all bboxes pairs Return: A list of IoU for all pairs """ bbox_pairs = [] for i in range<fim_suffix>(len(instances)): for j in range(len(self._prev_instances)): bbox_pairs.append( { "idx": i, "prev_idx": j, "prev_id": self._prev_instances.ID[j], "IoU": iou_all[i, j], "prev_period": self._prev_instances.ID_period[j], } ) return bbox_pairs def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _reset_fields(self): """ Before each uodate call, reset fields first """ self._matched_idx = set() self._matched_ID = set() self._untracked_prev_idx = set(range(len(self._prev_instances))) def _assign_new_id(self, instances: Instances) -> Instances: """ For each untracked instance, assign a new id Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with new ID assigned """ untracked_idx = set(range(len(instances))).difference(self._matched_idx) for idx in untracked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _merge_untracked_instances(self, instances: Instances) -> Instances: """ For untracked previous instances, under certain condition, still keep them in tracking and merge with the current instances. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances merging current instances and instances from previous frame decided to keep tracking """ untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) for idx in self._untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
targets: null | block_type: FOR | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/tracking/hungarian_tracker.py<fim_prefix>#!/usr/bin/env python3 # Copyright 2004-present Facebook. All Rights Reserved. import copy import numpy as np import torch from detectron2.structures import Boxes, Instances from .base_tracker import BaseTracker from scipy.optimize import linear_sum_assignment from ..config.config import CfgNode as CfgNode_ from typing import Dict from detectron2.config import configurable class BaseHungarianTracker(BaseTracker): """ A base class for all Hungarian trackers """ @configurable def __init__( self, video_height: int, video_width: int, max_num_instances: int = 200, max_lost_frame_count: int = 0, min_box_rel_dim: float = 0.02, min_instance_period: int = 1, **kwargs ): """ Args: video_height: height the video frame video_width: width of the video frame max_num_instances: maximum number of id allowed to be tracked max_lost_frame_count: maximum number of frame an id can lost tracking exceed this number, an id is considered as lost forever min_box_rel_dim: a percentage, smaller than this dimension, a bbox is removed from tracking min_instance_period: an instance will be shown after this number of period since its first showing up in the video """ super().__init__(**kwargs) self._video_height = video_height self._video_width = video_width self._max_num_instances = max_num_instances self._max_lost_frame_count = max_lost_frame_count self._min_box_rel_dim = min_box_rel_dim self._min_instance_period = min_instance_period @classmethod def from_config(cls, cfg: CfgNode_) -> Dict: raise NotImplementedError("Calling HungarianTracker::from_config") def build_cost_matrix(self, instances: Instances, prev_instances: Instances) -> np.ndarray: raise NotImplementedError("Calling HungarianTracker::build_matrix") def update(self, instances: Instances) -> Instances: if instances.has("pred_keypoints"): raise NotImplementedError("Need to add support for keypoints") instances = self._initialize_extra_fields(instances) if self._prev_instances is not None: self._untracked_prev_idx = set(range(len(self._prev_instances))) cost_matrix = self.build_cost_matrix(instances, self._prev_instances) matched_idx, matched_prev_idx = linear_sum_assignment(cost_matrix) instances = self._process_matched_idx(instances, matched_idx, matched_prev_idx) instances = self._process_unmatched_idx(instances, matched_idx) instances = self._process_unmatched_prev_idx(instances, matched_prev_idx) self._prev_instances = copy.deepcopy(instances) return instances def _initialize_extra_fields(self, instances: Instances) -> Instances: """ If input instances don't have ID, ID_period, lost_frame_count fields, this method is used to initialize these fields. 
Args: instances: D2 Instances, for predictions of the current frame Return: D2 Instances with extra fields added """ if not instances.has("ID"): instances.set("ID", [None] * len(instances)) if not instances.has("ID_period"): instances.set("ID_period", [None] * len(instances)) if not instances.has("lost_frame_count"): instances.set("lost_frame_count", [None] * len(instances)) if self._prev_instances is None: instances.ID = list(range(len(instances))) self._id_count += len(instances) instances.ID_period = [1] * len(instances) instances.lost_frame_count = [0] * len(instances) return instances def _process_matched_idx( self, instances: Instances, matched_idx: np.ndarray, matched_prev_idx: np.ndarray ) -> Instances: assert matched_idx.size == matched_prev_idx.size for i in range(matched_idx.size): instances.ID[matched_idx[i]] = self._prev_instances.ID[matched_prev_idx[i]] instances.ID_period[matched_idx[i]] = \ self._prev_instances.ID_period[matched_prev_idx[i]] + 1 instances.lost_frame_count[matched_idx[i]] = 0 return instances def _process_unmatched_idx(self, instances: Instances, matched_idx: np.ndarray) -> Instances: untracked_idx = set(range(len(instances))).difference(set(matched_idx)) for idx in untrac<fim_suffix>ked_idx: instances.ID[idx] = self._id_count self._id_count += 1 instances.ID_period[idx] = 1 instances.lost_frame_count[idx] = 0 return instances def _process_unmatched_prev_idx( self, instances: Instances, matched_prev_idx: np.ndarray ) -> Instances: untracked_instances = Instances( image_size=instances.image_size, pred_boxes=[], pred_masks=[], pred_classes=[], scores=[], ID=[], ID_period=[], lost_frame_count=[], ) prev_bboxes = list(self._prev_instances.pred_boxes) prev_classes = list(self._prev_instances.pred_classes) prev_scores = list(self._prev_instances.scores) prev_ID_period = self._prev_instances.ID_period if instances.has("pred_masks"): prev_masks = list(self._prev_instances.pred_masks) untracked_prev_idx = set(range(len(self._prev_instances))).difference(set(matched_prev_idx)) for idx in untracked_prev_idx: x_left, y_top, x_right, y_bot = prev_bboxes[idx] if ( (1.0 * (x_right - x_left) / self._video_width < self._min_box_rel_dim) or (1.0 * (y_bot - y_top) / self._video_height < self._min_box_rel_dim) or self._prev_instances.lost_frame_count[idx] >= self._max_lost_frame_count or prev_ID_period[idx] <= self._min_instance_period ): continue untracked_instances.pred_boxes.append(list(prev_bboxes[idx].numpy())) untracked_instances.pred_classes.append(int(prev_classes[idx])) untracked_instances.scores.append(float(prev_scores[idx])) untracked_instances.ID.append(self._prev_instances.ID[idx]) untracked_instances.ID_period.append(self._prev_instances.ID_period[idx]) untracked_instances.lost_frame_count.append( self._prev_instances.lost_frame_count[idx] + 1 ) if instances.has("pred_masks"): untracked_instances.pred_masks.append(prev_masks[idx].numpy().astype(np.uint8)) untracked_instances.pred_boxes = Boxes(torch.FloatTensor(untracked_instances.pred_boxes)) untracked_instances.pred_classes = torch.IntTensor(untracked_instances.pred_classes) untracked_instances.scores = torch.FloatTensor(untracked_instances.scores) if instances.has("pred_masks"): untracked_instances.pred_masks = torch.IntTensor(untracked_instances.pred_masks) else: untracked_instances.remove("pred_masks") return Instances.cat( [ instances, untracked_instances, ] ) <fim_middle>
targets: null | block_type: FOR | scenario: complete_current_header_empty_completion
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math import numpy as np import cv2 def db_eval_iou(annotation, segmentation, void_pixels=None): """ Compute region similarity as the Jaccard Index. Arguments: annotation (ndarray): binary annotation map. segmentation (ndarray): binary segmentation map. void_pixels (ndarray): optional mask with void pixels Return: jaccard (float): region similarity """ assert annotation.shape == segmentation.shape, \ f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' annotation = annotation.astype(np.bool) segmentation = segmentation.astype(np.bool) if void_pixels is not None: assert annotation.shape == void_pixels.shape, \ f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(segmentation) # Intersection between all sets inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) j = inters / union if j.ndim == 0: j = 1 if np.isclose(union, 0) else j else: j[np.isclose(union, 0)] = 1 return j def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): assert annotation.shape == segmentation.shape if void_pixels is not None: assert annotation.shape == void_pixels.shape if annotation.ndim == 3: n_frames = annotation.shape[0] f_res = np.zeros(n_frames) for frame_id in range(n_frames): void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) elif annotation.ndim == 2: f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) else: raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') return f_res def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. 
void_pixels (ndarray): optional mask with void pixels Returns: F (float): boundaries F-measure """ assert np.atleast_3d(foreground_mask).shape[2] == 1 if void_pixels is not None: void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(foreground_mask).astype(np.bool) bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of both masks fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels)) gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels)) from skimage.morphology import disk # fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if precision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def _seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin <[email protected]> January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not ( width > w | height > h | abs(ar1 - ar2) > 0.01 ), "Can" "t convert %dx%d seg to %dx%d bmap." 
% (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in rang<fim_suffix>e(h): if b[y, x]: j = 1 + math.floor((y - 1) + height / h) i = 1 + math.floor((x - 1) + width / h) bmap[j, i] = 1 return bmap if __name__ == '__main__': from davis2017.davis import DAVIS from davis2017.results import Results dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics') results = Results(root_dir='examples/osvos') # Test timing F measure for seq in dataset.get_sequences(): all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True) all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1] all_res_masks = results.read_masks(seq, all_masks_id) f_metrics_res = np.zeros(all_gt_masks.shape[:2]) for ii in range(all_gt_masks.shape[0]): f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...]) # Run using to profile code: python -m cProfile -o f_measure.prof metrics.py # snakeviz f_measure.prof <fim_middle>
targets: null | block_type: FOR | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf imp<fim_suffix>ort ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
targets: null | block_type: IMPORT | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/layers/roi_align.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. from torch import nn from torchvision.ops import roi_align # NOTE: torchvision's RoIAlign has a different default aligned=False class ROIAlign(nn.Module): def __init__(self, output_size, spatial_scale, sampling_ratio, aligned=True): """ Args: output_size (tuple): h, w spatial_scale (float): scale the input boxes by this number sampling_ratio (int): number of inputs samples to take for each output sample. 0 to take samples densely. aligned (bool): if False, use the legacy implementation in Detectron. If True, align the results more perfectly. Note: The meaning of aligned=True: Given a continuous coordinate c, its two neighboring pixel indices (in our pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example, c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled from the underlying signal at continuous coordinates 0.5 and 1.5). But the original roi_align (aligned=False) does not subtract the 0.5 when computing neighboring pixel indices and therefore it uses pixels with a slightly incorrect alignment (relative to our pixel model) when performing bilinear interpolation. With `aligned=True`, we first appropriately scale the ROI and then shift it by -0.5 prior to calling roi_align. This produces the correct neighbors; see detectron2/tests/test_roi_align.py for verification. The difference does not make a difference to the model's performance if ROIAlign is used together with conv layers. """ super().__init__() self.output_size = output_size self.spatial_scale = spatial_scale self.sampling_ratio = sampling_ratio self.aligned = aligned from torchvision import <fim_suffix>__version__ version = tuple(int(x) for x in __version__.split(".")[:2]) # https://github.com/pytorch/vision/pull/2438 assert version >= (0, 7), "Require torchvision >= 0.7" def forward(self, input, rois): """ Args: input: NCHW images rois: Bx5 boxes. First column is the index into N. The other 4 columns are xyxy. """ assert rois.dim() == 2 and rois.size(1) == 5 if input.is_quantized: input = input.dequantize() return roi_align( input, rois.to(dtype=input.dtype), self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned, ) def __repr__(self): tmpstr = self.__class__.__name__ + "(" tmpstr += "output_size=" + str(self.output_size) tmpstr += ", spatial_scale=" + str(self.spatial_scale) tmpstr += ", sampling_ratio=" + str(self.sampling_ratio) tmpstr += ", aligned=" + str(self.aligned) tmpstr += ")" return tmpstr <fim_middle>
targets: null | block_type: IMPORT | scenario: complete_current_header_empty_completion
<filename>UniRef/external/davis2017-evaluation/davis2017/metrics.py<fim_prefix>import math import numpy as np import cv2 def db_eval_iou(annotation, segmentation, void_pixels=None): """ Compute region similarity as the Jaccard Index. Arguments: annotation (ndarray): binary annotation map. segmentation (ndarray): binary segmentation map. void_pixels (ndarray): optional mask with void pixels Return: jaccard (float): region similarity """ assert annotation.shape == segmentation.shape, \ f'Annotation({annotation.shape}) and segmentation:{segmentation.shape} dimensions do not match.' annotation = annotation.astype(np.bool) segmentation = segmentation.astype(np.bool) if void_pixels is not None: assert annotation.shape == void_pixels.shape, \ f'Annotation({annotation.shape}) and void pixels:{void_pixels.shape} dimensions do not match.' void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(segmentation) # Intersection between all sets inters = np.sum((segmentation & annotation) & np.logical_not(void_pixels), axis=(-2, -1)) union = np.sum((segmentation | annotation) & np.logical_not(void_pixels), axis=(-2, -1)) j = inters / union if j.ndim == 0: j = 1 if np.isclose(union, 0) else j else: j[np.isclose(union, 0)] = 1 return j def db_eval_boundary(annotation, segmentation, void_pixels=None, bound_th=0.008): assert annotation.shape == segmentation.shape if void_pixels is not None: assert annotation.shape == void_pixels.shape if annotation.ndim == 3: n_frames = annotation.shape[0] f_res = np.zeros(n_frames) for frame_id in range(n_frames): void_pixels_frame = None if void_pixels is None else void_pixels[frame_id, :, :, ] f_res[frame_id] = f_measure(segmentation[frame_id, :, :, ], annotation[frame_id, :, :], void_pixels_frame, bound_th=bound_th) elif annotation.ndim == 2: f_res = f_measure(segmentation, annotation, void_pixels, bound_th=bound_th) else: raise ValueError(f'db_eval_boundary does not support tensors with {annotation.ndim} dimensions') return f_res def f_measure(foreground_mask, gt_mask, void_pixels=None, bound_th=0.008): """ Compute mean,recall and decay from per-frame evaluation. Calculates precision/recall for boundaries between foreground_mask and gt_mask using morphological operators to speed it up. Arguments: foreground_mask (ndarray): binary segmentation image. gt_mask (ndarray): binary annotated image. 
void_pixels (ndarray): optional mask with void pixels Returns: F (float): boundaries F-measure """ assert np.atleast_3d(foreground_mask).shape[2] == 1 if void_pixels is not None: void_pixels = void_pixels.astype(np.bool) else: void_pixels = np.zeros_like(foreground_mask).astype(np.bool) bound_pix = bound_th if bound_th >= 1 else \ np.ceil(bound_th * np.linalg.norm(foreground_mask.shape)) # Get the pixel boundaries of both masks fg_boundary = _seg2bmap(foreground_mask * np.logical_not(void_pixels)) gt_boundary = _seg2bmap(gt_mask * np.logical_not(void_pixels)) from skimage.morphology <fim_suffix>import disk # fg_dil = binary_dilation(fg_boundary, disk(bound_pix)) fg_dil = cv2.dilate(fg_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # gt_dil = binary_dilation(gt_boundary, disk(bound_pix)) gt_dil = cv2.dilate(gt_boundary.astype(np.uint8), disk(bound_pix).astype(np.uint8)) # Get the intersection gt_match = gt_boundary * fg_dil fg_match = fg_boundary * gt_dil # Area of the intersection n_fg = np.sum(fg_boundary) n_gt = np.sum(gt_boundary) # % Compute precision and recall if n_fg == 0 and n_gt > 0: precision = 1 recall = 0 elif n_fg > 0 and n_gt == 0: precision = 0 recall = 1 elif n_fg == 0 and n_gt == 0: precision = 1 recall = 1 else: precision = np.sum(fg_match) / float(n_fg) recall = np.sum(gt_match) / float(n_gt) # Compute F measure if precision + recall == 0: F = 0 else: F = 2 * precision * recall / (precision + recall) return F def _seg2bmap(seg, width=None, height=None): """ From a segmentation, compute a binary boundary map with 1 pixel wide boundaries. The boundary pixels are offset by 1/2 pixel towards the origin from the actual segment boundary. Arguments: seg : Segments labeled from 1..k. width : Width of desired bmap <= seg.shape[1] height : Height of desired bmap <= seg.shape[0] Returns: bmap (ndarray): Binary boundary map. David Martin <[email protected]> January 2003 """ seg = seg.astype(np.bool) seg[seg > 0] = 1 assert np.atleast_3d(seg).shape[2] == 1 width = seg.shape[1] if width is None else width height = seg.shape[0] if height is None else height h, w = seg.shape[:2] ar1 = float(width) / float(height) ar2 = float(w) / float(h) assert not ( width > w | height > h | abs(ar1 - ar2) > 0.01 ), "Can" "t convert %dx%d seg to %dx%d bmap." 
% (w, h, width, height) e = np.zeros_like(seg) s = np.zeros_like(seg) se = np.zeros_like(seg) e[:, :-1] = seg[:, 1:] s[:-1, :] = seg[1:, :] se[:-1, :-1] = seg[1:, 1:] b = seg ^ e | seg ^ s | seg ^ se b[-1, :] = seg[-1, :] ^ e[-1, :] b[:, -1] = seg[:, -1] ^ s[:, -1] b[-1, -1] = 0 if w == width and h == height: bmap = b else: bmap = np.zeros((height, width)) for x in range(w): for y in range(h): if b[y, x]: j = 1 + math.floor((y - 1) + height / h) i = 1 + math.floor((x - 1) + width / h) bmap[j, i] = 1 return bmap if __name__ == '__main__': from davis2017.davis import DAVIS from davis2017.results import Results dataset = DAVIS(root='input_dir/ref', subset='val', sequences='aerobatics') results = Results(root_dir='examples/osvos') # Test timing F measure for seq in dataset.get_sequences(): all_gt_masks, _, all_masks_id = dataset.get_all_masks(seq, True) all_gt_masks, all_masks_id = all_gt_masks[:, 1:-1, :, :], all_masks_id[1:-1] all_res_masks = results.read_masks(seq, all_masks_id) f_metrics_res = np.zeros(all_gt_masks.shape[:2]) for ii in range(all_gt_masks.shape[0]): f_metrics_res[ii, :] = db_eval_boundary(all_gt_masks[ii, ...], all_res_masks[ii, ...]) # Run using to profile code: python -m cProfile -o f_measure.prof metrics.py # snakeviz f_measure.prof <fim_middle>
targets: null | block_type: IMPORT | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/utils/registry.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. from typing import Any import pydoc from fvcore.common.registry import Registry # for backward compatibility. """ ``Registry`` and `locate` provide ways to map a string (typically found in config files) to callable objects. """ __all__ = ["Registry", "locate"] def _convert_target_to_string(t: Any) -> str: """ Inverse of ``locate()``. Args: t: any object with ``__module__`` and ``__qualname__`` """ module, qualname = t.__module__, t.__qualname__ # Compress the path to this object, e.g. ``module.submodule._impl.class`` # may become ``module.submodule.class``, if the later also resolves to the same # object. This simplifies the string, and also is less affected by moving the # class implementation. module_parts = module.split(".") for k in range(1, len(module_parts)): prefix = ".".join(module_parts[:k]) candidate = f"{prefix}.{qualname}" try: if locate(candidate) is t: return candidate except ImportError: pass return f"{module}.{qualname}" def locate(name: str) -> Any: """ Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``, such as "module.submodule.class_name". Raise Exception if it cannot be found. """ obj = pydoc.locate(name) # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly # by pydoc.locate. Try a private function from hydra. if obj is None: try: # from hydra.utils import get_method - will print many errors from h<fim_suffix>ydra.utils import _locate except ImportError as e: raise ImportError(f"Cannot dynamically locate object {name}!") from e else: obj = _locate(name) # it raises if fails return obj <fim_middle>
targets: null | block_type: IMPORT | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defa<fim_suffix>ults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. 
""" global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. 
Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False <fim_middle>
targets: null | block_type: IMPORT | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def mat<fim_suffix>ch(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
targets: null | block_type: METHOD | scenario: complete_current_header_empty_completion
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def proces<fim_suffix>s_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
null
METHOD
complete_current_header_empty_completion
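The inputs string in the row above follows the usual fill-in-the-middle convention: the code before the hole comes after <fim_prefix>, the code after the hole comes after <fim_suffix>, and the model is asked to emit the missing span once it sees <fim_middle> (here the hole interrupts the header of process_polygons in detectron2's masks.py). Below is a minimal sketch of that convention; the helper name build_fim_prompt and the example snippet are illustrative assumptions, not the dataset's actual tooling.

    # Illustrative sketch of the <fim_prefix>/<fim_suffix>/<fim_middle> layout
    # seen in the rows of this dump (hypothetical helper, not dataset code).
    def build_fim_prompt(source: str, hole_start: int, hole_end: int):
        prefix = source[:hole_start]           # code kept before the hole
        middle = source[hole_start:hole_end]   # span the model should produce
        suffix = source[hole_end:]             # code kept after the hole
        prompt = f"<fim_prefix>{prefix}<fim_suffix>{suffix}<fim_middle>"
        return prompt, middle

    snippet = "def process_polygons(polygons_per_instance):\n    ...\n"
    prompt, expected = build_fim_prompt(snippet, len("def proces"), len("def process_polygons("))
    # `prompt` ends with <fim_middle>; the expected generation is `expected`,
    # i.e. the rest of the interrupted header: "s_polygons(".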
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_m<fim_suffix>ap(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submodule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
METHOD
complete_current_header_empty_completion
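The hole in the row above falls on the header of fpn_map inside convert_c2_detectron_names; the surrounding suffix already spells out the renaming rule for FPN lateral and output convolutions. For reference, that rule can be exercised standalone as below; the function body is copied from the suffix, and the assertions use the example keys from its docstring.

    def fpn_map(name: str) -> str:
        # Rename Caffe2 FPN blob names to detectron2 names (rule taken from the row above).
        splits = name.split(".")
        norm = ".norm" if "norm" in splits else ""
        if name.startswith("fpn.inner."):
            stage = int(splits[2][len("res"):])
            return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
        elif name.startswith("fpn.res"):
            stage = int(splits[1][len("res"):])
            return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
        return name

    assert fpn_map("fpn.inner.res2.2.sum.lateral.weight") == "fpn_lateral2.weight"
    assert fpn_map("fpn.res2.2.sum.weight") == "fpn_output2.weight"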
<filename>UniRef/detectron2/checkpoint/c2_model_loading.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import logging import re from typing import Dict, List import torch from tabulate import tabulate def convert_basic_c2_names(original_keys): """ Apply some basic name conversion to names in C2 weights. It only deals with typical backbone models. Args: original_keys (list[str]): Returns: list[str]: The same number of strings matching those in original_keys. """ layer_keys = copy.deepcopy(original_keys) layer_keys = [ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys ] # some hard-coded mappings layer_keys = [k.replace("_", ".") for k in layer_keys] layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys] layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys] # Uniform both bn and gn names to "norm" layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys] layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys] layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys] layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys] layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys] # stem layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys] # to avoid mis-matching with "conv1" in other components (e.g. detection head) layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys] # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5) # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys] # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys] # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys] # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys] # blocks layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys] layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys] layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys] layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys] # DensePose substitutions layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys] layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys] layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys] layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys] layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys] return layer_keys def convert_c2_detectron_names(weights): """ Map Caffe2 Detectron weight names to Detectron2 names. 
Args: weights (dict): name -> tensor Returns: dict: detectron2 names -> tensor dict: detectron2 names -> C2 names """ logger = logging.getLogger(__name__) logger.info("Renaming Caffe2 weights ......") original_keys = sorted(weights.keys()) layer_keys = copy.deepcopy(original_keys) layer_keys = convert_basic_c2_names(layer_keys) # -------------------------------------------------------------------------- # RPN hidden representation conv # -------------------------------------------------------------------------- # FPN case # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then # shared for all other levels, hence the appearance of "fpn2" layer_keys = [ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys ] # Non-FPN case layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys] # -------------------------------------------------------------------------- # RPN box transformation conv # -------------------------------------------------------------------------- # FPN case (see note above about "fpn2") layer_keys = [ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # Non-FPN case layer_keys = [ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys ] layer_keys = [ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits") for k in layer_keys ] # -------------------------------------------------------------------------- # Fast R-CNN box head # -------------------------------------------------------------------------- layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys] layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys] layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys] layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys] # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys] # -------------------------------------------------------------------------- # FPN lateral and output convolutions # -------------------------------------------------------------------------- def fpn_map(name): """ Look for keys with the following patterns: 1) Starts with "fpn.inner." 
Example: "fpn.inner.res2.2.sum.lateral.weight" Meaning: These are lateral pathway convolutions 2) Starts with "fpn.res" Example: "fpn.res2.2.sum.weight" Meaning: These are FPN output convolutions """ splits = name.split(".") norm = ".norm" if "norm" in splits else "" if name.startswith("fpn.inner."): # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight'] stage = int(splits[2][len("res") :]) return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1]) elif name.startswith("fpn.res"): # splits example: ['fpn', 'res2', '2', 'sum', 'weight'] stage = int(splits[1][len("res") :]) return "fpn_output{}{}.{}".format(stage, norm, splits[-1]) return name layer_keys = [fpn_map(k) for k in layer_keys] # -------------------------------------------------------------------------- # Mask R-CNN mask head # -------------------------------------------------------------------------- # roi_heads.StandardROIHeads case layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys] layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys] layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys] # roi_heads.Res5ROIHeads case layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys] # -------------------------------------------------------------------------- # Keypoint R-CNN head # -------------------------------------------------------------------------- # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX" layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys] layer_keys = [ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys ] layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys] # -------------------------------------------------------------------------- # Done with replacements # -------------------------------------------------------------------------- assert len(set(layer_keys)) == len(layer_keys) assert len(original_keys) == len(layer_keys) new_weights = {} new_keys_to_original_keys = {} for orig, renamed in zip(original_keys, layer_keys): new_keys_to_original_keys[renamed] = orig if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."): # remove the meaningless prediction weight for background class new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1 new_weights[renamed] = weights[orig][new_start_idx:] logger.info( "Remove prediction weight for background class in {}. The shape changes from " "{} to {}.".format( renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape) ) ) elif renamed.startswith("cls_score."): # move weights of bg class from original index 0 to last index logger.info( "Move classification weights for background class in {} from index 0 to " "index {}.".format(renamed, weights[orig].shape[0] - 1) ) new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]]) else: new_weights[renamed] = weights[orig] return new_weights, new_keys_to_original_keys # Note the current matching is not symmetric. # it assumes model_state_dict will have longer names. def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True): """ Match names between the two state-dict, and returns a new chkpt_state_dict with names converted to match model_state_dict with heuristics. The returned dict can be later loaded with fvcore checkpointer. 
If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2 model and will be renamed at first. Strategy: suppose that the models that we will create will have prefixes appended to each of its keys, for example due to an extra level of nesting that the original pre-trained weights from ImageNet won't contain. For example, model.state_dict() might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains res2.conv1.weight. We thus want to match both parameters together. For that, we look for each model weight, look among all loaded keys if there is one that is a suffix of the current weight name, and use it if that's the case. If multiple matches exist, take the one with longest size of the corresponding name. For example, for the same model as before, the pretrained weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case, we want to match backbone[0].body.conv1.weight to conv1.weight, and backbone[0].body.res2.conv1.weight to res2.conv1.weight. """ model_keys = sorted(model_state_dict.keys()) if c2_conversion: ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict) # original_keys: the name in the original dict (before renaming) else: original_keys = {x: x for x in ckpt_state_dict.keys()} ckpt_keys = sorted(ckpt_state_dict.keys()) def match(a, b): # Matched ckpt_key should be a complete (starts with '.') suffix. # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1, # but matches whatever_conv1 or mesh_head.whatever_conv1. return a == b or a.endswith("." + b) # get a matrix of string matches, where each (i, j) entry correspond to the size of the # ckpt_key string, if it matches match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys] match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys)) # use the matched one with longest size in case of multiple matches max_match_size, idxs = match_matrix.max(1) # remove indices that correspond to no-match idxs[max_match_size == 0] = -1 logger = logging.getLogger(__name__) # matched_pairs (matched checkpoint key --> matched model key) matched_keys = {} result_state_dict = {} for idx_model, idx_ckpt in enumerate(idxs.tolist()): if idx_ckpt == -1: continue key_model = model_keys[idx_model] key_ckpt = ckpt_keys[idx_ckpt] value_ckpt = ckpt_state_dict[key_ckpt] shape_in_model = model_state_dict[key_model].shape if shape_in_model != value_ckpt.shape: logger.warning( "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format( key_ckpt, value_ckpt.shape, key_model, shape_in_model ) ) logger.warning( "{} will not be loaded. Please double check and see if this is desired.".format( key_ckpt ) ) continue assert key_model not in result_state_dict result_state_dict[key_model] = value_ckpt if key_ckpt in matched_keys: # already added to matched_keys logger.error( "Ambiguity found for {} in checkpoint!" 
"It matches at least two keys in the model ({} and {}).".format( key_ckpt, key_model, matched_keys[key_ckpt] ) ) raise ValueError("Cannot match one checkpoint key to multiple keys in the model.") matched_keys[key_ckpt] = key_model # logging: matched_model_keys = sorted(matched_keys.values()) if len(matched_model_keys) == 0: logger.warning("No weights in checkpoint matched with model.") return ckpt_state_dict common_prefix = _longest_common_prefix(matched_model_keys) rev_matched_keys = {v: k for k, v in matched_keys.items()} original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys} model_key_groups = _group_keys_by_module(matched_model_keys, original_keys) table = [] memo = set() for key_model in matched_model_keys: if key_model in memo: continue if key_model in model_key_groups: group = model_key_groups[key_model] memo |= set(group) shapes = [tuple(model_state_dict[k].shape) for k in group] table.append( ( _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*", _group_str([original_keys[k] for k in group]), " ".join([str(x).replace(" ", "") for x in shapes]), ) ) else: key_checkpoint = original_keys[key_model] shape = str(tuple(model_state_dict[key_model].shape)) table.append((key_model[len(common_prefix) :], key_checkpoint, shape)) table_str = tabulate( table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"] ) logger.info( "Following weights matched with " + (f"submodule {common_prefix[:-1]}" if common_prefix else "model") + ":\n" + table_str ) unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())] for k in unmatched_ckpt_keys: result_state_dict[k] = ckpt_state_dict[k] return result_state_dict def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]): """ Params in the same submodule are grouped together. Args: keys: names of all parameters original_names: mapping from parameter name to their name in the checkpoint Returns: dict[name -> all other names in the same group] """ def _submod<fim_suffix>ule_name(key): pos = key.rfind(".") if pos < 0: return None prefix = key[: pos + 1] return prefix all_submodules = [_submodule_name(k) for k in keys] all_submodules = [x for x in all_submodules if x] all_submodules = sorted(all_submodules, key=len) ret = {} for prefix in all_submodules: group = [k for k in keys if k.startswith(prefix)] if len(group) <= 1: continue original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group]) if len(original_name_lcp) == 0: # don't group weights if original names don't share prefix continue for k in group: if k in ret: continue ret[k] = group return ret def _longest_common_prefix(names: List[str]) -> str: """ ["abc.zfg", "abc.zef"] -> "abc." """ names = [n.split(".") for n in names] m1, m2 = min(names), max(names) ret = [a for a, b in zip(m1, m2) if a == b] ret = ".".join(ret) + "." if len(ret) else "" return ret def _longest_common_prefix_str(names: List[str]) -> str: m1, m2 = min(names), max(names) lcp = [a for a, b in zip(m1, m2) if a == b] lcp = "".join(lcp) return lcp def _group_str(names: List[str]) -> str: """ Turn "common1", "common2", "common3" into "common{1,2,3}" """ lcp = _longest_common_prefix_str(names) rest = [x[len(lcp) :] for x in names] rest = "{" + ",".join(rest) + "}" ret = lcp + rest # add some simplification for BN specifically ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*") ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*") return ret <fim_middle>
null
METHOD
complete_current_header_empty_completion
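The hole in the row above sits on the header of _submodule_name, the small helper that _group_keys_by_module uses to group parameters by their parent module; the grouping then relies on _longest_common_prefix, defined further down the same row. A quick standalone check of that prefix helper, using the example from its own docstring plus one backbone-style key pair (an illustrative assumption), is:

    def _longest_common_prefix(names):
        # Dot-delimited longest common prefix, as defined near the end of the row above.
        parts = [n.split(".") for n in names]
        m1, m2 = min(parts), max(parts)
        common = [a for a, b in zip(m1, m2) if a == b]
        return ".".join(common) + "." if len(common) else ""

    assert _longest_common_prefix(["abc.zfg", "abc.zef"]) == "abc."
    assert _longest_common_prefix(
        ["backbone.res2.conv1.weight", "backbone.res2.conv1.norm.bias"]
    ) == "backbone.res2.conv1."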
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. 
""" global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, <fim_suffix>**kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. 
Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False <fim_middle>
null
METHOD
complete_current_header_empty_completion
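The hole in the config.py row above interrupts the header of the wrapped closure inside the @configurable decorator; the prefix already documents the intended call patterns. A hedged usage sketch is given below, assuming detectron2 is installed: MyHead is a made-up class, and MODEL.ROI_HEADS.NUM_CLASSES is assumed to exist in the default config returned by get_cfg().

    from detectron2.config import configurable, get_cfg

    class MyHead:
        @configurable
        def __init__(self, num_classes, hidden_dim=256):
            self.num_classes = num_classes
            self.hidden_dim = hidden_dim

        @classmethod
        def from_config(cls, cfg):
            # Translate a CfgNode into explicit constructor arguments.
            return {"num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES}

    cfg = get_cfg()
    h1 = MyHead(num_classes=80)       # plain construction
    h2 = MyHead(cfg)                  # arguments pulled from the config via from_config
    h3 = MyHead(cfg, hidden_dim=512)  # config plus an explicit overwrite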
<filename>UniRef/detectron2/structures/masks.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import copy import itertools import numpy as np from typing import Any, Iterator, List, Union import pycocotools.mask as mask_util import torch from torch import device from detectron2.layers.roi_align import ROIAlign from detectron2.utils.memory import retry_if_cuda_oom from .boxes import Boxes def polygon_area(x, y): # Using the shoelace formula # https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) def polygons_to_bitmask(polygons: List[np.ndarray], height: int, width: int) -> np.ndarray: """ Args: polygons (list[ndarray]): each array has shape (Nx2,) height, width (int) Returns: ndarray: a bool mask of shape (height, width) """ if len(polygons) == 0: # COCOAPI does not support empty polygons return np.zeros((height, width)).astype(np.bool) rles = mask_util.frPyObjects(polygons, height, width) rle = mask_util.merge(rles) return mask_util.decode(rle).astype(np.bool) def rasterize_polygons_within_box( polygons: List[np.ndarray], box: np.ndarray, mask_size: int ) -> torch.Tensor: """ Rasterize the polygons into a mask image and crop the mask content in the given box. The cropped mask is resized to (mask_size, mask_size). This function is used when generating training targets for mask head in Mask R-CNN. Given original ground-truth masks for an image, new ground-truth mask training targets in the size of `mask_size x mask_size` must be provided for each predicted box. This function will be called to produce such targets. Args: polygons (list[ndarray[float]]): a list of polygons, which represents an instance. box: 4-element numpy array mask_size (int): Returns: Tensor: BoolTensor of shape (mask_size, mask_size) """ # 1. Shift the polygons w.r.t the boxes w, h = box[2] - box[0], box[3] - box[1] polygons = copy.deepcopy(polygons) for p in polygons: p[0::2] = p[0::2] - box[0] p[1::2] = p[1::2] - box[1] # 2. Rescale the polygons to the new box size # max() to avoid division by small number ratio_h = mask_size / max(h, 0.1) ratio_w = mask_size / max(w, 0.1) if ratio_h == ratio_w: for p in polygons: p *= ratio_h else: for p in polygons: p[0::2] *= ratio_w p[1::2] *= ratio_h # 3. Rasterize the polygons with coco api mask = polygons_to_bitmask(polygons, mask_size, mask_size) mask = torch.from_numpy(mask) return mask class BitMasks: """ This class stores the segmentation masks for all objects in one image, in the form of bitmaps. Attributes: tensor: bool Tensor of N,H,W, representing N instances in the image. """ def __init__(self, tensor: Union[torch.Tensor, np.ndarray]): """ Args: tensor: bool Tensor of N,H,W, representing N instances in the image. """ device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device("cpu") tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device) assert tensor.dim() == 3, tensor.size() self.image_size = tensor.shape[1:] self.tensor = tensor @torch.jit.unused def to(self, *args: Any, **kwargs: Any) -> "BitMasks": return BitMasks(self.tensor.to(*args, **kwargs)) @property def device(self) -> torch.device: return self.tensor.device @torch.jit.unused def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> "BitMasks": """ Returns: BitMasks: Create a new :class:`BitMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask. 2. 
`new_masks = masks[2:10]`: return a slice of masks. 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ if isinstance(item, int): return BitMasks(self.tensor[item].unsqueeze(0)) m = self.tensor[item] assert m.dim() == 3, "Indexing on BitMasks with {} returns a tensor with shape {}!".format( item, m.shape ) return BitMasks(m) @torch.jit.unused def __iter__(self) -> torch.Tensor: yield from self.tensor @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s def __len__(self) -> int: return self.tensor.shape[0] def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or non-empty (True). """ return self.tensor.flatten(1).any(dim=1) @staticmethod def from_polygon_masks( polygon_masks: Union["PolygonMasks", List[List[np.ndarray]]], height: int, width: int ) -> "BitMasks": """ Args: polygon_masks (list[list[ndarray]] or PolygonMasks) height, width (int) """ if isinstance(polygon_masks, PolygonMasks): polygon_masks = polygon_masks.polygons masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks] if len(masks): return BitMasks(torch.stack([torch.from_numpy(x) for x in masks])) else: return BitMasks(torch.empty(0, height, width, dtype=torch.bool)) @staticmethod def from_roi_masks(roi_masks: "ROIMasks", height: int, width: int) -> "BitMasks": """ Args: roi_masks: height, width (int): """ return roi_masks.to_bitmasks(height, width) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each bitmask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. It has less reconstruction error compared to rasterization with polygons. However we observe no difference in accuracy, but BitMasks requires more memory to store all the masks. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. """ assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = self.tensor.device batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None] rois = torch.cat([batch_inds, boxes], dim=1) # Nx5 bit_masks = self.tensor.to(dtype=torch.float32) rois = rois.to(device=device) output = ( ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True) .forward(bit_masks[:, None, :, :], rois) .squeeze(1) ) output = output >= 0.5 return output def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around bitmasks. If a mask is empty, it's bounding box will be all zero. 
""" boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32) x_any = torch.any(self.tensor, dim=1) y_any = torch.any(self.tensor, dim=2) for idx in range(self.tensor.shape[0]): x = torch.where(x_any[idx, :])[0] y = torch.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: boxes[idx, :] = torch.as_tensor( [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32 ) return Boxes(boxes) @staticmethod def cat(bitmasks_list: List["BitMasks"]) -> "BitMasks": """ Concatenates a list of BitMasks into a single BitMasks Arguments: bitmasks_list (list[BitMasks]) Returns: BitMasks: the concatenated BitMasks """ assert isinstance(bitmasks_list, (list, tuple)) assert len(bitmasks_list) > 0 assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list) cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0)) return cat_bitmasks class PolygonMasks: """ This class stores the segmentation masks for all objects in one image, in the form of polygons. Attributes: polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon. """ def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]): """ Arguments: polygons (list[list[np.ndarray]]): The first level of the list correspond to individual instances, the second level to all the polygons that compose the instance, and the third level to the polygon coordinates. The third level array should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). """ if not isinstance(polygons, list): raise ValueError( "Cannot create PolygonMasks: Expect a list of list of polygons per image. " "Got '{}' instead.".format(type(polygons)) ) def _make_array(t: Union[torch.Tensor, np.ndarr<fim_suffix>ay]) -> np.ndarray: # Use float64 for higher precision, because why not? # Always put polygons on CPU (self.to is a no-op) since they # are supposed to be small tensors. # May need to change this assumption if GPU placement becomes useful if isinstance(t, torch.Tensor): t = t.cpu().numpy() return np.asarray(t).astype("float64") def process_polygons( polygons_per_instance: List[Union[torch.Tensor, np.ndarray]] ) -> List[np.ndarray]: if not isinstance(polygons_per_instance, list): raise ValueError( "Cannot create polygons: Expect a list of polygons per instance. " "Got '{}' instead.".format(type(polygons_per_instance)) ) # transform each polygon to a numpy array polygons_per_instance = [_make_array(p) for p in polygons_per_instance] for polygon in polygons_per_instance: if len(polygon) % 2 != 0 or len(polygon) < 6: raise ValueError(f"Cannot create a polygon from {len(polygon)} coordinates.") return polygons_per_instance self.polygons: List[List[np.ndarray]] = [ process_polygons(polygons_per_instance) for polygons_per_instance in polygons ] def to(self, *args: Any, **kwargs: Any) -> "PolygonMasks": return self @property def device(self) -> torch.device: return torch.device("cpu") def get_bounding_boxes(self) -> Boxes: """ Returns: Boxes: tight bounding boxes around polygon masks. 
""" boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32) for idx, polygons_per_instance in enumerate(self.polygons): minxy = torch.as_tensor([float("inf"), float("inf")], dtype=torch.float32) maxxy = torch.zeros(2, dtype=torch.float32) for polygon in polygons_per_instance: coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32) minxy = torch.min(minxy, torch.min(coords, dim=0).values) maxxy = torch.max(maxxy, torch.max(coords, dim=0).values) boxes[idx, :2] = minxy boxes[idx, 2:] = maxxy return Boxes(boxes) def nonempty(self) -> torch.Tensor: """ Find masks that are non-empty. Returns: Tensor: a BoolTensor which represents whether each mask is empty (False) or not (True). """ keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons] return torch.from_numpy(np.asarray(keep, dtype=np.bool)) def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> "PolygonMasks": """ Support indexing over the instances and return a `PolygonMasks` object. `item` can be: 1. An integer. It will return an object with only one instance. 2. A slice. It will return an object with the selected instances. 3. A list[int]. It will return an object with the selected instances, correpsonding to the indices in the list. 4. A vector mask of type BoolTensor, whose length is num_instances. It will return an object with the instances whose mask is nonzero. """ if isinstance(item, int): selected_polygons = [self.polygons[item]] elif isinstance(item, slice): selected_polygons = self.polygons[item] elif isinstance(item, list): selected_polygons = [self.polygons[i] for i in item] elif isinstance(item, torch.Tensor): # Polygons is a list, so we have to move the indices back to CPU. if item.dtype == torch.bool: assert item.dim() == 1, item.shape item = item.nonzero().squeeze(1).cpu().numpy().tolist() elif item.dtype in [torch.int32, torch.int64]: item = item.cpu().numpy().tolist() else: raise ValueError("Unsupported tensor dtype={} for indexing!".format(item.dtype)) selected_polygons = [self.polygons[i] for i in item] return PolygonMasks(selected_polygons) def __iter__(self) -> Iterator[List[np.ndarray]]: """ Yields: list[ndarray]: the polygons for one instance. Each Tensor is a float64 vector representing a polygon. """ return iter(self.polygons) def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.polygons)) return s def __len__(self) -> int: return len(self.polygons) def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor: """ Crop each mask by the given box, and resize results to (mask_size, mask_size). This can be used to prepare training targets for Mask R-CNN. Args: boxes (Tensor): Nx4 tensor storing the boxes for each mask mask_size (int): the size of the rasterized mask. Returns: Tensor: A bool tensor of shape (N, mask_size, mask_size), where N is the number of predicted boxes for this image. 
""" assert len(boxes) == len(self), "{} != {}".format(len(boxes), len(self)) device = boxes.device # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise # (several small tensors for representing a single instance mask) boxes = boxes.to(torch.device("cpu")) results = [ rasterize_polygons_within_box(poly, box.numpy(), mask_size) for poly, box in zip(self.polygons, boxes) ] """ poly: list[list[float]], the polygons for one instance box: a tensor of shape (4,) """ if len(results) == 0: return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device) return torch.stack(results, dim=0).to(device=device) def area(self): """ Computes area of the mask. Only works with Polygons, using the shoelace formula: https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates Returns: Tensor: a vector, area for each instance """ area = [] for polygons_per_instance in self.polygons: area_per_instance = 0 for p in polygons_per_instance: area_per_instance += polygon_area(p[0::2], p[1::2]) area.append(area_per_instance) return torch.tensor(area) @staticmethod def cat(polymasks_list: List["PolygonMasks"]) -> "PolygonMasks": """ Concatenates a list of PolygonMasks into a single PolygonMasks Arguments: polymasks_list (list[PolygonMasks]) Returns: PolygonMasks: the concatenated PolygonMasks """ assert isinstance(polymasks_list, (list, tuple)) assert len(polymasks_list) > 0 assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list) cat_polymasks = type(polymasks_list[0])( list(itertools.chain.from_iterable(pm.polygons for pm in polymasks_list)) ) return cat_polymasks class ROIMasks: """ Represent masks by N smaller masks defined in some ROIs. Once ROI boxes are given, full-image bitmask can be obtained by "pasting" the mask on the region defined by the corresponding ROI box. """ def __init__(self, tensor: torch.Tensor): """ Args: tensor: (N, M, M) mask tensor that defines the mask within each ROI. """ if tensor.dim() != 3: raise ValueError("ROIMasks must take a masks of 3 dimension.") self.tensor = tensor def to(self, device: torch.device) -> "ROIMasks": return ROIMasks(self.tensor.to(device)) @property def device(self) -> device: return self.tensor.device def __len__(self): return self.tensor.shape[0] def __getitem__(self, item) -> "ROIMasks": """ Returns: ROIMasks: Create a new :class:`ROIMasks` by indexing. The following usage are allowed: 1. `new_masks = masks[2:10]`: return a slice of masks. 2. `new_masks = masks[vector]`, where vector is a torch.BoolTensor with `length = len(masks)`. Nonzero elements in the vector will be selected. Note that the returned object might share storage with this object, subject to Pytorch's indexing semantics. """ t = self.tensor[item] if t.dim() != 3: raise ValueError( f"Indexing on ROIMasks with {item} returns a tensor with shape {t.shape}!" ) return ROIMasks(t) @torch.jit.unused def __repr__(self) -> str: s = self.__class__.__name__ + "(" s += "num_instances={})".format(len(self.tensor)) return s @torch.jit.unused def to_bitmasks(self, boxes: torch.Tensor, height, width, threshold=0.5): """ Args: see documentation of :func:`paste_masks_in_image`. 
""" from detectron2.layers.mask_ops import paste_masks_in_image, _paste_masks_tensor_shape if torch.jit.is_tracing(): if isinstance(height, torch.Tensor): paste_func = _paste_masks_tensor_shape else: paste_func = paste_masks_in_image else: paste_func = retry_if_cuda_oom(paste_masks_in_image) bitmasks = paste_func(self.tensor, boxes.tensor, (height, width), threshold=threshold) return BitMasks(bitmasks) <fim_middle>
null
METHOD
complete_current_header_empty_completion
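The masks.py payload above cites the shoelace formula for polygon area. As a quick sanity check, here is a minimal standalone sketch (plain NumPy, not part of the dataset row) applied to a unit square; the function body is copied from the sample, everything else is illustrative:

import numpy as np

def polygon_area(x, y):
    # Shoelace formula, same form as in the sample above.
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

# Unit square traversed counter-clockwise: the area should come out as 1.0.
x = np.array([0.0, 1.0, 1.0, 0.0])
y = np.array([0.0, 0.0, 1.0, 1.0])
print(polygon_area(x, y))  # 1.0

The absolute value makes the result independent of vertex orientation, which is why the same expression works for clockwise polygons as well.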
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. 
""" global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwa<fim_suffix>rgs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. 
Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False <fim_middle>
null
METHOD
complete_current_header_empty_completion
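The config.py payload revolves around a dispatch pattern: a callable accepts either explicit keyword arguments or a single config object that a from_config function translates into kwargs. A stripped-down sketch of that idea follows; make_configurable and build_model are hypothetical names, and a plain dict stands in for the config object — this is not the library's implementation:

def make_configurable(func, from_config):
    def wrapped(*args, **kwargs):
        if args and isinstance(args[0], dict):  # stand-in for the real config-type check
            return func(**from_config(args[0]))
        return func(*args, **kwargs)
    return wrapped

def build_model(a, b=2):
    return (a, b)

build = make_configurable(build_model, lambda cfg: {"a": cfg["A"], "b": cfg["B"]})
print(build(a=1))               # (1, 2): regular call with explicit arguments
print(build({"A": 4, "B": 5}))  # (4, 5): called with a config mapping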
<filename>UniRef/detectron2/utils/registry.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. from typing import Any import pydoc from fvcore.common.registry import Registry # for backward compatibility. """ ``Registry`` and `locate` provide ways to map a string (typically found in config files) to callable objects. """ __all__ = ["Registry", "locate"] def _convert_target_to_string(t: Any) -> str: """ Inverse of ``locate()``. Args: t: any object with ``__module__`` and ``__qualname__`` """ module, qualname = t.__module__, t.__qualname__ # Compress the path to this object, e.g. ``module.submodule._impl.class`` # may become ``module.submodule.class``, if the later also resolves to the same # object. This simplifies the string, and also is less affected by moving the # class implementation. module_parts = module.split(".") for k in range(1, len(module_parts)): prefix = ".".join(module_parts[:k]) candidate = f"{prefix}.{qualname}" try: if locate(candidate) is t: return candidate except ImportError: pass return f"{module}.{qualname}" def locate(name: str) -> Any: """ Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``, such as "module.submodule.class_name". Raise Exception if it cannot be found. """ obj = pydoc.locate(name) # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly # by pydoc.locate. Try a private function from hydra. if obj is None: tr<fim_suffix>y: # from hydra.utils import get_method - will print many errors from hydra.utils import _locate except ImportError as e: raise ImportError(f"Cannot dynamically locate object {name}!") from e else: obj = _locate(name) # it raises if fails return obj <fim_middle>
null
TRY
complete_current_header_empty_completion
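The registry.py payload builds on pydoc.locate, which resolves a dotted "module.qualname" string back to the live Python object. A minimal usage sketch, standard library only:

import math
import pydoc

fn = pydoc.locate("math.sqrt")
print(fn is math.sqrt)  # True
print(fn(9.0))          # 3.0

# Names that cannot be resolved return None rather than raising, which is why
# the sample above falls back to a second locator when the result is None.
print(pydoc.locate("no.such.thing"))  # None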
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" tr<fim_suffix>y: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
null
TRY
complete_current_header_empty_completion
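The instantiate.py payloads describe recursive construction from dicts keyed by "_target_". The gist, as a tiny standalone sketch using only the standard library — tiny_instantiate is a hypothetical helper written for illustration, not the detectron2 implementation:

import pydoc

def tiny_instantiate(cfg):
    # Recursively build objects from {"_target_": "module.Class", ...} mappings.
    if isinstance(cfg, dict) and "_target_" in cfg:
        kwargs = {k: tiny_instantiate(v) for k, v in cfg.items() if k != "_target_"}
        cls = pydoc.locate(cfg["_target_"])
        return cls(**kwargs)
    if isinstance(cfg, list):
        return [tiny_instantiate(x) for x in cfg]
    return cfg

cfg = {"_target_": "datetime.timedelta", "days": 2, "hours": 3}
print(tiny_instantiate(cfg))  # 2 days, 3:00:00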
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: tr<fim_suffix>y: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
null
TRY
complete_current_header_empty_completion
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. 
""" global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): tr<fim_suffix>y: from_config_func = type(self).from_config except AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. 
Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False <fim_middle>
null
TRY
complete_current_header_empty_completion
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exceptio<fim_suffix>n: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) except TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
null
CATCH
complete_current_header_empty_completion
<filename>UniRef/detectron2/utils/registry.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. from typing import Any import pydoc from fvcore.common.registry import Registry # for backward compatibility. """ ``Registry`` and `locate` provide ways to map a string (typically found in config files) to callable objects. """ __all__ = ["Registry", "locate"] def _convert_target_to_string(t: Any) -> str: """ Inverse of ``locate()``. Args: t: any object with ``__module__`` and ``__qualname__`` """ module, qualname = t.__module__, t.__qualname__ # Compress the path to this object, e.g. ``module.submodule._impl.class`` # may become ``module.submodule.class``, if the later also resolves to the same # object. This simplifies the string, and also is less affected by moving the # class implementation. module_parts = module.split(".") for k in range(1, len(module_parts)): prefix = ".".join(module_parts[:k]) candidate = f"{prefix}.{qualname}" try: if locate(candidate) is t: return candidate except ImportError: pass return f"{module}.{qualname}" def locate(name: str) -> Any: """ Locate and return an object ``x`` using an input string ``{x.__module__}.{x.__qualname__}``, such as "module.submodule.class_name". Raise Exception if it cannot be found. """ obj = pydoc.locate(name) # Some cases (e.g. torch.optim.sgd.SGD) not handled correctly # by pydoc.locate. Try a private function from hydra. if obj is None: try: # from hydra.utils import get_method - will print many errors from hydra.utils import _locate except ImportErr<fim_suffix>or as e: raise ImportError(f"Cannot dynamically locate object {name}!") from e else: obj = _locate(name) # it raises if fails return obj <fim_middle>
null
CATCH
complete_current_header_empty_completion
<filename>UniRef/detectron2/config/config.py<fim_prefix># -*- coding: utf-8 -*- # Copyright (c) Facebook, Inc. and its affiliates. import functools import inspect import logging from fvcore.common.config import CfgNode as _CfgNode from detectron2.utils.file_io import PathManager class CfgNode(_CfgNode): """ The same as `fvcore.common.config.CfgNode`, but different in: 1. Use unsafe yaml loading by default. Note that this may lead to arbitrary code execution: you must not load a config file from untrusted sources before manually inspecting the content of the file. 2. Support config versioning. When attempting to merge an old config, it will convert the old config automatically. .. automethod:: clone .. automethod:: freeze .. automethod:: defrost .. automethod:: is_frozen .. automethod:: load_yaml_with_base .. automethod:: merge_from_list .. automethod:: merge_from_other_cfg """ @classmethod def _open_cfg(cls, filename): return PathManager.open(filename, "r") # Note that the default value of allow_unsafe is changed to True def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None: """ Load content from the given config file and merge it into self. Args: cfg_filename: config filename allow_unsafe: allow unsafe yaml syntax """ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!" loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe) loaded_cfg = type(self)(loaded_cfg) # defaults.py needs to import CfgNode from .defaults import _C latest_ver = _C.VERSION assert ( latest_ver == self.VERSION ), "CfgNode.merge_from_file is only allowed on a config object of latest version!" logger = logging.getLogger(__name__) loaded_ver = loaded_cfg.get("VERSION", None) if loaded_ver is None: from .compat import guess_version loaded_ver = guess_version(loaded_cfg, cfg_filename) assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format( loaded_ver, self.VERSION ) if loaded_ver == self.VERSION: self.merge_from_other_cfg(loaded_cfg) else: # compat.py needs to import CfgNode from .compat import upgrade_config, downgrade_config logger.warning( "Loading an old v{} config file '{}' by automatically upgrading to v{}. " "See docs/CHANGELOG.md for instructions to update your files.".format( loaded_ver, cfg_filename, self.VERSION ) ) # To convert, first obtain a full config at an old version old_self = downgrade_config(self, to_version=loaded_ver) old_self.merge_from_other_cfg(loaded_cfg) new_config = upgrade_config(old_self) self.clear() self.update(new_config) def dump(self, *args, **kwargs): """ Returns: str: a yaml string representation of the config """ # to make it show up in docs return super().dump(*args, **kwargs) global_cfg = CfgNode() def get_cfg() -> CfgNode: """ Get a copy of the default config. Returns: a detectron2 CfgNode instance. """ from .defaults import _C return _C.clone() def set_global_cfg(cfg: CfgNode) -> None: """ Let the global config point to the given cfg. Assume that the given "cfg" has the key "KEY", after calling `set_global_cfg(cfg)`, the key can be accessed by: :: from detectron2.config import global_cfg print(global_cfg.KEY) By using a hacky global config, you can access these configs anywhere, without having to pass the config object or the values deep into the code. This is a hacky feature introduced for quick prototyping / research exploration. 
""" global global_cfg global_cfg.clear() global_cfg.update(cfg) def configurable(init_func=None, *, from_config=None): """ Decorate a function or a class's __init__ method so that it can be called with a :class:`CfgNode` object using a :func:`from_config` function that translates :class:`CfgNode` to arguments. Examples: :: # Usage 1: Decorator on __init__: class A: @configurable def __init__(self, a, b=2, c=3): pass @classmethod def from_config(cls, cfg): # 'cfg' must be the first argument # Returns kwargs to be passed to __init__ return {"a": cfg.A, "b": cfg.B} a1 = A(a=1, b=2) # regular construction a2 = A(cfg) # construct with a cfg a3 = A(cfg, b=3, c=4) # construct with extra overwrite # Usage 2: Decorator on any function. Needs an extra from_config argument: @configurable(from_config=lambda cfg: {"a: cfg.A, "b": cfg.B}) def a_func(a, b=2, c=3): pass a1 = a_func(a=1, b=2) # regular call a2 = a_func(cfg) # call with a cfg a3 = a_func(cfg, b=3, c=4) # call with extra overwrite Args: init_func (callable): a class's ``__init__`` method in usage 1. The class must have a ``from_config`` classmethod which takes `cfg` as the first argument. from_config (callable): the from_config function in usage 2. It must take `cfg` as its first argument. """ if init_func is not None: assert ( inspect.isfunction(init_func) and from_config is None and init_func.__name__ == "__init__" ), "Incorrect use of @configurable. Check API documentation for examples." @functools.wraps(init_func) def wrapped(self, *args, **kwargs): try: from_config_func = type(self).from_config except <fim_suffix>AttributeError as e: raise AttributeError( "Class with @configurable must have a 'from_config' classmethod." ) from e if not inspect.ismethod(from_config_func): raise TypeError("Class with @configurable must have a 'from_config' classmethod.") if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config_func, *args, **kwargs) init_func(self, **explicit_args) else: init_func(self, *args, **kwargs) return wrapped else: if from_config is None: return configurable # @configurable() is made equivalent to @configurable assert inspect.isfunction( from_config ), "from_config argument of configurable must be a function!" def wrapper(orig_func): @functools.wraps(orig_func) def wrapped(*args, **kwargs): if _called_with_cfg(*args, **kwargs): explicit_args = _get_args_from_config(from_config, *args, **kwargs) return orig_func(**explicit_args) else: return orig_func(*args, **kwargs) wrapped.from_config = from_config return wrapped return wrapper def _get_args_from_config(from_config_func, *args, **kwargs): """ Use `from_config` to obtain explicit arguments. 
Returns: dict: arguments to be used for cls.__init__ """ signature = inspect.signature(from_config_func) if list(signature.parameters.keys())[0] != "cfg": if inspect.isfunction(from_config_func): name = from_config_func.__name__ else: name = f"{from_config_func.__self__}.from_config" raise TypeError(f"{name} must take 'cfg' as the first argument!") support_var_arg = any( param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD] for param in signature.parameters.values() ) if support_var_arg: # forward all arguments to from_config, if from_config accepts them ret = from_config_func(*args, **kwargs) else: # forward supported arguments to from_config supported_arg_names = set(signature.parameters.keys()) extra_kwargs = {} for name in list(kwargs.keys()): if name not in supported_arg_names: extra_kwargs[name] = kwargs.pop(name) ret = from_config_func(*args, **kwargs) # forward the other arguments to __init__ ret.update(extra_kwargs) return ret def _called_with_cfg(*args, **kwargs): """ Returns: bool: whether the arguments contain CfgNode and should be considered forwarded to from_config. """ from omegaconf import DictConfig if len(args) and isinstance(args[0], (_CfgNode, DictConfig)): return True if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)): return True # `from_config`'s first argument is forced to be "cfg". # So the above check covers all cases. return False <fim_middle>
null
CATCH
complete_current_header_empty_completion
<filename>UniRef/detectron2/config/instantiate.py<fim_prefix># Copyright (c) Facebook, Inc. and its affiliates. import dataclasses import logging from collections import abc from typing import Any from detectron2.utils.registry import _convert_target_to_string, locate __all__ = ["dump_dataclass", "instantiate"] def dump_dataclass(obj: Any): """ Dump a dataclass recursively into a dict that can be later instantiated. Args: obj: a dataclass object Returns: dict """ assert dataclasses.is_dataclass(obj) and not isinstance( obj, type ), "dump_dataclass() requires an instance of a dataclass." ret = {"_target_": _convert_target_to_string(type(obj))} for f in dataclasses.fields(obj): v = getattr(obj, f.name) if dataclasses.is_dataclass(v): v = dump_dataclass(v) if isinstance(v, (list, tuple)): v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v] ret[f.name] = v return ret def instantiate(cfg): """ Recursively instantiate objects defined in dictionaries by "_target_" and arguments. Args: cfg: a dict-like object with "_target_" that defines the caller, and other keys that define the arguments Returns: object instantiated by cfg """ from omegaconf import ListConfig if isinstance(cfg, ListConfig): lst = [instantiate(x) for x in cfg] return ListConfig(lst, flags={"allow_objects": True}) if isinstance(cfg, list): # Specialize for list, because many classes take # list[objects] as arguments, such as ResNet, DatasetMapper return [instantiate(x) for x in cfg] if isinstance(cfg, abc.Mapping) and "_target_" in cfg: # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all, # but faster: https://github.com/facebookresearch/hydra/issues/1200 cfg = {k: instantiate(v) for k, v in cfg.items()} cls = cfg.pop("_target_") cls = instantiate(cls) if isinstance(cls, str): cls_name = cls cls = locate(cls_name) assert cls is not None, cls_name else: try: cls_name = cls.__module__ + "." + cls.__qualname__ except Exception: # target could be anything, so the above could fail cls_name = str(cls) assert callable(cls), f"_target_ {cls} does not define a callable object" try: return cls(**cfg) exc<fim_suffix>ept TypeError: logger = logging.getLogger(__name__) logger.error(f"Error when instantiating {cls_name}!") raise return cfg # return as-is if don't know what to do <fim_middle>
null
CATCH
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/spin_math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pyformat: mode=yapf """Math utility functions.""" from typing import Optional, Union from internal import math import jax from jax import numpy as jnp import optax def matmul(a, b): """jnp.matmul defaults to bfloat16 on TPU, but this doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def safe_sqrt(x, *, eps = jnp.finfo(jnp.float32).eps, value_at_zero = 0.0): """A safe version of jnp.sqrt that avoid evaluating at zero. Note: sqrt(x) = sqrt(eps) = 3e-4 when x < eps = 1.19e-7. Args: x: The operand. eps: A small number to prevent NaNs. value_at_zero: The value to clamp x to near zero. The return value will be sqrt(value_at_zero) Returns: The sqrt(x), or sqrt(value_at_zero) near zero. """ safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero)) return jnp.sqrt(safe_x) def safe_acos(t, eps = jnp.finfo(jnp.float32).eps): """A safe version of arccos which avoids evaluating at -1 or 1.""" return jnp.arccos(jnp.clip(t, -1.0 + eps, 1.0 - eps)) def safe_log(x, *, eps = jnp.finfo(jnp.float32).eps, value_at_zero = jnp.finfo(jnp.float32).eps): """Computes a safe log that avoids evaluating at zero. Args: x: Input array. eps: A small number to prevent NaNs. value_at_zero: The value to clamp x to near zero. The return value will be sqrt(value_at_zero) Returns: log(x) or log(value_at_zero) near zero. """ safe_x = jnp.where(x > eps, x, jnp.full_like(x, value_at_zero)) return jnp.log(safe_x) def normalize( x, axis = -1, # pylint: disable=redefined-builtin ord = None, eps = jnp.finfo(jnp.float32).eps, ): """Normalize a vector.""" return x / optax.safe_norm(x, axis=axis, ord=ord, min_norm=eps, keepdims=True) def inv_sqrtm( matrix, normalize_eigvals = False, ): """Takes the inverse matrix square root of a PSD matrix. Forked from `coord.sqrtm`. Args: matrix: (..., d, d) A positive semi-definite matrix. normalize_eigvals: If True, normalize the eigenvalues by the geometric mean. Returns: The inverse square root of the matrix, and (eigvec, eigval) if return_eigs is True. """ eigvec, eigval = jax.lax.linalg.eigh( matrix, symmetrize_input=False, sort_eigenvalues=False) if normalize_eigvals: # Equivalent to dividing by geometric mean, but numerically stabler. log_eigval = jnp.log(eigval) eigval = jnp.exp(log_eigval - jnp.mean(log_eigval, axis=-1, keepdims=True)) scaling = math.safe_div(1, math.safe_sqrt(eigval)) scaling = scaling[Ellipsis, None, :] sqrtm_mat = matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return sqrtm_mat, (eigvec, eigval) def to_homogeneous(v): """Converts a vector to a homogeneous representation. Args: v: (*, C) A non-homogeneous vector. Returns: (*, C+1) A homogeneous version of v. """ return jnp.concatenate([v, jnp.ones_like(v[Ellipsis, :1])], axis=-1) def from_homogeneous(v): """Converts a homogeneous vector to a non-homogeneous vector. Args: v: (*, C+1) A homogeneous vector. 
Returns: (*, C) The non-homogeneous version of v. """ return v[Ellipsis, :-1] / v[Ellipsis, -1:] def apply_homogeneous_transform(transform, vectors): """Apply a homogeneous transformation to a collection of vectors. Args: transform: (C+1,C+1) A homogeneous transformation matrix. vectors: (*,C) An array containing 3D points. Returns: (*,C) The points transformed by the array. """ vectors_h = to_homogeneous(vectors.reshape((-1, vectors.shape[-1]))) transformed = from_homogeneous(matmul(transform, vectors_h.T).T) return transformed.reshape(vectors.shape) def generalized_bias_and_gain(x, slope, threshold): """Maps the input according to the generalized bias and gain func<fim_suffix>tion. References: https://arxiv.org/abs/2010.09714 Args: x: The inputs array with values in [0, 1] to map. slope: The slope parameter of the curve which controls the slope of the curve at the threshold. threshold: The value at which `x` reverses its shape, and the point at which the output is guaranteed to be equal to the input. Returns: The output of the curve at each input point `x`. """ eps = jnp.finfo(jnp.float32).tiny left_curve = (threshold * x) / (x + slope * (threshold - x) + eps) right_curve = ((1 - threshold) * (x - 1) / (1 - x - slope * (threshold - x) + eps) + 1) return jnp.where(x < threshold, left_curve, right_curve) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
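The spin_math.py payload ends at the block comment for generalized_bias_and_gain. The property worth remembering from its docstring is that the curve passes through (0, 0), (threshold, threshold) and (1, 1) for any slope. A plain NumPy transcription of the same piecewise rational curve (for intuition only; the row's version is JAX) makes that easy to check:

import numpy as np

def bias_gain(x, slope, threshold, eps=np.finfo(np.float32).tiny):
    # Same left/right rational curves as in the sample above, in plain NumPy.
    left = (threshold * x) / (x + slope * (threshold - x) + eps)
    right = (1 - threshold) * (x - 1) / (1 - x - slope * (threshold - x) + eps) + 1
    return np.where(x < threshold, left, right)

x = np.array([0.0, 0.25, 0.5, 1.0])
print(bias_gain(x, slope=4.0, threshold=0.5))  # [0.0, 0.1, 0.5, 1.0]: endpoints and the threshold map to themselves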
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulativ<fim_suffix>e sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
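The stepfun.py row above centers on piecewise-constant PDF sampling: integrate the bin weights into a CDF over the endpoints, then invert that CDF at uniform locations. Below is a minimal plain-NumPy sketch of that idea; the helper name inverse_cdf_sample and the toy histogram are illustrative assumptions, not part of the stepfun module.

import numpy as np

def inverse_cdf_sample(t, w, num_samples, rng=None):
    """Sample points from a step function with endpoints t and bin weights w."""
    w = w / w.sum()                                   # normalize weights to sum to 1
    cw = np.concatenate([[0.0], np.cumsum(w)])        # CDF values at the endpoints
    if rng is None:                                   # deterministic: evenly spaced u
        u = np.linspace(0.0, 1.0 - 1e-7, num_samples)
    else:                                             # stratified jittered u in [0, 1)
        u = (np.arange(num_samples) + rng.uniform(size=num_samples)) / num_samples
    return np.interp(u, cw, t)                        # invert the CDF by interpolation

t = np.array([0.0, 1.0, 2.0, 4.0])                    # 3 bins
w = np.array([0.1, 0.7, 0.2])                         # most mass in the middle bin
print(inverse_cdf_sample(t, w, 5))                    # most samples land in [1, 2]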
<filename>camp_zipnerf/internal/render.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for shooting and rendering rays.""" import jax import jax.numpy as jnp import jax.scipy as jsp from internal import math from internal import stepfun def lift_gaussian(d, t_mean, t_var, r_var, diag): """Lift a Gaussian defined along a ray to 3D coordinates.""" mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None] d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True)) if diag: d_outer_diag = d**2 null_outer_diag = 1 - d_outer_diag / d_mag_sq t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :] xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :] cov_diag = t_cov_diag + xy_cov_diag return mean, cov_diag else: d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :] eye = jnp.eye(d.shape[-1]) null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :] t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :] xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :] cov = t_cov + xy_cov return mean, cov def gaussianize_frustum(t0, t1): """Convert intervals along a conical frustum into means an<fim_suffix>d variances.""" # A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415. s = t0 + t1 d = t1 - t0 eps = jnp.finfo(jnp.float32).eps ** 2 ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2) t_mean = s * (1 / 2 + ratio) t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2) r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio) return t_mean, t_var, r_var def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag): """Approximate a 3D conical frustum as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and base_radius is the radius at dist=1. Doesn't assume `d` is normalized. Args: d: jnp.float32 3-vector, the axis of the cone t0: float, the starting distance of the frustum. t1: float, the ending distance of the frustum. base_radius: float, the scale of the radius as a function of distance. diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). """ t_mean, t_var, r_var = gaussianize_frustum(t0, t1) r_var *= base_radius**2 mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag) return mean, cov def cylinder_to_gaussian(d, t0, t1, radius, diag): """Approximate a cylinder as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and radius is the radius. Does not renormalize `d`. Args: d: jnp.float32 3-vector, the axis of the cylinder t0: float, the starting distance of the cylinder. t1: float, the ending distance of the cylinder. radius: float, the radius of the cylinder diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). 
""" t_mean = (t0 + t1) / 2 r_var = radius**2 / 4 t_var = (t1 - t0) ** 2 / 12 return lift_gaussian(d, t_mean, t_var, r_var, diag) def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True): """Cast rays (cone- or cylinder-shaped) and featurize sections of it. Args: tdist: float array, the "fencepost" distances along the ray. origins: float array, the ray origin coordinates. directions: float array, the ray direction vectors. radii: float array, the radii (base radii for cones) of the rays. ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'. diag: boolean, whether or not the covariance matrices should be diagonal. Returns: a tuple of arrays of means and covariances. """ t0 = tdist[Ellipsis, :-1] t1 = tdist[Ellipsis, 1:] if ray_shape == 'cone': gaussian_fn = conical_frustum_to_gaussian elif ray_shape == 'cylinder': gaussian_fn = cylinder_to_gaussian else: raise ValueError("ray_shape must be 'cone' or 'cylinder'") means, covs = gaussian_fn(directions, t0, t1, radii, diag) means = means + origins[Ellipsis, None, :] return means, covs def compute_alpha_weights_helper(density_delta): """Helper function for compute_alpha_weights.""" log_trans = -jnp.concatenate( [ jnp.zeros_like(density_delta[Ellipsis, :1]), jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1), ], axis=-1, ) alpha = 1 - jnp.exp(-density_delta) trans = jnp.exp(log_trans) weights = alpha * trans return weights def compute_alpha_weights( density, tdist, dirs, **kwargs, ): """Helper function for computing alpha compositing weights.""" t_delta = jnp.diff(tdist) delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1) density_delta = density * delta return compute_alpha_weights_helper(density_delta, **kwargs) def volumetric_rendering( rgbs, weights, tdist, bg_rgbs, compute_extras, extras=None, percentiles = (5, 50, 95), ): """Volumetric Rendering Function. Args: rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3] weights: jnp.ndarray(float32), weights, [batch_size, num_samples]. tdist: jnp.ndarray(float32), [batch_size, num_samples]. bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background. compute_extras: bool, if True, compute extra quantities besides color. extras: dict, a set of values along rays to render by alpha compositing. percentiles: depth will be returned for these percentiles. Returns: rendering: a dict containing an rgb image of size [batch_size, 3], and other visualizations if compute_extras=True. """ eps = jnp.finfo(jnp.float32).eps rendering = {} acc = weights.sum(axis=-1) bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None]) # The weight of the background. if rgbs is not None: rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs else: rgb = None rendering['rgb'] = rgb if compute_extras: rendering['acc'] = acc if extras is not None: for k, v in extras.items(): if v is not None: rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2) expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc) t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:]) # For numerical stability this expectation is computing using log-distance. rendering['distance_mean'] = jnp.clip( jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf), tdist[Ellipsis, 0], tdist[Ellipsis, -1], ) # Normalize the weights to sum to 1. 
weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None]) distance_percentiles = stepfun.weighted_percentile( tdist, weights_norm, percentiles ) for i, p in enumerate(percentiles): s = 'median' if p == 50 else 'percentile_' + str(p) rendering['distance_' + s] = distance_percentiles[Ellipsis, i] return rendering <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
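The render.py row above builds alpha-compositing weights from densities and interval lengths. The following is a hedged NumPy sketch (toy densities and hypothetical variable names, not the module's API) showing how alpha, transmittance, and the leftover background weight combine into a pixel color.

import numpy as np

sigma = np.array([0.1, 0.5, 2.0, 0.3])                # densities per ray segment
delta = np.array([0.5, 0.5, 0.5, 0.5])                # segment lengths
alpha = 1.0 - np.exp(-sigma * delta)                  # opacity of each segment
trans = np.concatenate([[1.0], np.cumprod(1.0 - alpha)[:-1]])  # transmittance to each segment
weights = trans * alpha                               # compositing weights, sum <= 1
rgb_samples = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 1, 1]], float)
bg = np.array([0.0, 0.0, 0.0])                        # background color
rgb = (weights[:, None] * rgb_samples).sum(0) + (1 - weights.sum()) * bg
print(weights, rgb)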
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with <fim_suffix>clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
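The math.py row above includes the log-linearly interpolated learning-rate schedule with an optional sine warm-up delay. Here is a small stand-alone sketch of that schedule, assuming scalar inputs and using the illustrative name lr_schedule (not the library's function).

import numpy as np

def lr_schedule(step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1.0):
    t = np.clip(step / max_steps, 0.0, 1.0)
    # Interpolate log(lr) linearly, i.e. exponential decay from lr_init to lr_final.
    lr = np.exp(np.log(lr_init) * (1 - t) + np.log(lr_final) * t)
    if lr_delay_steps > 0:
        # Ease in from lr_init * lr_delay_mult over the first lr_delay_steps steps.
        delay = lr_delay_mult + (1 - lr_delay_mult) * np.sin(
            0.5 * np.pi * np.clip(step / lr_delay_steps, 0.0, 1.0))
        lr = lr * delay
    return lr

print(lr_schedule(0, 1e-2, 1e-4, 1000))               # ~1e-2 at the start
print(lr_schedule(1000, 1e-2, 1e-4, 1000))            # ~1e-4 at the end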
<filename>camp_zipnerf/internal/linspline.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for linear splines.""" import functools from internal import math from internal import utils import jax from jax.experimental import checkify import jax.numpy as jnp def check_zero_endpoints(y): checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.') checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.') def query(tq, t, v): """Query linear spline (t, v) at tq.""" utils.assert_valid_linspline(t, v) interp = functools.partial(jnp.interp, left=0, right=0) return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v) def integrate(t, w): """Integrate (t, w) according to the trapezoid rule.""" utils.assert_valid_linspline(t, w) return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1) def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2): """Make w integrate to 1.""" utils.assert_valid_linspline(t, w) return w / jnp.maximum(eps, integrate(t, w))[Ellipsis, None] def insert_knot(ti, t, y): """Inserts knots ti into the linear spline (t, w). Assumes zero endpoints.""" utils.assert_valid_linspline(t, y) check_zero_endpoints(y) # Compute the spline value at the insertion points. yi = query(ti, t, y) # Concatenate the insertion points and values onto the end of each spline. ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape) yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape) to = jnp.concatenate([t, ti_ex], axis=-1) yo = jnp.concatenate([y, yi_ex], axis=-1) # Sort the spline according to t. sort_idx = jnp.argsort(to) to = jnp.take_along_axis(to, sort_idx, axis=-1) yo = jnp.take_along_axis(yo, sort_idx, axis=-1) return to, yo def clamp(t, y, minval, maxval): """Clamp (t, y) to be zero outside of t in [minval, maxval].""" utils.assert_valid_linspline(t, y) check_zero_endpoints(y) # Add in extra points at and immediately above/below the min/max vals. ti = jnp.concatenate( [ math.minus_eps(minval), minval, maxval, math.plus_eps(maxval), ], axis=-1, ) tc, yo = insert_knot(ti, t, y) # Zero the spline values outside of [minval, maxval]. yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo)) return tc, yc def compute_integral(t, y): """Integrate a linear spline into a pie<fim_suffix>cewise quadratic spline.""" utils.assert_valid_linspline(t, y) eps = jnp.finfo(jnp.float32).eps ** 2 dt = jnp.diff(t) a = jnp.diff(y) / jnp.maximum(eps, 2 * dt) b = y[Ellipsis, :-1] # The integral has an ambiguous global offset here, which we set to 0. 
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1) c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1) # This quadratic is parameterized as: # (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i] return a, b, c def sorted_lookup(x, xp): """Lookup `x` at sorted locations `xp`.""" # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( functools.partial(jnp.searchsorted, side='right'), signature='(n),(m)->(m)', )(xp, x) idx0 = jnp.maximum(idx - 1, 0) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) return idx0, idx1 def interpolate_integral(tq, t, a, b, c): """Interpolate into the piecewise quadratic returned by compute_integral().""" utils.assert_valid_stepfun(t, a) utils.assert_valid_stepfun(t, b) utils.assert_valid_stepfun(t, c) # Clip to valid inputs (assumes repeating boundaries). tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:])) # Lookup the quadratic coefficients corresponding to each input query. idx0, _ = sorted_lookup(tq, t) # TODO(barron): It might be faster to stack (a, c, b) during generation and # do a single gather. t0 = jnp.take_along_axis(t, idx0, axis=-1) a0 = jnp.take_along_axis(a, idx0, axis=-1) b0 = jnp.take_along_axis(b, idx0, axis=-1) c0 = jnp.take_along_axis(c, idx0, axis=-1) td = tq - t0 v = a0 * td**2 + b0 * td + c0 return v def blur_stepfun(ts, ys, halfwidth): """Convolve a step function (ts, ys) with a box filter of size `halfwidth`.""" utils.assert_valid_stepfun(ts, ys) # Blur each entire step function by a single `halfwidth` value. # Dilate the t-values by at least numerical epsilon in each direction. ts_lo = ts - halfwidth ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth) # The difference in adjacent `y` values (zero padded) divided by the # difference in adjacent `t` values. ys0 = jnp.concatenate( [jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1 ) dy = jnp.diff(ys0) / (ts_hi - ts_lo) # When decreasing t splat a positive second derivative, and when increasing # t splat a negative second derivative. tp = jnp.concatenate([ts_lo, ts_hi], axis=-1) dyp = jnp.concatenate([dy, -dy], axis=-1) # Sort the dilated t-values and their accompanying derivative weights. idx = jnp.argsort(tp, axis=-1) tp = jnp.take_along_axis(tp, idx, axis=-1) dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1) # A ramp is the double integral of a delta function, so if we double- # integrate these derivatives you get the sum of a bunch of trapezoids. yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1) # Add in the missing first and last endpoint values, which must be zero # because we assume zero padding on `ys`. yp = jnp.concatenate( [jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1 ) return tp, yp <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
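The linspline.py row above integrates a linear spline with the trapezoid rule. A one-off NumPy check of that rule on a toy spline (values chosen arbitrarily, zero endpoints as the module assumes):

import numpy as np

t = np.array([0.0, 1.0, 3.0, 4.0])                    # knot locations
w = np.array([0.0, 2.0, 2.0, 0.0])                    # spline values, zero at both ends
# Trapezoid rule: sum over intervals of 0.5 * (w[i] + w[i+1]) * (t[i+1] - t[i]).
area = 0.5 * np.sum((w[:-1] + w[1:]) * np.diff(t))
print(area)                                           # 1 + 4 + 1 = 6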
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic c<fim_suffix>oefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
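The ref_utils.py row above reflects view directions about unit normals via u = 2 (n . v) n - v. Below is a quick NumPy sanity sketch of the two invariants the docstring states (equal dot products with n and equal lengths); the vectors are made-up test values, not anything from the module.

import numpy as np

v = np.array([0.3, -0.5, 0.8])                        # incoming direction
n = np.array([0.0, 0.0, 1.0])                         # unit normal
u = 2.0 * np.dot(n, v) * n - v                        # reflected direction
print(np.dot(u, n), np.dot(v, n))                     # equal dot products with n
print(np.linalg.norm(u), np.linalg.norm(v))           # equal lengths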
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and c<fim_suffix>lipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
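The row above ends inside general_lossfun, the general robust loss rho(x, alpha, c) of Barron (arxiv.org/abs/1701.03077). Below is a hedged restatement in plain NumPy for a few alphas so the special cases (L2, Cauchy/Lorentzian, Geman-McClure, Charbonnier) are easy to see; the function name rho and the simple branch structure are illustrative assumptions, not the library implementation with its clipping and gradient safeguards.

import numpy as np

def rho(x, alpha, c):
    z = 0.5 * (x / c) ** 2                            # the alpha = 2 (L2) loss
    if alpha == 2.0:
        return z                                      # L2
    if alpha == 0.0:
        return np.log1p(z)                            # Cauchy / Lorentzian
    if np.isneginf(alpha):
        return 1.0 - np.exp(-z)                       # Welsch / Leclerc
    b = abs(alpha - 2.0)
    # General case; alpha = 1 gives Charbonnier, alpha = -2 gives Geman-McClure.
    return (b / alpha) * ((z / (0.5 * b) + 1.0) ** (0.5 * alpha) - 1.0)

x = np.linspace(-3, 3, 7)
for a in [2.0, 1.0, 0.0, -2.0]:
    print(a, rho(x, a, 1.0))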
<filename>camp_zipnerf/internal/render.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for shooting and rendering rays.""" import jax import jax.numpy as jnp import jax.scipy as jsp from internal import math from internal import stepfun def lift_gaussian(d, t_mean, t_var, r_var, diag): """Lift a Gaussian defined along a ray to 3D coordinates.""" mean = d[Ellipsis, None, :] * t_mean[Ellipsis, None] d_mag_sq = jnp.maximum(1e-10, jnp.sum(d**2, axis=-1, keepdims=True)) if diag: d_outer_diag = d**2 null_outer_diag = 1 - d_outer_diag / d_mag_sq t_cov_diag = t_var[Ellipsis, None] * d_outer_diag[Ellipsis, None, :] xy_cov_diag = r_var[Ellipsis, None] * null_outer_diag[Ellipsis, None, :] cov_diag = t_cov_diag + xy_cov_diag return mean, cov_diag else: d_outer = d[Ellipsis, :, None] * d[Ellipsis, None, :] eye = jnp.eye(d.shape[-1]) null_outer = eye - d[Ellipsis, :, None] * (d / d_mag_sq)[Ellipsis, None, :] t_cov = t_var[Ellipsis, None, None] * d_outer[Ellipsis, None, :, :] xy_cov = r_var[Ellipsis, None, None] * null_outer[Ellipsis, None, :, :] cov = t_cov + xy_cov return mean, cov def gaussianize_frustum(t0, t1): """Convert intervals along a conical frustum into means and variances.""" # A more stable version of Equation 7 from https://arxiv.org/abs/2103.13415. s = t0 + t1 d = t1 - t0 eps = jnp.finfo(jnp.float32).eps ** 2 ratio = d**2 / jnp.maximum(eps, 3 * s**2 + d**2) t_mean = s * (1 / 2 + ratio) t_var = (1 / 12) * d**2 - (1 / 15) * ratio**2 * (12 * s**2 - d**2) r_var = (1 / 16) * s**2 + d**2 * (5 / 48 - (1 / 15) * ratio) return t_mean, t_var, r_var def conical_frustum_to_gaussian(d, t0, t1, base_radius, diag): """Approximate a 3D conical frustum as a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and base_radius is the radius at dist=1. Doesn't assume `d` is normalized. Args: d: jnp.float32 3-vector, the axis of the cone t0: float, the starting distance of the frustum. t1: float, the ending distance of the frustum. base_radius: float, the scale of the radius as a function of distance. diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). """ t_mean, t_var, r_var = gaussianize_frustum(t0, t1) r_var *= base_radius**2 mean, cov = lift_gaussian(d, t_mean, t_var, r_var, diag) return mean, cov def cylinder_to_gaussian(d, t0, t1, radius, diag): """Approximate a cylinder as <fim_suffix>a Gaussian distribution (mean+cov). Assumes the ray is originating from the origin, and radius is the radius. Does not renormalize `d`. Args: d: jnp.float32 3-vector, the axis of the cylinder t0: float, the starting distance of the cylinder. t1: float, the ending distance of the cylinder. radius: float, the radius of the cylinder diag: boolean, whether or the Gaussian will be diagonal or full-covariance. Returns: a Gaussian (mean and covariance). 
""" t_mean = (t0 + t1) / 2 r_var = radius**2 / 4 t_var = (t1 - t0) ** 2 / 12 return lift_gaussian(d, t_mean, t_var, r_var, diag) def cast_rays(tdist, origins, directions, radii, ray_shape, diag=True): """Cast rays (cone- or cylinder-shaped) and featurize sections of it. Args: tdist: float array, the "fencepost" distances along the ray. origins: float array, the ray origin coordinates. directions: float array, the ray direction vectors. radii: float array, the radii (base radii for cones) of the rays. ray_shape: string, the shape of the ray, must be 'cone' or 'cylinder'. diag: boolean, whether or not the covariance matrices should be diagonal. Returns: a tuple of arrays of means and covariances. """ t0 = tdist[Ellipsis, :-1] t1 = tdist[Ellipsis, 1:] if ray_shape == 'cone': gaussian_fn = conical_frustum_to_gaussian elif ray_shape == 'cylinder': gaussian_fn = cylinder_to_gaussian else: raise ValueError("ray_shape must be 'cone' or 'cylinder'") means, covs = gaussian_fn(directions, t0, t1, radii, diag) means = means + origins[Ellipsis, None, :] return means, covs def compute_alpha_weights_helper(density_delta): """Helper function for compute_alpha_weights.""" log_trans = -jnp.concatenate( [ jnp.zeros_like(density_delta[Ellipsis, :1]), jnp.cumsum(density_delta[Ellipsis, :-1], axis=-1), ], axis=-1, ) alpha = 1 - jnp.exp(-density_delta) trans = jnp.exp(log_trans) weights = alpha * trans return weights def compute_alpha_weights( density, tdist, dirs, **kwargs, ): """Helper function for computing alpha compositing weights.""" t_delta = jnp.diff(tdist) delta = t_delta * jnp.linalg.norm(dirs[Ellipsis, None, :], axis=-1) density_delta = density * delta return compute_alpha_weights_helper(density_delta, **kwargs) def volumetric_rendering( rgbs, weights, tdist, bg_rgbs, compute_extras, extras=None, percentiles = (5, 50, 95), ): """Volumetric Rendering Function. Args: rgbs: jnp.ndarray(float32), color, [batch_size, num_samples, 3] weights: jnp.ndarray(float32), weights, [batch_size, num_samples]. tdist: jnp.ndarray(float32), [batch_size, num_samples]. bg_rgbs: jnp.ndarray(float32), the color(s) to use for the background. compute_extras: bool, if True, compute extra quantities besides color. extras: dict, a set of values along rays to render by alpha compositing. percentiles: depth will be returned for these percentiles. Returns: rendering: a dict containing an rgb image of size [batch_size, 3], and other visualizations if compute_extras=True. """ eps = jnp.finfo(jnp.float32).eps rendering = {} acc = weights.sum(axis=-1) bg_w = jnp.maximum(0, 1 - acc[Ellipsis, None]) # The weight of the background. if rgbs is not None: rgb = (weights[Ellipsis, None] * rgbs).sum(axis=-2) + bg_w * bg_rgbs else: rgb = None rendering['rgb'] = rgb if compute_extras: rendering['acc'] = acc if extras is not None: for k, v in extras.items(): if v is not None: rendering[k] = (weights[Ellipsis, None] * v).sum(axis=-2) expectation = lambda x: (weights * x).sum(axis=-1) / jnp.maximum(eps, acc) t_mids = 0.5 * (tdist[Ellipsis, :-1] + tdist[Ellipsis, 1:]) # For numerical stability this expectation is computing using log-distance. rendering['distance_mean'] = jnp.clip( jnp.nan_to_num(jnp.exp(expectation(jnp.log(t_mids))), jnp.inf), tdist[Ellipsis, 0], tdist[Ellipsis, -1], ) # Normalize the weights to sum to 1. 
weights_norm = weights / jnp.maximum(eps, acc[Ellipsis, None]) distance_percentiles = stepfun.weighted_percentile( tdist, weights_norm, percentiles ) for i, p in enumerate(percentiles): s = 'median' if p == 50 else 'percentile_' + str(p) rendering['distance_' + s] = distance_percentiles[Ellipsis, i] return rendering <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the m<fim_suffix>ean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x<fim_suffix>` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
BLOCK_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if d<fim_suffix>eterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
null
IF
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale<fim_suffix> is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
IF
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rn<fim_suffix>g is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
null
IF
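The stepfun.py example above centres on drawing samples from a piecewise-constant PDF by inverting its CDF (integrate_weights, invert_cdf, sample). Below is a minimal NumPy sketch of that idea; sample_stepfun, the 1e-6 epsilon, and the deterministic linspace fallback are choices made for this sketch, not the library's API.

import numpy as np

def sample_stepfun(t, w, num_samples, rng=None):
    # t: (num_bins + 1,) sorted bin endpoints; w: (num_bins,) non-negative weights.
    cw = np.concatenate([[0.0], np.cumsum(w)])   # CDF at the bin endpoints.
    cw = cw / cw[-1]                             # Normalize in case w does not sum to exactly 1.
    if rng is None:
        u = np.linspace(0.0, 1.0 - 1e-6, num_samples)            # Deterministic sampling.
    else:
        u = np.sort(rng.uniform(0.0, 1.0 - 1e-6, num_samples))   # Stratification omitted here.
    return np.interp(u, cw, t)                   # Invert the piecewise-linear CDF.

t = np.array([0.0, 1.0, 2.0, 4.0])   # Three bins.
w = np.array([0.1, 0.6, 0.3])        # Bin weights.
print(sample_stepfun(t, w, 5))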
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating<fim_suffix>_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
IF
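The utils.py example above is built around iterate_in_separate_thread, which runs a generator on a worker thread and hands items across a bounded queue. The sketch below shows the same producer/consumer pattern in a stripped-down form; it uses a sentinel object to signal completion and, unlike the real decorator, does not propagate worker exceptions through a ThreadPoolExecutor.

import queue
import threading

def threaded_iter(gen_fn, queue_size=3):
    # Run gen_fn() on a worker thread; yield its items from a bounded queue.
    q = queue.Queue(queue_size)
    done = object()                    # Sentinel marking the end of iteration.

    def worker():
        for item in gen_fn():
            q.put(item)                # Blocks once queue_size items are buffered.
        q.put(done)

    threading.Thread(target=worker, daemon=True).start()
    while (item := q.get()) is not done:
        yield item

print(list(threaded_iter(lambda: range(5))))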
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mod<fim_suffix>e == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
IF
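The coord.py example above includes the scene contraction of Eq. 10 in arxiv.org/abs/2111.12077, which maps all of space into a ball of radius 2 and is exactly invertible. The following NumPy check re-derives the forward/inverse pair as an illustration rather than reusing the library code.

import numpy as np

def contract(x):
    mag_sq = np.maximum(1.0, np.sum(x**2, axis=-1, keepdims=True))
    return (2.0 * np.sqrt(mag_sq) - 1.0) / mag_sq * x

def inv_contract(z):
    mag_sq = np.maximum(1.0, np.sum(z**2, axis=-1, keepdims=True))
    return z / (2.0 * np.sqrt(mag_sq) - mag_sq)

x = np.array([[0.3, 0.0, 0.0], [10.0, -4.0, 2.0]])
z = contract(x)
print(np.linalg.norm(z, axis=-1))        # Contracted points stay within radius 2.
print(np.allclose(inv_contract(z), x))   # The round trip recovers the inputs.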
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data an<fim_suffix>d results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
IF
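The second utils.py example repeats the same file with a different completion point; a separate piece worth illustrating from it is the shard()/unshard() reshaping used to split batches across devices. A toy NumPy version with the device count hard-coded to 2 in place of jax.local_device_count():

import numpy as np

n_devices = 2                                          # Stand-in for jax.local_device_count().
x = np.arange(12).reshape(6, 2)                        # (batch, feature).
sharded = x.reshape((n_devices, -1) + x.shape[1:])     # (devices, batch // devices, feature).
restored = sharded.reshape((-1,) + sharded.shape[2:])  # Undo the sharding, as unshard() does.
print(sharded.shape, np.array_equal(restored, x))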
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries<fim_suffix>: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
null
IF
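The geopoly.py example above leans on compute_sq_dist, which obtains all pairwise squared distances between columns through the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x^T y. A small independent NumPy check of that identity against the brute-force computation:

import numpy as np

rng = np.random.default_rng(0)
a = rng.normal(size=(3, 5))   # Five 3D points stored as columns, matching the convention above.
b = rng.normal(size=(3, 4))

fast = (a**2).sum(0)[:, None] + (b**2).sum(0)[None, :] - 2.0 * a.T @ b
brute = ((a.T[:, None, :] - b.T[None, :, :]) ** 2).sum(-1)
print(np.allclose(fast, brute))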
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is <fim_suffix>None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
IF
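The second coord.py example exposes pos_enc, the positional encoding from the original NeRF paper: coordinates are scaled by powers of two and passed through sin, with a pi/2 phase shift supplying the cosines. A minimal NumPy re-implementation of that recipe, for illustration only:

import numpy as np

def pos_enc(x, min_deg, max_deg, append_identity=True):
    scales = 2.0 ** np.arange(min_deg, max_deg)                             # (S,).
    xb = (x[..., None, :] * scales[:, None]).reshape(x.shape[:-1] + (-1,))  # (..., S * C).
    four_feat = np.sin(np.concatenate([xb, xb + 0.5 * np.pi], axis=-1))     # sin and cos features.
    return np.concatenate([x, four_feat], axis=-1) if append_identity else four_feat

x = np.array([[0.1, -0.4, 0.25]])
print(pos_enc(x, 0, 4).shape)   # (1, 3 + 2 * 4 * 3) = (1, 27).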
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is<fim_suffix> None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
targets: null
block_type: IF
scenario: complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device<fim_suffix>_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
targets: null
block_type: IF
scenario: complete_current_header_empty_completion
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_w<fim_suffix>q, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.s<fim_suffix>qrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return<fim_suffix> t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
<filename>camp_zipnerf/internal/linspline.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for linear splines.""" import functools from internal import math from internal import utils import jax from jax.experimental import checkify import jax.numpy as jnp def check_zero_endpoints(y): checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.') checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.') def query(tq, t, v): """Query linear spline (t, v) at tq.""" utils.assert_valid_linspline(t, v) interp = functools.partial(jnp.interp, left=0, right=0) return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v) def integrate(t, w): """Integrate (t, w) according to the trapezoid rule.""" utils.assert_valid_linspline(t, w) return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1) def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2): """Make w integrate to 1.""" utils.assert_valid_linspline(t, w) return w / jnp.maximum(eps, integrate(t, w))[Ellipsis, None] def insert_knot(ti, t, y): """Inserts knots ti into the linear spline (t, w). Assumes zero endpoints.""" utils.assert_valid_linspline(t, y) check_zero_endpoints(y) # Compute the spline value at the insertion points. yi = query(ti, t, y) # Concatenate the insertion points and values onto the end of each spline. ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape) yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape) to = jnp.concatenate([t, ti_ex], axis=-1) yo = jnp.concatenate([y, yi_ex], axis=-1) # Sort the spline according to t. sort_idx = jnp.argsort(to) to = jnp.take_along_axis(to, sort_idx, axis=-1) yo = jnp.take_along_axis(yo, sort_idx, axis=-1) return to, yo def clamp(t, y, minval, maxval): """Clamp (t, y) to be zero outside of t in [minval, maxval].""" utils.assert_valid_linspline(t, y) check_zero_endpoints(y) # Add in extra points at and immediately above/below the min/max vals. ti = jnp.concatenate( [ math.minus_eps(minval), minval, maxval, math.plus_eps(maxval), ], axis=-1, ) tc, yo = insert_knot(ti, t, y) # Zero the spline values outside of [minval, maxval]. yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo)) return tc, yc def compute_integral(t, y): """Integrate a linear spline into a piecewise quadratic spline.""" utils.assert_valid_linspline(t, y) eps = jnp.finfo(jnp.float32).eps ** 2 dt = jnp.diff(t) a = jnp.diff(y) / jnp.maximum(eps, 2 * dt) b = y[Ellipsis, :-1] # The integral has an ambiguous global offset here, which we set to 0. 
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1) c = jnp.concatena<fim_suffix>te([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1) # This quadratic is parameterized as: # (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i] return a, b, c def sorted_lookup(x, xp): """Lookup `x` at sorted locations `xp`.""" # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( functools.partial(jnp.searchsorted, side='right'), signature='(n),(m)->(m)', )(xp, x) idx0 = jnp.maximum(idx - 1, 0) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) return idx0, idx1 def interpolate_integral(tq, t, a, b, c): """Interpolate into the piecewise quadratic returned by compute_integral().""" utils.assert_valid_stepfun(t, a) utils.assert_valid_stepfun(t, b) utils.assert_valid_stepfun(t, c) # Clip to valid inputs (assumes repeating boundaries). tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:])) # Lookup the quadratic coefficients corresponding to each input query. idx0, _ = sorted_lookup(tq, t) # TODO(barron): It might be faster to stack (a, c, b) during generation and # do a single gather. t0 = jnp.take_along_axis(t, idx0, axis=-1) a0 = jnp.take_along_axis(a, idx0, axis=-1) b0 = jnp.take_along_axis(b, idx0, axis=-1) c0 = jnp.take_along_axis(c, idx0, axis=-1) td = tq - t0 v = a0 * td**2 + b0 * td + c0 return v def blur_stepfun(ts, ys, halfwidth): """Convolve a step function (ts, ys) with a box filter of size `halfwidth`.""" utils.assert_valid_stepfun(ts, ys) # Blur each entire step function by a single `halfwidth` value. # Dilate the t-values by at least numerical epsilon in each direction. ts_lo = ts - halfwidth ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth) # The difference in adjacent `y` values (zero padded) divided by the # difference in adjacent `t` values. ys0 = jnp.concatenate( [jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1 ) dy = jnp.diff(ys0) / (ts_hi - ts_lo) # When decreasing t splat a positive second derivative, and when increasing # t splat a negative second derivative. tp = jnp.concatenate([ts_lo, ts_hi], axis=-1) dyp = jnp.concatenate([dy, -dy], axis=-1) # Sort the dilated t-values and their accompanying derivative weights. idx = jnp.argsort(tp, axis=-1) tp = jnp.take_along_axis(tp, idx, axis=-1) dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1) # A ramp is the double integral of a delta function, so if we double- # integrate these derivatives you get the sum of a bunch of trapezoids. yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1) # Add in the missing first and last endpoint values, which must be zero # because we assume zero padding on `ys`. yp = jnp.concatenate( [jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1 ) return tp, yp <fim_middle>
targets: null
block_type: STATEMENT
scenario: complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe<fim_suffix>_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
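The math.py row above ends with general_lossfun(), whose docstring lists the special cases of Barron's general robust loss. The snippet below is an illustrative, self-contained sketch (plain JAX, not part of the dataset row; the helper names are made up for the example) of three of those closed forms, useful as a sanity reference when reading the jnp.where() cascade above.

import jax.numpy as jnp

def l2_case(x, scale):
  # alpha == 2: a plain scaled L2 bowl.
  return 0.5 * (x / scale) ** 2

def cauchy_case(x, scale):
  # alpha == 0: Cauchy / Lorentzian loss, log1p of the L2 bowl.
  return jnp.log1p(0.5 * (x / scale) ** 2)

def welsch_case(x, scale):
  # alpha == -inf: Welsch / Leclerc loss, saturates at 1 for large |x|.
  return 1.0 - jnp.exp(-0.5 * (x / scale) ** 2)

x = jnp.linspace(-4.0, 4.0, 9)
print(l2_case(x, 1.0))
print(cauchy_case(x, 1.0))
print(welsch_case(x, 1.0))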
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x ret<fim_suffix>urn z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
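The coord.py row above centers on the scene contraction of Eq. 10 from arxiv.org/abs/2111.12077. As a quick standalone check (re-implemented here for illustration, assuming only jax.numpy), contract() and inv_contract() are exact inverses and every contracted point lands inside a ball of radius 2.

import jax.numpy as jnp

def contract(x):
  # Identity inside the unit ball; maps all of space into a radius-2 ball.
  x_mag_sq = jnp.maximum(1.0, jnp.sum(x**2, axis=-1, keepdims=True))
  return ((2.0 * jnp.sqrt(x_mag_sq) - 1.0) / x_mag_sq) * x

def inv_contract(z):
  z_mag_sq = jnp.maximum(1.0, jnp.sum(z**2, axis=-1, keepdims=True))
  return z / (2.0 * jnp.sqrt(z_mag_sq) - z_mag_sq)

x = jnp.array([[0.3, 0.0, 0.0], [10.0, 0.0, 0.0], [0.0, -50.0, 20.0]])
z = contract(x)
print(jnp.max(jnp.abs(inv_contract(z) - x)))  # ~0 up to float32 error
print(jnp.linalg.norm(z, axis=-1))            # each norm is < 2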
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) val<fim_suffix>s = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
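This math.py row repeats learning_rate_decay(), which is log_lerp() applied to the step fraction. A minimal numpy sketch of that log-linear interpolation (values chosen arbitrarily for the example) shows why the schedule is equivalent to exponential decay: the geometric midpoint of lr_init and lr_final is reached at the halfway step.

import numpy as np

def log_lerp(t, v0, v1):
  # Interpolate the exponent, so the value itself decays geometrically.
  t = np.clip(t, 0.0, 1.0)
  return np.exp(t * np.log(v1) + (1.0 - t) * np.log(v0))

lr_init, lr_final, max_steps = 1e-2, 1e-4, 1000
for step in (0, 250, 500, 750, 1000):
  print(step, log_lerp(step / max_steps, lr_init, lr_final))
# step 0 -> 1e-2, step 500 -> 1e-3 (geometric midpoint), step 1000 -> 1e-4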
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigm<fim_suffix>a * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
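The ref_utils.py row above documents reflect() with two identities, dot(u, n) = dot(v, n) and dot(u, u) = dot(v, v). A short numpy check of both (random vectors, illustration only):

import numpy as np

def reflect(viewdirs, normals):
  # u = 2 (n . v) n - v, with n assumed unit-length.
  return (2.0 * np.sum(normals * viewdirs, axis=-1, keepdims=True) * normals
          - viewdirs)

rng = np.random.default_rng(0)
v = rng.normal(size=(5, 3))
n = rng.normal(size=(5, 3))
n /= np.linalg.norm(n, axis=-1, keepdims=True)
u = reflect(v, n)
print(np.allclose(np.sum(u * n, -1), np.sum(v * n, -1)))  # same component along n
print(np.allclose(np.sum(u * u, -1), np.sum(v * v, -1)))  # same squared length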
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[<fim_suffix>0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
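The second ref_utils.py row splits at a different point inside generate_ide_fn(). The attenuation it applies, exp(-sigma * kappa_inv) with sigma = l (l + 1) / 2, damps high-frequency spherical-harmonic bands as the von Mises-Fisher distribution gets wider. Below is a small sketch of that factor for the bands produced by get_ml_array(4); the kappa_inv values are arbitrary examples.

import numpy as np

def get_ml_array(deg_view):
  # Bands l = 1, 2, 4, ..., 2**(deg_view - 1), with nonnegative orders m only.
  ml_list = []
  for i in range(deg_view):
    l = 2**i
    for m in range(l + 1):
      ml_list.append((m, l))
  return np.array(ml_list).T

ml = get_ml_array(4)
sigma = 0.5 * ml[1, :] * (ml[1, :] + 1)
for kappa_inv in (0.0, 0.1, 1.0):  # 0 means a perfectly sharp distribution
  print(kappa_inv, np.exp(-sigma * kappa_inv))
# larger l (higher frequency) is damped more as kappa_inv grows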
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jn<fim_suffix>p.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does not # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
STATEMENT
complete_current_header_empty_completion
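The coord.py chunk above ends with pos_enc, the sinusoidal positional encoding from the original NeRF paper: frequencies 2^k for k in [min_deg, max_deg), with the cosine half obtained by shifting the sine argument by pi/2. A minimal NumPy restatement for illustration (the repository code uses jax.numpy; the sample input is made up):

import numpy as np

def pos_enc(x, min_deg, max_deg, append_identity=True):
    # Sinusoidal features at frequencies 2^min_deg ... 2^(max_deg - 1).
    scales = 2.0 ** np.arange(min_deg, max_deg)                      # (s,)
    shape = x.shape[:-1] + (-1,)
    scaled_x = np.reshape(x[..., None, :] * scales[:, None], shape)  # (..., s*c)
    # sin(z + pi/2) == cos(z), so this packs [sin, cos] into one call.
    four_feat = np.sin(np.concatenate([scaled_x, scaled_x + 0.5 * np.pi], axis=-1))
    return np.concatenate([x, four_feat], axis=-1) if append_identity else four_feat

x = np.array([[0.1, -0.2, 0.3]])
print(pos_enc(x, min_deg=0, max_deg=4).shape)  # (1, 27): 3 identity dims + 2 * 4 degrees * 3 dims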
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # T<fim_suffix>hread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
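The utils.py chunk above includes shard and unshard, which reshape a flat batch so that its leading axis splits across local accelerators and back again. A NumPy sketch of just the reshaping logic, with a hypothetical device count standing in for jax.local_device_count():

import numpy as np

num_devices = 4  # hypothetical stand-in for jax.local_device_count()

def shard(x):
    # Split the leading batch axis into (devices, per-device batch).
    return x.reshape((num_devices, -1) + x.shape[1:])

def unshard(x, padding=0):
    # Merge the device axis back and drop padding rows added before sharding.
    y = x.reshape((x.shape[0] * x.shape[1],) + x.shape[2:])
    return y[:-padding] if padding > 0 else y

rays = np.arange(24.0).reshape(12, 2)   # 12 rays, 2 features each
assert shard(rays).shape == (4, 3, 2)
assert np.array_equal(unshard(shard(rays)), rays)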
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step fu<fim_suffix>nction. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
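The stepfun.py chunk above samples from a step function by softmaxing bin logits into weights, integrating them into a CDF over the bin endpoints, and inverting that CDF at uniform locations (invert_cdf). A simplified single-histogram NumPy sketch of the same idea (the repository version is vectorized and TPU-aware):

import numpy as np

t = np.array([0.0, 1.0, 2.0, 4.0])     # bin endpoints, sorted
w_logits = np.array([0.0, 1.0, 0.0])   # unnormalized bin logits

w = np.exp(w_logits) / np.exp(w_logits).sum()   # softmax -> weights summing to 1
cw = np.concatenate([[0.0], np.cumsum(w)])      # CDF at the endpoints
cw[-1] = 1.0                                    # end with exactly 1, as integrate_weights() does

u = np.linspace(0.0, 1.0 - 1e-6, 5)             # uniform samples in [0, 1)
print(np.interp(u, cw, t))                      # inverse CDF: samples cluster in the heavy middle bin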
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration paramete<fim_suffix>r, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
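The ref_utils.py chunk above defines reflect, which mirrors a view direction about a unit normal via u = 2 (n . v) n - v; its docstring promises dot(u, n) = dot(v, n) and dot(u, u) = dot(v, v). A small NumPy check of those two properties, on toy vectors for illustration only:

import numpy as np

def reflect(v, n):
    # u = 2 (n . v) n - v, with n assumed to be unit length.
    return 2.0 * np.sum(n * v, axis=-1, keepdims=True) * n - v

v = np.array([0.3, -0.5, 0.2])
n = np.array([0.0, 0.0, 1.0])
u = reflect(v, n)
assert np.isclose(np.dot(u, n), np.dot(v, n))   # same component along the normal
assert np.isclose(np.dot(u, u), np.dot(v, v))   # same length
print(u)                                        # [-0.3  0.5  0.2]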
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss incurred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval <fim_suffix>with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
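lossfun_distortion in the chunk above is the distortion regularizer of mip-NeRF 360: a double sum of w[i] w[j] |midpoint_i - midpoint_j| between intervals, plus a per-interval self term. A one-histogram NumPy sketch mirroring the two terms, with toy numbers:

import numpy as np

t = np.array([0.0, 0.5, 1.0, 2.0])   # interval endpoints
w = np.array([0.2, 0.5, 0.3])        # interval weights

ut = (t[1:] + t[:-1]) / 2                        # interval midpoints
dut = np.abs(ut[:, None] - ut[None, :])          # pairwise midpoint distances
loss_inter = np.sum(w * np.sum(w[None, :] * dut, axis=-1))
loss_intra = np.sum(w**2 * np.diff(t)) / 3       # loss of each interval with itself
print(loss_inter + loss_intra)                   # shrinks as weight concentrates into one narrow interval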
<filename>camp_zipnerf/internal/linspline.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Helper functions for linear splines.""" import functools from internal import math from internal import utils import jax from jax.experimental import checkify import jax.numpy as jnp def check_zero_endpoints(y): checkify.check(jnp.all(y[Ellipsis, 0] == 0), 'Splines must all start with 0.') checkify.check(jnp.all(y[Ellipsis, -1] == 0), 'Splines must all end with 0.') def query(tq, t, v): """Query linear spline (t, v) at tq.""" utils.assert_valid_linspline(t, v) interp = functools.partial(jnp.interp, left=0, right=0) return jnp.vectorize(interp, signature='(n),(m),(m)->(n)')(tq, t, v) def integrate(t, w): """Integrate (t, w) according to the trapezoid rule.""" utils.assert_valid_linspline(t, w) return 0.5 * jnp.sum((w[Ellipsis, :-1] + w[Ellipsis, 1:]) * jnp.diff(t), axis=-1) def normalize(t, w, eps=jnp.finfo(jnp.float32).eps ** 2): """Make w integrate to 1.""" utils.assert_valid_linspline(t, w) return w / jnp.maximum(eps, integrate(t, w))[Ellipsis, None] def insert_knot(ti, t, y): """Inserts knots ti into the linear spline (t, w). Assumes zero endpoints.""" utils.assert_valid_linspline(t, y) check_zero_endpoints(y) # Compute the spline value at the insertion points. yi = query(ti, t, y) # Concatenate the insertion points and values onto the end of each spline. ti_ex = jnp.broadcast_to(ti, t.shape[: -len(ti.shape)] + ti.shape) yi_ex = jnp.broadcast_to(yi, y.shape[: -len(yi.shape)] + yi.shape) to = jnp.concatenate([t, ti_ex], axis=-1) yo = jnp.concatenate([y, yi_ex], axis=-1) # Sort the spline according to t. sort_idx = jnp.argsort(to) to = jnp.take_along_axis(to, sort_idx, axis=-1) yo = jnp.take_along_axis(yo, sort_idx, axis=-1) return to, yo def clamp(t, y, minval, maxval): """Clamp (t, y) to be zero outside of t in [minval, maxval].""" utils.assert_valid_linspline(t, y) check_zero_endpoints(y) # Add in extra points at and immediately above/below the min/max vals. ti = jnp.concatenate( [ math.minus_eps(minval), minval, maxval, math.plus_eps(maxval), ], axis=-1, ) tc, yo = insert_knot(ti, t, y) # Zero the spline values outside of [minval, maxval]. yc = jnp.where(tc > maxval, 0, jnp.where(tc < minval, 0, yo)) return tc, yc def compute_integral(t, y): """Integrate a linear spline into a piecewise quadratic spline.""" utils.assert_valid_linspline(t, y) eps = jnp.finfo(jnp.float32).eps ** 2 dt = jnp.diff(t) a = jnp.diff(y) / jnp.maximum(eps, 2 * dt) b = y[Ellipsis, :-1] # The integral has an ambiguous global offset here, which we set to 0. 
c1 = 0.5 * jnp.cumsum(dt[Ellipsis, :-1] * (y[Ellipsis, :-2] + y[Ellipsis, 1:-1]), axis=-1) c = jnp.concatenate([jnp.zeros_like(y[Ellipsis, :1]), c1], axis=-1) # This quadratic is parameterized as: # (t - t[i])**2 * a[i] + (t - t[i]) * b[i] + c[i] return a, b, c def sorted_lookup(x, xp): """Lookup `x` at sorted locations `xp`.""" # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( functools.partial(jnp.searchsorted, side='right'), signature='(n),(m)->(m)', )(xp, x) idx0 = jnp.maximum(idx - 1, 0) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) return idx0, idx1 def interpolate_integral(tq, t, a, b, c): """Interpolate into the piecewise quadratic returned by compute_integral().""" utils.assert_valid_stepfun(t, a) utils.assert_valid_stepfun(t, b) utils.assert_valid_stepfun(t, c) # Clip to valid inputs (assumes repeating boundaries). tq = jnp.clip(tq, t[Ellipsis, :1], math.minus_eps(t[Ellipsis, -1:])) # Lookup the quadratic coefficients corresponding to each input query. idx0, _ = sorted_lookup(tq, t) # TODO(barron): It might be faster to stack (a, c, b) during generation and # do a single gather. t0 = jnp.take_along_axis(t, idx0, axis=-1) a0 = jnp.take_along_axis(a, idx0, axis=-1) b0 = jnp.take_along_axis(b, idx0, axis=-1) c0 = jnp.take_along_axis(c, idx0, axis=-1) td = tq - t0 v = a0 * td**2 + b0 * td + c0 return v def blur_stepfun(ts, ys, halfwidth): """Convolve a step function (ts, ys) with a box filter of size `halfwidth`.""" utils.assert_valid_stepfun(ts, ys) # Blur each entire step function by a single `halfwidth` value. # Dilate the t-values by at least n<fim_suffix>umerical epsilon in each direction. ts_lo = ts - halfwidth ts_hi = jnp.maximum(math.plus_eps(ts), ts + halfwidth) # The difference in adjacent `y` values (zero padded) divided by the # difference in adjacent `t` values. ys0 = jnp.concatenate( [jnp.zeros_like(ys[Ellipsis, :1]), ys, jnp.zeros_like(ys[Ellipsis, :1])], axis=-1 ) dy = jnp.diff(ys0) / (ts_hi - ts_lo) # When decreasing t splat a positive second derivative, and when increasing # t splat a negative second derivative. tp = jnp.concatenate([ts_lo, ts_hi], axis=-1) dyp = jnp.concatenate([dy, -dy], axis=-1) # Sort the dilated t-values and their accompanying derivative weights. idx = jnp.argsort(tp, axis=-1) tp = jnp.take_along_axis(tp, idx, axis=-1) dyp = jnp.take_along_axis(dyp, idx[Ellipsis, :-2], axis=-1) # A ramp is the double integral of a delta function, so if we double- # integrate these derivatives you get the sum of a bunch of trapezoids. yp = jnp.cumsum(jnp.diff(tp)[Ellipsis, :-1] * jnp.cumsum(dyp, axis=-1), axis=-1) # Add in the missing first and last endpoint values, which must be zero # because we assume zero padding on `ys`. yp = jnp.concatenate( [jnp.zeros_like(yp[Ellipsis, :1]), yp, jnp.zeros_like(yp[Ellipsis, -1:])], axis=-1 ) return tp, yp <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
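linspline.py's integrate applies the trapezoid rule to the knots of a piecewise linear spline. A NumPy restatement with a hand-checkable example (a trapezoid with zero endpoints, as the splines above assume):

import numpy as np

def integrate(t, w):
    # Trapezoid rule over adjacent knots of the linear spline (t, w).
    return 0.5 * np.sum((w[:-1] + w[1:]) * np.diff(t), axis=-1)

t = np.array([0.0, 1.0, 3.0, 4.0])
w = np.array([0.0, 2.0, 2.0, 0.0])
print(integrate(t, w))   # 6.0 == 0.5*(0+2)*1 + 0.5*(2+2)*2 + 0.5*(2+0)*1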
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric w<fim_suffix>eights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
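geopoly.py's compute_sq_dist gets all pairwise squared distances between matrix columns from the identity ||x - y||^2 = ||x||^2 + ||y||^2 - 2 x^T y, clamping tiny negatives caused by roundoff. A NumPy sketch checked against the brute-force definition:

import numpy as np

def compute_sq_dist(mat0, mat1):
    # Pairwise squared distances between the columns of mat0 and mat1.
    sq_norm0 = np.sum(mat0**2, 0)
    sq_norm1 = np.sum(mat1**2, 0)
    sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1
    return np.maximum(0, sq_dist)   # negative values can only be numerical error

rng = np.random.default_rng(0)
a, b = rng.normal(size=(3, 5)), rng.normal(size=(3, 4))
brute = np.array([[np.sum((a[:, i] - b[:, j])**2) for j in range(4)] for i in range(5)])
assert np.allclose(compute_sq_dist(a, b), brute)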
<filename>camp_zipnerf/internal/coord.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating coordinate spaces and distances along rays.""" from internal import geopoly from internal import math import jax from jax import random import jax.numpy as jnp import numpy as np def contract(x): """Contracts points towards the origin (Eq 10 of arxiv.org/abs/2111.12077).""" # Clamping to 1 produces correct scale inside |x| < 1 x_mag_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1, keepdims=True)) scale = (2 * jnp.sqrt(x_mag_sq) - 1) / x_mag_sq z = scale * x return z def inv_contract(z): """The inverse of contract().""" # Clamping to 1 produces correct scale inside |z| < 1 z_mag_sq = jnp.maximum(1, jnp.sum(z**2, axis=-1, keepdims=True)) inv_scale = 2 * jnp.sqrt(z_mag_sq) - z_mag_sq x = z / inv_scale return x def track_linearize(fn, mean, cov): """Apply function `fn` to a set of means and covariances, ala a Kalman filter. We can analytically transform a Gaussian parameterized by `mean` and `cov` with a function `fn` by linearizing `fn` around `mean`, and taking advantage of the fact that Covar[Ax + y] = A(Covar[x])A^T (see https://cs.nyu.edu/~roweis/notes/gaussid.pdf for details). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. cov: a tensor of covariances, where the last two axes are the dimensions. Returns: fn_mean: the transformed means. fn_cov: the transformed covariances. """ if (len(mean.shape) + 1) != len(cov.shape): raise ValueError('cov must be non-diagonal') fn_mean, lin_fn = jax.linearize(fn, mean) fn_cov = jax.vmap(lin_fn, -1, -2)(jax.vmap(lin_fn, -1, -2)(cov)) return fn_mean, fn_cov def track_isotropic(fn, mean, scale): """Apply function `fn` to a set of means and scales, ala a Kalman filter. This is the isotropic or scalar equivalent of track_linearize, as we're still linearizing a function and tracking a Gaussian through it, but the input and output Gaussians are all isotropic and are only represented with a single `scale` value (where `scale**2` is the variance of the Gaussian). Args: fn: A function that can be applied to `mean`. mean: a tensor of Gaussian means, where the last axis is the dimension. scale: a tensor of scales, with the same shape as means[..., -1]. Returns: fn_mean: the transformed means. fn_scale: the transformed scales. """ if mean.shape[:-1] != scale.shape: raise ValueError( f'mean.shape[:-1] {mean.shape}[:-1] != scale.shape {scale.shape}.' ) d = mean.shape[-1] fn_mean, lin_fn = jax.linearize(fn, mean) if scale is not None: # Compute the Jacobian of fn function at the locations of each mean. jac = jax.vmap(lin_fn, in_axes=-1, out_axes=-1)( jnp.broadcast_to(jnp.eye(d), mean.shape + (d,)) ) # The cube root of the determinant of the Jacobian is the geometric mean # of the eigenvalues of the Jacobian, which gives us the isotropic scaling # implied by `fn` at each mean that `scale` should be multiplied by. 
eps = jnp.finfo(jnp.float32).tiny # Guard against an inf gradient at 0. abs_det = jnp.maximum(eps, jnp.abs(jnp.linalg.det(jac))) # Special case d == 3 for speed's sake. fn_scale = scale * (jnp.cbrt(abs_det) if d == 3 else abs_det ** (1 / d)) else: fn_scale = None return fn_mean, fn_scale def contract3_isoscale(x): """A fast version of track_isotropic(contract, *)'s scaling for 3D inputs.""" if x.shape[-1] != 3: raise ValueError(f'Inputs must be 3D, are {x.shape[-1]}D.') norm_sq = jnp.maximum(1, jnp.sum(x**2, axis=-1)) # Equivalent to cbrt((2 * sqrt(norm_sq) - 1) ** 2) / norm_sq: return jnp.exp(2 / 3 * jnp.log(2 * jnp.sqrt(norm_sq) - 1) - jnp.log(norm_sq)) def construct_ray_warps(fn, t_near, t_far, *, fn_inv=None): """Construct a bijection between metric distances and normalized distances. See the text around Equation 11 in https://arxiv.org/abs/2111.12077 for a detailed explanation. Args: fn: the function to ray distances. t_near: a tensor of near-plane distances. t_far: a tensor of far-plane distances. fn_inv: Optional, if not None then it's used as the inverse of fn(). Returns: t_to_s: a function that maps distances to normalized distances in [0, 1]. s_to_t: the inverse of t_to_s. """ if fn is None: fn_fwd = lambda x: x fn_inv = lambda x: x else: fn_fwd = fn if fn_inv is None: # A simple mapping from some functions to their inverse. inv_mapping = { 'reciprocal': jnp.reciprocal, 'log': jnp.exp, 'exp': jnp.log, 'sqrt': jnp.square, 'square': jnp.sqrt, } fn_inv = inv_mapping[fn.__name__] fn_t_near, fn_t_far = [fn_fwd(t) for t in (t_near, t_far)] # Forcibly clip t to the range of valid values, to guard against inf's. t_clip = lambda t: jnp.clip(t, t_near, t_far) t_to_s = lambda t: (fn_fwd(t_clip(t)) - fn_t_near) / (fn_t_far - fn_t_near) s_to_t = lambda s: t_clip(fn_inv(s * fn_t_far + (1 - s) * fn_t_near)) return t_to_s, s_to_t def expected_sin(mean, var): """Compute the mean of sin(x), x ~ N(mean, var).""" return jnp.exp(-0.5 * var) * math.safe_sin(mean) # large var -> small value. def integrated_pos_enc(mean, var, min_deg, max_deg): """Encode `x` with sinusoids scaled by 2^[min_deg, max_deg). Args: mean: tensor, the mean coordinates to be encoded var: tensor, the variance of the coordinates to be encoded. min_deg: int, the min degree of the encoding. max_deg: int, the max degree of the encoding. Returns: encoded: jnp.ndarray, encoded variables. """ scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = mean.shape[:-1] + (-1,) scaled_mean = jnp.reshape(mean[Ellipsis, None, :] * scales[:, None], shape) scaled_var = jnp.reshape(var[Ellipsis, None, :] * scales[:, None] ** 2, shape) return expected_sin( jnp.concatenate([scaled_mean, scaled_mean + 0.5 * jnp.pi], axis=-1), jnp.concatenate([scaled_var] * 2, axis=-1), ) def lift_and_diagonalize(mean, cov, basis): """Project `mean` and `cov` onto basis and diagonalize the projected cov.""" fn_mean = math.matmul(mean, basis) fn_cov_diag = jnp.sum(basis * math.matmul(cov, basis), axis=-2) return fn_mean, fn_cov_diag def pos_enc(x, min_deg, max_deg, append_identity=True): """The positional encoding used by the original NeRF paper.""" scales = 2.0 ** jnp.arange(min_deg, max_deg) shape = x.shape[:-1] + (-1,) scaled_x = x[Ellipsis, None, :] * scales[:, None] # (..., s, c). scaled_x = jnp.reshape(scaled_x, shape) # (..., s*c). # Note that we're not using safe_sin, unlike IPE. # (..., s*c + s*c). 
four_feat = jnp.sin( jnp.concatenate([scaled_x, scaled_x + 0.5 * jnp.pi], axis=-1) ) if append_identity: return jnp.concatenate([x, four_feat], axis=-1) else: return four_feat def sqrtm(mat, return_eigs=False): """Take the matrix square root of a PSD matrix [..., d, d].""" eigvec, eigval = jax.lax.linalg.eigh( mat, symmetrize_input=False, sort_eigenvalues=False ) scaling = math.safe_sqrt(eigval)[Ellipsis, None, :] sqrtm_mat = math.matmul(eigvec * scaling, jnp.moveaxis(eigvec, -2, -1)) return (sqrtm_mat, (eigvec, eigval)) if return_eigs else sqrtm_mat def isotropize(cov, mode='accurate'): """Turn covariances into isotropic covariances with the same determinant.""" d = cov.shape[-1] if d == 1: return cov if mode == 'fast': det = jnp.linalg.det(cov) diag_val = det ** (1 / d) is_invalid = (det <= jnp.finfo(jnp.float32).tiny) | ~jnp.isfinite(det) elif mode == 'accurate': log_det = jnp.linalg.slogdet(cov)[1] diag_val = jnp.exp(log_det / d) is_invalid = ~jnp.isfinite(log_det) else: raise ValueError(f'mode={mode} not implemented.') cov_iso = jnp.eye(d) * diag_val[Ellipsis, None, None] # Guard against NaN outputs when `det` is super small. Note that this does n<fim_suffix>ot # guard against NaN gradients! cov_iso = jnp.where(is_invalid[Ellipsis, None, None], jnp.zeros_like(cov), cov_iso) return cov_iso def construct_perp_basis(directions): """Construct a perpendicular basis for each 3-vector in `directions`.""" if directions.shape[-1] != 3: raise ValueError(f'directions must be 3D, but is {directions.shape[-1]}D') # To generate a vector perpendicular to `directions`, we take a cross-product # with an arbitrary vector [0, 0, 1]. cross1a = jnp.cross(directions, np.array([0.0, 0.0, 1.0])) # In the rare case that `directions` is very close to [0, 0, 1], we compute an # alternate cross-product with [1, 1, 1] to use instead. cross1b = jnp.cross(directions, np.array([1.0, 1.0, 1.0])) use_b = jnp.all(jnp.abs(cross1a) < np.finfo(np.float32).eps, axis=-1) cross1 = jnp.where(use_b[Ellipsis, None], cross1b, cross1a) # Crossing `directions` with `cross1` gives us our 3rd vector. cross2 = jnp.cross(directions, cross1) # Normalize vectors before returning them. normalize = lambda z: z / jnp.sqrt(jnp.sum(z**2, axis=-1, keepdims=True)) return normalize(cross1), normalize(cross2) def hexify(rng, *, origins, directions, radii, tdist): """Produce hexagon-shaped samples from ray segments.""" # Construct a base set of angles, by linspacing [0, 2pi] in a specific order. # This is one of two orderings of angles that doesn't induce any anisotropy # into the sample covariance of the multisample coordinates. Any rotation and # mirroring along the z-axis of this ordering is also valid. # There exists one alternative valid ordering, which is [0, 3, 2, 5, 4, 1]. # This seems to work less well though likely because of the strong correlation # between adjacent angles. thetas = (np.pi / 3) * np.array([0, 2, 4, 3, 5, 1]) # Lift the angles to the size of the rays. sz = tdist.shape[:-1] + (tdist.shape[-1] - 1, len(thetas)) thetas = jnp.broadcast_to(thetas, sz) if rng is not None: # Randomly reverse the order of half of the hexes. key, rng = random.split(rng) flip = random.bernoulli(key, shape=sz[:-1]) thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) # Rotate each hex by some random amount. key, rng = random.split(rng) thetas += (2 * jnp.pi) * random.uniform(key, shape=sz[:-1])[Ellipsis, None] else: # If we're deterministic, flip and shift every other hex by 30 degrees. 
flip = jnp.arange(thetas.shape[-2]) % 2 thetas = jnp.where(flip[Ellipsis, None], thetas[Ellipsis, ::-1], thetas) thetas += (flip * jnp.pi / 6)[Ellipsis, None] # TODO(barron): Plumb through the dx/dy frame for the original ray in the # image plane, to avoid the need of this. perp_axis1, perp_axis2 = construct_perp_basis(directions) # Grab each t-interval's midpoint and half-width. t0, t1 = tdist[Ellipsis, :-1], tdist[Ellipsis, 1:] s = (t0 + t1) / 2 d = (t1 - t0) / 2 # Compute the length along the ray for each multisample, using mip-NeRF math. cz = t0[Ellipsis, None] + math.safe_div(d, (d**2 + 3 * s**2))[Ellipsis, None] * ( (t1**2 + 2 * s**2)[Ellipsis, None] + (3 / np.sqrt(7)) * (np.arange(6) * (2 / 5) - 1) * math.safe_sqrt(((d**2 - s**2) ** 2 + 4 * s**4))[Ellipsis, None] ) # Compute the offset from the ray for each multisample. perp_mag = jnp.sqrt(0.5) * radii[Ellipsis, None, :] * cz # Go from ray coordinate to world coordinates. cx = perp_mag * jnp.cos(thetas) cy = perp_mag * jnp.sin(thetas) control = ( origins[Ellipsis, None, None, :] + perp_axis1[Ellipsis, None, None, :] * cx[Ellipsis, None] + perp_axis2[Ellipsis, None, None, :] * cy[Ellipsis, None] + directions[Ellipsis, None, None, :] * cz[Ellipsis, None] ) return control, perp_mag def unscented_transform(mean, cov, basis, axis=0): """Construct "sigma points" along `axis` from each mean and covariance.""" d = cov.shape[-1] mean_ex = jnp.expand_dims(mean, axis) if basis == 'mean': # This effectively disables the unscented transform. return mean_ex if basis.startswith('random_'): num_random = int(basis.split('_')[-1]) # TODO(barron): use a non-fixed random seed? noise = random.multivariate_normal( random.PRNGKey(0), jnp.zeros_like(mean), cov, (num_random,) + mean.shape[:-1], ) control = mean_ex + jnp.moveaxis(jnp.nan_to_num(noise), 0, axis) return control sqrtm_cov = sqrtm(cov) if any([ basis.startswith(x) for x in ['tetrahedron', 'icosahedron', 'octahedron'] ]): # Use tessellated regular polyhedra vertices (and vec(0)) as control points. if d != 3: raise ValueError(f'Input is {d}D, but polyhedra are only defined for 3D.') base_shape, angular_tesselation = basis.split('_') transform = geopoly.generate_basis( base_shape, int(angular_tesselation), remove_symmetries=False ).T transform1 = np.concatenate([np.zeros((d, 1)), transform], axis=-1) transform1 /= np.sqrt(np.mean(transform1**2, axis=1))[:, None] control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) elif basis == 'julier': # The most basic symmetric unscented transformation from the original paper, # which yields 2*d+1 control points. offsets = np.sqrt(d + 0.5) * jnp.moveaxis(sqrtm_cov, -1, axis) control = jnp.concatenate( [mean_ex, mean_ex + offsets, mean_ex - offsets], axis=axis ) elif basis == 'menegaz': # A compact unscented transformation from # folk.ntnu.no/skoge/prost/proceedings/cdc-ecc-2011/data/papers/2263.pdf # which yields d+1 control points. if d == 3: # A hand-optimized version of the d==3 case. 
sqrtm_cov_sum = jnp.sum(sqrtm_cov, axis=-1, keepdims=True) offsets = jnp.concatenate( [-sqrtm_cov_sum, 2 * sqrtm_cov - sqrtm_cov_sum / 3], axis=-1 ) control = mean_ex + jnp.moveaxis(offsets, -1, axis) else: transform = np.sqrt(d + 1) * np.eye(d) + (1 - np.sqrt(d + 1)) / d # == sqrt((d+1)) * sqrtm(eye(d) - 1/(d+1)) transform1 = np.concatenate([-np.ones((d, 1)), transform], axis=-1) control = mean_ex + jnp.moveaxis( math.matmul(sqrtm_cov, transform1), -1, axis ) else: raise ValueError(f'basis={basis} not implemented.') return control def compute_control_points( means, covs, rays, tdist, rng, unscented_mip_basis, unscented_scale_mult, ): """Wrapper to compute unscented control points for the MLP class.""" if unscented_mip_basis == 'hexify': control, perp_mag = hexify( rng, origins=rays.origins, directions=rays.directions, radii=rays.radii, tdist=tdist, ) else: # Use a normal unscented transformation. control = unscented_transform( means, covs, basis=unscented_mip_basis, axis=-2, ) if unscented_scale_mult > 0: if rays is None: raise SyntaxError( 'Rays are required as input if unscented_scale_mult > 0.' ) # Mimic the math used by hexify to produce comparable scales. t_recon = jnp.sum( (control - rays.origins[Ellipsis, None, None, :]) * rays.directions[Ellipsis, None, None, :], axis=-1, ) perp_mag = jnp.sqrt(0.5) * rays.radii[Ellipsis, None, :] * t_recon else: perp_mag = None return control, perp_mag <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/stepfun.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for manipulating step functions (piecewise-constant 1D functions). We have a shared naming and dimension convention for these functions. All input/output step functions are assumed to be aligned along the last axis. `t` always indicates the x coordinates of the *endpoints* of a step function. `y` indicates unconstrained values for the *bins* of a step function `w` indicates bin weights that sum to <= 1. `p` indicates non-negative bin values that *integrate* to <= 1. """ from internal import linspline from internal import math from internal import utils import jax import jax.numpy as jnp import numpy as np def query(tq, t, y, left=None, right=None): """Query step function (t, y) at locations tq. Edges repeat by default.""" utils.assert_valid_stepfun(t, y) # Query the step function to recover the interval value. (i0, i1), ((yq, _),) = math.sorted_lookup(tq, t, (y,), utils.device_is_tpu()) # Apply boundary conditions. left = y[Ellipsis, :1] if left is None else left right = y[Ellipsis, -1:] if right is None else right yq = math.select([(i1 == 0, left), (i0 == y.shape[-1], right)], yq) return yq def weight_to_pdf(t, w): """Turn a vector of weights that sums to 1 into a PDF that integrates to 1.""" utils.assert_valid_stepfun(t, w) td = jnp.diff(t) return jnp.where(td < np.finfo(np.float32).tiny, 0, math.safe_div(w, td)) def pdf_to_weight(t, p): """Turn a PDF that integrates to 1 into a vector of weights that sums to 1.""" utils.assert_valid_stepfun(t, p) return p * jnp.diff(t) def integrate_weights(w): """Compute the cumulative sum of w, assuming all weight vectors sum to 1. The output's size on the last dimension is one greater than that of the input, because we're computing the integral corresponding to the endpoints of a step function, not the integral of the interior/bin values. Args: w: Tensor, which will be integrated along the last axis. This is assumed to sum to 1 along the last axis, and this function will (silently) break if that is not the case. Returns: cw0: Tensor, the integral of w, where cw0[..., 0] = 0 and cw0[..., -1] = 1 """ cw = jnp.minimum(1, jnp.cumsum(w[Ellipsis, :-1], axis=-1)) shape = cw.shape[:-1] + (1,) # Ensure that the CDF starts with exactly 0 and ends with exactly 1. cw0 = jnp.concatenate([jnp.zeros(shape), cw, jnp.ones(shape)], axis=-1) return cw0 def invert_cdf(u, t, w_logits): """Invert the CDF defined by (t, w) at the points specified by u in [0, 1).""" utils.assert_valid_stepfun(t, w_logits) # Compute the PDF and CDF for each weight vector. w = jax.nn.softmax(w_logits, axis=-1) cw = integrate_weights(w) # Interpolate into the inverse CDF. t_new = math.sorted_interp(u, cw, t, utils.device_is_tpu()) return t_new def sample( rng, t, w_logits, num_samples, single_jitter=False, deterministic_center=False, eps=jnp.finfo(jnp.float32).eps, ): """Piecewise-Constant PDF sampling from a step function. 
Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of samples. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. deterministic_center: bool, if False, when `rng` is None return samples that linspace the entire PDF. If True, skip the front and back of the linspace so that the centers of each PDF interval are returned. eps: float, something like numerical epsilon. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) # Draw uniform samples. if rng is None: # Match the behavior of jax.random.uniform() by spanning [0, 1-eps]. if deterministic_center: pad = 1 / (2 * num_samples) u = jnp.linspace(pad, 1.0 - pad - eps, num_samples) else: u = jnp.linspace(0, 1.0 - eps, num_samples) u = jnp.broadcast_to(u, t.shape[:-1] + (num_samples,)) else: # `u` is in [0, 1) --- it can be zero, but it can never be 1. u_max = eps + (1 - eps) / num_samples max_jitter = (1 - u_max) / (num_samples - 1) - eps d = 1 if single_jitter else num_samples u = jnp.linspace(0, 1 - u_max, num_samples) + jax.random.uniform( rng, t.shape[:-1] + (d,), maxval=max_jitter ) return invert_cdf(u, t, w_logits) def sample_intervals( rng, t, w_logits, num_samples, single_jitter=False, domain=(-jnp.inf, jnp.inf), ): """Sample *intervals* (rather than points) from a step function. Args: rng: random number generator (or None for `linspace` sampling). t: [..., num_bins + 1], bin endpoint coordinates (must be sorted) w_logits: [..., num_bins], logits corresponding to bin weights num_samples: int, the number of intervals to sample. single_jitter: bool, if True, jitter every sample along each ray by the same amount in the inverse CDF. Otherwise, jitter each sample independently. domain: (minval, maxval), the range of valid values for `t`. Returns: t_samples: jnp.ndarray(float32), [batch_size, num_samples]. """ utils.assert_valid_stepfun(t, w_logits) if num_samples <= 1: raise ValueError(f'num_samples must be > 1, is {num_samples}.') # Sample a set of points from the step function. centers = sample( rng, t, w_logits, num_samples, single_jitter, deterministic_center=True ) # The intervals we return will span the midpoints of each adjacent sample. mid = (centers[Ellipsis, 1:] + centers[Ellipsis, :-1]) / 2 # Each first/last fencepost is the reflection of the first/last midpoint # around the first/last sampled center. first = 2 * centers[Ellipsis, :1] - mid[Ellipsis, :1] last = 2 * centers[Ellipsis, -1:] - mid[Ellipsis, -1:] samples = jnp.concatenate([first, mid, last], axis=-1) # We clamp to the limits of the input domain, provided by the caller. samples = jnp.clip(samples, *domain) return samples def lossfun_distortion(t, w): """Compute iint w[i] w[j] |t[i] - t[j]| di dj.""" utils.assert_valid_stepfun(t, w) # The loss in<fim_suffix>curred between all pairs of intervals. ut = (t[Ellipsis, 1:] + t[Ellipsis, :-1]) / 2 dut = jnp.abs(ut[Ellipsis, :, None] - ut[Ellipsis, None, :]) loss_inter = jnp.sum(w * jnp.sum(w[Ellipsis, None, :] * dut, axis=-1), axis=-1) # The loss incurred within each individual interval with itself. loss_intra = jnp.sum(w**2 * jnp.diff(t), axis=-1) / 3 return loss_inter + loss_intra def weighted_percentile(t, w, ps): """Compute the weighted percentiles of a step function. 
w's must sum to 1.""" utils.assert_valid_stepfun(t, w) cw = integrate_weights(w) # We want to interpolate into the integrated weights according to `ps`. wprctile = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( jnp.array(ps) / 100, cw, t ) return wprctile def resample(t, tp, vp, use_avg=False): """Resample a step function defined by (tp, vp) into intervals t. Notation roughly matches jnp.interp. Resamples by summation by default. Args: t: tensor with shape (..., n+1), the endpoints to resample into. tp: tensor with shape (..., m+1), the endpoints of the step function being resampled. vp: tensor with shape (..., m), the values of the step function being resampled. use_avg: bool, if False, return the sum of the step function for each interval in `t`. If True, return the average, weighted by the width of each interval in `t`. Returns: v: tensor with shape (..., n), the values of the resampled step function. """ utils.assert_valid_stepfun(tp, vp) if use_avg: wp = jnp.diff(tp) v_numer = resample(t, tp, vp * wp, use_avg=False) v_denom = resample(t, tp, wp, use_avg=False) v = math.safe_div(v_numer, v_denom) return v acc = jnp.cumsum(vp, axis=-1) acc0 = jnp.concatenate([jnp.zeros(acc.shape[:-1] + (1,)), acc], axis=-1) acc0_resampled = jnp.vectorize(jnp.interp, signature='(n),(m),(m)->(n)')( t, tp, acc0 ) v = jnp.diff(acc0_resampled, axis=-1) return v def blur_and_resample_weights(tq, t, w, blur_halfwidth): """Blur the (t, w) histogram by blur_halfwidth, then resample it into tq.""" utils.assert_valid_stepfun(t, w) # Convert the histogram to a PDF. p = weight_to_pdf(t, w) # Blur the PDF step function into a piecewise linear spline PDF. t_linspline, p_linspline = linspline.blur_stepfun(t, p, blur_halfwidth) # Integrate the spline PDF, then query it to get integrated weights. quad = linspline.compute_integral(t_linspline, p_linspline) acc_wq = linspline.interpolate_integral(tq, t_linspline, *quad) # Undo the integration to get weights. wq = jnp.diff(acc_wq, axis=-1) # Fix negative values to 0, as they should never happen but may due to # numerical issues. wq = jnp.maximum(0, wq) return wq <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.sea<fim_suffix>rchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # ite<fim_suffix>rations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
LINE_COMMENT
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for<fim_suffix> i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range<fim_suffix>(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i<fim_suffix>): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in f<fim_suffix>n(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k <fim_suffix>in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for<fim_suffix> fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, *<fim_suffix>*kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/geopoly.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tools for constructing geodesic polyhedron, which are used as a basis.""" import itertools import numpy as np def compute_sq_dist(mat0, mat1=None): """Compute the squared Euclidean distance between all pairs of columns.""" if mat1 is None: mat1 = mat0 # Use the fact that ||x - y||^2 == ||x||^2 + ||y||^2 - 2 x^T y. sq_norm0 = np.sum(mat0**2, 0) sq_norm1 = np.sum(mat1**2, 0) sq_dist = sq_norm0[:, None] + sq_norm1[None, :] - 2 * mat0.T @ mat1 sq_dist = np.maximum(0, sq_dist) # Negative values must be numerical errors. return sq_dist def compute_tesselation_weights(v): """Tesselate the vertices of a triangle by a factor of `v`.""" if v < 1: raise ValueError(f'v {v} must be >= 1') int_weights = [] for i in range(v + 1): for j in range(v + 1 - i): int_weights.append((i, j, v - (i + j))) int_weights = np.array(int_weights) weights = int_weights / v # Barycentric weights. return weights def tesselate_geodesic(base_verts, base_faces, v, eps=1e-4): """Tesselate the vertices of a geodesic polyhedron. Args: base_verts: tensor of floats, the vertex coordinates of the geodesic. base_faces: tensor of ints, the indices of the vertices of base_verts that constitute eachface of the polyhedra. v: int, the factor of the tesselation (v==1 is a no-op). eps: float, a small value used to determine if two vertices are the same. Returns: verts: a tensor of floats, the coordinates of the tesselated vertices. """ if not isinstance(v, int): raise ValueError(f'v {v} must an integer') tri_weights = compute_tesselation_weights(v) verts = [] for<fim_suffix> base_face in base_faces: new_verts = np.matmul(tri_weights, base_verts[base_face, :]) new_verts /= np.sqrt(np.sum(new_verts**2, 1, keepdims=True)) verts.append(new_verts) verts = np.concatenate(verts, 0) sq_dist = compute_sq_dist(verts.T) assignment = np.array([np.min(np.argwhere(d <= eps)) for d in sq_dist]) unique = np.unique(assignment) verts = verts[unique, :] return verts def generate_basis( base_shape, angular_tesselation, remove_symmetries=True, eps=1e-4 ): """Generates a 3D basis by tesselating a geometric polyhedron. Args: base_shape: string, the name of the starting polyhedron, must be either 'tetrahedron', 'icosahedron' or 'octahedron'. angular_tesselation: int, the number of times to tesselate the polyhedron, must be >= 1 (a value of 1 is a no-op to the polyhedron). remove_symmetries: bool, if True then remove the symmetric basis columns, which is usually a good idea because otherwise projections onto the basis will have redundant negative copies of each other. eps: float, a small number used to determine symmetries. Returns: basis: a matrix with shape [3, n]. 
""" if base_shape == 'tetrahedron': verts = np.array([ (np.sqrt(8 / 9), 0, -1 / 3), (-np.sqrt(2 / 9), np.sqrt(2 / 3), -1 / 3), (-np.sqrt(2 / 9), -np.sqrt(2 / 3), -1 / 3), (0, 0, 1), ]) faces = np.array([(0, 1, 2), (0, 2, 3), (0, 1, 3), (1, 2, 3)]) elif base_shape == 'icosahedron': a = (np.sqrt(5) + 1) / 2 verts = np.array([ (-1, 0, a), (1, 0, a), (-1, 0, -a), (1, 0, -a), (0, a, 1), (0, a, -1), (0, -a, 1), (0, -a, -1), (a, 1, 0), (-a, 1, 0), (a, -1, 0), (-a, -1, 0), ]) / np.sqrt(a + 2) faces = np.array([ (0, 4, 1), (0, 9, 4), (9, 5, 4), (4, 5, 8), (4, 8, 1), (8, 10, 1), (8, 3, 10), (5, 3, 8), (5, 2, 3), (2, 7, 3), (7, 10, 3), (7, 6, 10), (7, 11, 6), (11, 0, 6), (0, 1, 6), (6, 1, 10), (9, 0, 11), (9, 11, 2), (9, 2, 5), (7, 2, 11), ]) elif base_shape == 'octahedron': verts = np.array( [(0, 0, -1), (0, 0, 1), (0, -1, 0), (0, 1, 0), (-1, 0, 0), (1, 0, 0)] ) corners = np.array(list(itertools.product([-1, 1], repeat=3))) pairs = np.argwhere(compute_sq_dist(corners.T, verts.T) == 2) faces = np.sort(np.reshape(pairs[:, 1], [3, -1]).T, 1) else: raise ValueError(f'base_shape {base_shape} not supported') verts = tesselate_geodesic(verts, faces, angular_tesselation) if remove_symmetries: # Remove elements of `verts` that are reflections of each other. match = compute_sq_dist(verts.T, -verts.T) < eps verts = verts[~np.any(np.triu(match), axis=0), :] basis = verts[:, ::-1] return basis <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1)<fim_suffix>: ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: fo<fim_suffix>r item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
FOR
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def re<fim_suffix>sult_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def dec<fim_suffix>orator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_enc_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_<fim_suffix>fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/ref_utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Functions for reflection directions and directional encodings.""" import math from internal import math as math_lib import jax.numpy as jnp import numpy as np def reflect(viewdirs, normals): """Reflect view directions about normals. The reflection of a vector v about a unit vector n is a vector u such that dot(v, n) = dot(u, n), and dot(u, u) = dot(v, v). The solution to these two equations is u = 2 dot(n, v) n - v. Args: viewdirs: [..., 3] array of view directions. normals: [..., 3] array of normal directions (assumed to be unit vectors). Returns: [..., 3] array of reflection directions. """ return ( 2.0 * jnp.sum(normals * viewdirs, axis=-1, keepdims=True) * normals - viewdirs ) def l2_normalize(x, grad_eps=jnp.finfo(jnp.float32).eps): """Normalize x to unit length along last axis. Normalizing vectors is surprisingly tricky, because you have to address the case where the denominator in the normalization is tiny or zero, in which case gradients will explode. For this reason, we perform two normalizations: in the forward pass, we clamp the denominator with ~1e-40, but in the backward pass we clamp with `grad_eps`, which defaults to ~1e-7. This guarantees that the output of this function is unit norm (unless x is very very small) while preventing exploding gradients. Args: x: The array of values to normalize. grad_eps: The value to clip the squared norm by before division in the backward pass. Returns: A normalized array x / ||x||, normalized along the last axis. """ tiny = jnp.finfo(jnp.float32).tiny grad_eps = jnp.maximum(tiny, grad_eps) denom_sq = jnp.sum(x**2, axis=-1, keepdims=True) normal_val = x / jnp.sqrt(jnp.maximum(tiny, denom_sq)) normal_grad = x / jnp.sqrt(jnp.maximum(grad_eps, denom_sq)) # Use `normal_val` in the forward pass but `normal_grad` in the backward pass. normal = math_lib.override_gradient(normal_val, normal_grad) return jnp.where(denom_sq < tiny, jnp.zeros_like(normal), normal) def compute_weighted_mae(weights, normals, normals_gt): """Compute weighted mean angular error, assuming normals are unit length.""" angles = math_lib.safe_arccos((normals * normals_gt).sum(axis=-1)) return (180.0 / jnp.pi) * ((weights * angles).sum() / weights.sum()) def generalized_binomial_coeff(a, k): """Compute generalized binomial coefficients.""" return np.prod(a - np.arange(k)) / math.factorial(k) def assoc_legendre_coeff(l, m, k): """Compute associated Legendre polynomial coefficients. Returns the coefficient of the cos^k(theta)*sin^m(theta) term in the (l, m)th associated Legendre polynomial, P_l^m(cos(theta)). Args: l: associated Legendre polynomial degree. m: associated Legendre polynomial order. k: power of cos(theta). Returns: A float, the coefficient of the term corresponding to the inputs. 
""" return ( (-1) ** m * 2**l * math.factorial(l) / math.factorial(k) / math.factorial(l - k - m) * generalized_binomial_coeff(0.5 * (l + k + m - 1.0), l) ) def sph_harm_coeff(l, m, k): """Compute spherical harmonic coefficients.""" return np.sqrt( (2.0 * l + 1.0) * math.factorial(l - m) / (4.0 * np.pi * math.factorial(l + m)) ) * assoc_legendre_coeff(l, m, k) def get_ml_array(deg_view): """Create a list with all pairs of (l, m) values to use in the encoding.""" ml_list = [] for i in range(deg_view): l = 2**i # Only use nonnegative m values, later splitting real and imaginary parts. for m in range(l + 1): ml_list.append((m, l)) # Convert list into a numpy array. ml_array = np.array(ml_list).T return ml_array def generate_ide_fn(deg_view): """Generate integrated directional encoding (IDE) function. This function returns a function that computes the integrated directional encoding from Equations 6-8 of arxiv.org/abs/2112.03907. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating integrated directional encoding. Raises: ValueError: if deg_view is larger than 5. """ if deg_view > 5: raise ValueError('Only deg_view of at most 5 is numerically stable.') ml_array = get_ml_array(deg_view) l_max = 2 ** (deg_view - 1) # Create a matrix corresponding to ml_array holding all coefficients, which, # when multiplied (from the right) by the z coordinate Vandermonde matrix, # results in the z component of the encoding. mat = np.zeros((l_max + 1, ml_array.shape[1])) for i, (m, l) in enumerate(ml_array.T): for k in range(l - m + 1): mat[k, i] = sph_harm_coeff(l, m, k) def integrated_dir_en<fim_suffix>c_fn(xyz, kappa_inv): """Function returning integrated directional encoding (IDE). Args: xyz: [..., 3] array of Cartesian coordinates of directions to evaluate at. kappa_inv: [..., 1] reciprocal of the concentration parameter of the von Mises-Fisher distribution. Returns: An array with the resulting IDE. """ x = xyz[Ellipsis, 0:1] y = xyz[Ellipsis, 1:2] z = xyz[Ellipsis, 2:3] # Compute z Vandermonde matrix. vmz = jnp.concatenate([z**i for i in range(mat.shape[0])], axis=-1) # Compute x+iy Vandermonde matrix. vmxy = jnp.concatenate([(x + 1j * y) ** m for m in ml_array[0, :]], axis=-1) # Get spherical harmonics. sph_harms = vmxy * math_lib.matmul(vmz, mat) # Apply attenuation function using the von Mises-Fisher distribution # concentration parameter, kappa. sigma = 0.5 * ml_array[1, :] * (ml_array[1, :] + 1) ide = sph_harms * jnp.exp(-sigma * kappa_inv) # Split into real and imaginary parts and return return jnp.concatenate([jnp.real(ide), jnp.imag(ide)], axis=-1) return integrated_dir_enc_fn def generate_dir_enc_fn(deg_view): """Generate directional encoding (DE) function. Args: deg_view: number of spherical harmonics degrees to use. Returns: A function for evaluating directional encoding. """ integrated_dir_enc_fn = generate_ide_fn(deg_view) def dir_enc_fn(xyz): """Function returning directional encoding (DE).""" return integrated_dir_enc_fn(xyz, jnp.zeros_like(xyz[Ellipsis, :1])) return dir_enc_fn def orientation_loss(w, n, v): """Orientation loss on weights `w`, normals `n`, and -view directions `v`.""" n_dot_v = (n * v[Ellipsis, None, :]).sum(axis=-1) return jnp.mean((w * jnp.minimum(0.0, n_dot_v) ** 2).sum(axis=-1)) <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn()<fim_suffix>: # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def saf<fim_suffix>e_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangents): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thre<fim_suffix>ad_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_f<fim_suffix>n(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/math.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Mathy utility functions.""" import functools import jax import jax.numpy as jnp import numpy as np tiny_val = np.float32(np.finfo(np.float32).tiny) min_val = np.float32(np.finfo(np.float32).min) max_val = np.float32(np.finfo(np.float32).max) def laplace_cdf(x, beta): alpha = 1 / beta return alpha * (0.5 + 0.5 * safe_sign(x) * (jnp.exp(-jnp.abs(x) / beta) - 1)) def scaled_softplus(x, scale=100.0): return (1.0 / scale) * jax.nn.softplus(scale * x) def matmul(a, b): """jnp.matmul defaults to bfloat16, but this helper function doesn't.""" return jnp.matmul(a, b, precision=jax.lax.Precision.HIGHEST) def unstack(x, axis=0): return tuple( jnp.squeeze(z, axis=axis) for z in jnp.split(x, x.shape[axis], axis=axis) ) @jax.custom_jvp def plus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, tiny_val, jnp.nextafter(jnp.float32(x), jnp.inf) ) @jax.custom_jvp def minus_eps(x): return jnp.where( jnp.abs(x) < tiny_val, -tiny_val, jnp.nextafter(jnp.float32(x), -jnp.inf) ) @plus_eps.defjvp def plus_eps_jvp(primals, tangents): """Make plus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return plus_eps(*primals), tangents[0] @minus_eps.defjvp def minus_eps_jvp(primals, tangents): """Make minus_eps()'s gradient a no-op (nextafter's gradient is undefined).""" return minus_eps(*primals), tangents[0] @jax.custom_jvp def expm1(x): """jnp.expm1() has inaccurate gradients when x << 0, this doesn't.""" return jnp.expm1(x) @expm1.defjvp def expm1_jvp(primals, tangents): return expm1(*primals), tangents[0] * jnp.exp(primals[0]) def safe_trig_helper(x, fn, t=100 * jnp.pi): """Helper function used by safe_cos/safe_sin: mods x before sin()/cos().""" return fn(jnp.nan_to_num(jnp.where(jnp.abs(x) < t, x, x % t))) def safe_cos(x): """jnp.cos() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.cos) def safe_sin(x): """jnp.sin() on a TPU may NaN out for large values.""" return safe_trig_helper(x, jnp.sin) @jax.custom_vjp def safe_arctan2(x1, x2): return safe_arctan2_fwd(x1, x2)[0] def safe_arctan2_fwd(x1, x2): return jnp.arctan2(x1, x2), (x1, x2) def safe_arctan2_bwd(res, g): x1, x2 = res denom = remove_zero(x1**2 + x2**2) d1 = g * (x2 / denom) d2 = g * (-x1 / denom) return d1, d2 safe_arctan2.defvjp(safe_arctan2_fwd, safe_arctan2_bwd) def generate_clip_nograd_fn(a_min, a_max): """Generates a function that clips to [a_min, a_max] with no grad effects.""" @jax.custom_jvp def clip_nograd(a): """Clamps `a` from above and below.""" return jnp.clip(a, a_min, a_max) @clip_nograd.defjvp def clip_nograd_jvp(primals, tangents): """Override clips()'s gradient to be a no-op.""" return clip_nograd(primals[0]), tangents[0] return clip_nograd clip_finite_nograd = generate_clip_nograd_fn(min_val, max_val) clip_pos_finite_nograd = generate_clip_nograd_fn(tiny_val, max_val) def clip_pos(x): """Clamps `x` from below to be positive.""" return 
jnp.maximum(tiny_val, x) def safe_sign(x): """jnp.sign(x) except x=0 is assumed to have a sign of +1, not 0.""" return jnp.where(x < 0, -1, +1) def remove_zero(x): """Shifts `x` away from 0.""" return jnp.where(jnp.abs(x) < tiny_val, tiny_val, x) def clip_finite(x): return jnp.clip(x, min_val, max_val) @jax.custom_vjp def safe_div(n, d): """Divide `n` by `d` but the value and gradient never nan out.""" return safe_div_fwd(n, d)[0] def safe_div_fwd(n, d): r = jnp.clip(n / remove_zero(d), min_val, max_val) return jnp.where(jnp.abs(d) < tiny_val, 0, r), (d, r) def safe_div_bwd(res, g): d, r = res dn = jnp.clip(g / remove_zero(d), min_val, max_val) dd = jnp.clip(-g * r / remove_zero(d), min_val, max_val) return dn, dd safe_div.defvjp(safe_div_fwd, safe_div_bwd) def generate_safe_fn(fn, grad_fn, x_range): """Generate's a `safe` fn() where inputs are clipped in fwd and bwd passes.""" @jax.custom_jvp def safe_fn(x): """fn() with clipped inputs.""" return fn(jnp.clip(x, *x_range)) @safe_fn.defjvp def safe_fn_jvp(primals, tangen<fim_suffix>ts): """Backpropagate using the gradient and clipped inputs.""" (x,) = primals (x_dot,) = tangents y = safe_fn(x) y_dot = grad_fn(jnp.clip(x, *x_range), y, x_dot) return y, y_dot return safe_fn # These safe_* functions need to be wrapped in no-op function definitions for # gin to recognize them, otherwise they could just be calls to generate_safe_fn. def safe_log(x): return generate_safe_fn( jnp.log, lambda x, _, x_dot: x_dot / x, (tiny_val, max_val), )(x) def safe_exp(x): return generate_safe_fn( jnp.exp, lambda _, y, x_dot: y * x_dot, (min_val, np.nextafter(np.log(max_val), np.float32(0))), )(x) def safe_sqrt(x): return generate_safe_fn( jnp.sqrt, lambda x, _, x_dot: 0.5 * x_dot / jnp.sqrt(jnp.maximum(tiny_val, x)), (0, max_val), )(x) def safe_log1p(x): return generate_safe_fn( jnp.log1p, lambda x, _, x_dot: x_dot / (1 + x), (np.nextafter(np.float32(-1), np.float32(0)), max_val), )(x) def safe_expm1(x): return generate_safe_fn( expm1, # Note that we wrap around our more accurate expm1. 
lambda x, _, x_dot: jnp.exp(x) * x_dot, (min_val, np.nextafter(np.log1p(max_val), np.float32(0))), )(x) def safe_arccos(x): """jnp.arccos(x) where x is clipped to [-1, 1].""" y = jnp.arccos(jnp.clip(x, plus_eps(-1), minus_eps(1))) return jnp.where(x >= 1, 0, jnp.where(x <= -1, jnp.pi, y)) def apply_fn_to_grad(grad_fn): """Applies a scalar `grad_fn` function to the gradient of the input.""" @jax.custom_vjp def fn_out(x): return x fn_out.defvjp(lambda x: (x, None), lambda _, y: (grad_fn(y),)) return fn_out def select(cond_pairs, default): """A helpful wrapper around jnp.select() that is easier to read.""" return jnp.select(*zip(*cond_pairs), default) def power_ladder_max_output(p): """The limit of power_ladder(x, p) as x goes to infinity.""" return select( [ (p == -jnp.inf, 1), (p >= 0, jnp.inf), ], safe_div(p - 1, p), ) def power_ladder(x, p, premult=None, postmult=None): """Tukey's power ladder, with a +1 on x, some scaling, and special cases.""" # Compute sign(x) * |p - 1|/p * ((|x|/|p-1| + 1)^p - 1) if premult is not None: x = x * premult xp = jnp.abs(x) xs = xp / jnp.maximum(tiny_val, jnp.abs(p - 1)) p_safe = clip_finite_nograd(remove_zero(p)) y = safe_sign(x) * select( [ (p == 1, xp), (p == 0, safe_log1p(xp)), (p == -jnp.inf, -safe_expm1(-xp)), (p == jnp.inf, safe_expm1(xp)), ], clip_finite_nograd( jnp.abs(p_safe - 1) / p_safe * ((xs + 1) ** p_safe - 1) ), ) if postmult is not None: y = y * postmult return y def inv_power_ladder(y, p, premult=None, postmult=None): """The inverse of `power_ladder()`.""" if postmult is not None: y /= postmult yp = jnp.abs(y) p_safe = clip_finite_nograd(remove_zero(p)) y_max = minus_eps(power_ladder_max_output(p)) yp = override_gradient(jnp.clip(yp, -y_max, y_max), yp) # Clip val, not grad. x = safe_sign(y) * select( [ (p == 1, yp), (p == 0, safe_expm1(yp)), (p == -jnp.inf, -safe_log1p(-yp)), (p == jnp.inf, safe_log1p(yp)), ], jnp.abs(p_safe - 1) * ( ((safe_div(p_safe, jnp.abs(p_safe - 1)) * yp + 1)) ** (1 / p_safe) - 1 ), ) if premult is not None: x /= premult return x def log_lerp(t, v0, v1): """Interpolate log-linearly from `v0` (t=0) to `v1` (t=1).""" if v0 <= 0 or v1 <= 0: raise ValueError(f'Interpolants {v0} and {v1} must be positive.') lv0 = jnp.log(v0) lv1 = jnp.log(v1) return jnp.exp(jnp.clip(t, 0, 1) * (lv1 - lv0) + lv0) def approx_erf(x): """An approximation of erf() that is accurate to within 0.007.""" return jnp.sign(x) * jnp.sqrt(1 - jnp.exp(-(4 / jnp.pi) * x**2)) def create_learning_rate_decay(**kwargs): """A partial evaluation of learning rate decay that can be used with gin.""" return functools.partial(learning_rate_decay, **kwargs) def learning_rate_decay( step, lr_init, lr_final, max_steps, lr_delay_steps=0, lr_delay_mult=1 ): """Continuous learning rate decay function. The returned rate is lr_init when step=0 and lr_final when step=max_steps, and is log-linearly interpolated elsewhere (equivalent to exponential decay). If lr_delay_steps>0 then the learning rate will be scaled by some smooth function of lr_delay_mult, such that the initial learning rate is lr_init*lr_delay_mult at the beginning of optimization but will be eased back to the normal learning rate when steps>lr_delay_steps. Args: step: int, the current optimization step. lr_init: float, the initial learning rate. lr_final: float, the final learning rate. max_steps: int, the number of steps during optimization. lr_delay_steps: int, the number of steps to delay the full learning rate. lr_delay_mult: float, the multiplier on the rate when delaying it. 
Returns: lr: the learning for current step 'step'. """ if lr_delay_steps > 0: # A kind of reverse cosine decay. delay_rate = lr_delay_mult + (1 - lr_delay_mult) * jnp.sin( 0.5 * jnp.pi * jnp.clip(step / lr_delay_steps, 0, 1) ) else: delay_rate = 1.0 return delay_rate * log_lerp(step / max_steps, lr_init, lr_final) def sorted_lookup(x, xp, fps, device_is_tpu): """Lookup `x` into locations `xp` , return indices and each `[fp]` value.""" if not isinstance(fps, tuple): raise ValueError(f'Input `fps` must be a tuple, but is {type(fps)}.') if device_is_tpu: # Identify the location in `xp` that corresponds to each `x`. # The final `True` index in `mask` is the start of the matching interval. mask = x[Ellipsis, None, :] >= xp[Ellipsis, :, None] def find_interval(x): # Grab the value where `mask` switches from True to False, and vice versa. # This approach takes advantage of the fact that `x` is sorted. x0 = jnp.max(jnp.where(mask, x[Ellipsis, None], x[Ellipsis, :1, None]), -2) x1 = jnp.min(jnp.where(~mask, x[Ellipsis, None], x[Ellipsis, -1:, None]), -2) return x0, x1 idx0, idx1 = find_interval(jnp.arange(xp.shape[-1])) vals = [find_interval(fp) for fp in fps] else: # jnp.searchsorted() has slightly different conventions for boundary # handling than the rest of this codebase. idx = jnp.vectorize( lambda a, v: jnp.searchsorted(a, v, side='right'), signature='(n),(m)->(m)', )(xp, x) idx1 = jnp.minimum(idx, xp.shape[-1] - 1) idx0 = jnp.maximum(idx - 1, 0) vals = [] for fp in fps: fp0 = jnp.take_along_axis(fp, idx0, axis=-1) fp1 = jnp.take_along_axis(fp, idx1, axis=-1) vals.append((fp0, fp1)) return (idx0, idx1), vals def sorted_interp( x, xp, fp, device_is_tpu, eps=jnp.finfo(jnp.float32).eps ** 2 ): """A version of interp() where xp and fp must be sorted.""" (xp0, xp1), (fp0, fp1) = sorted_lookup( x, xp, (xp, fp), device_is_tpu=device_is_tpu )[1] offset = jnp.clip((x - xp0) / jnp.maximum(eps, xp1 - xp0), 0, 1) ret = fp0 + offset * (fp1 - fp0) return ret def searchsorted(a, v, device_is_tpu): """Behaves like jnp.searchsorted, excluding boundary conditions.""" return sorted_lookup(v, a, (), device_is_tpu=device_is_tpu)[0] def override_gradient(fval, bval): """Use `fval` in the forward pass but `bval` in the backward pass.""" # Note that the parentheses are needed to avoid catastrophic cancellation. return jax.lax.stop_gradient(fval) + (bval - jax.lax.stop_gradient(bval)) def average_across_multisamples(x): """Function that averages grid query results across the multisample dimension.""" return jnp.mean(x, axis=-2) def noop(x): return x @jax.custom_jvp def fake_clip(a, a_min, a_max): """jnp.clip() but the gradient doesn't get clipped on the backward pass.""" return jnp.clip(a, a_min, a_max) @fake_clip.defjvp def fake_clip_jvp(primals, tangents): """Override fake_clip()'s gradient so that it's a no-op.""" return jnp.clip(*primals), tangents[0] @jax.jit def general_lossfun(x, alpha, scale): r"""This implements the rho(x, \alpha, c) function described in "A General and Adaptive Robust Loss Function", Jonathan T. Barron, https://arxiv.org/abs/1701.03077. Args: x: The residual for which the loss is being computed. x can have any shape, and alpha and scale will be broadcasted to match x's shape if necessary. alpha: The shape parameter of the loss (\alpha in the paper), where more negative values produce a loss with more robust behavior (outliers "cost" less), and more positive values produce a loss with less robust behavior (outliers are penalized more heavily). 
Alpha can be any value in [-infinity, infinity], but the gradient of the loss with respect to alpha is 0 at -infinity, infinity, 0, and 2. Varying alpha allows for smooth interpolation between several discrete robust losses: alpha=-Infinity: Welsch/Leclerc Loss. alpha=-2: Geman-McClure loss. alpha=0: Cauchy/Lortentzian loss. alpha=1: Charbonnier/pseudo-Huber loss. alpha=2: L2 loss. scale: The scale parameter of the loss. When |x| < scale, the loss is an L2-like quadratic bowl, and when |x| > scale the loss function takes on a different shape according to alpha. Returns: The losses for each element of x, in the same shape as x. """ eps = jnp.finfo(jnp.float32).eps maxval = 1e15 # A "safe" versions of expm1 that will not NaN-out on large inputs. expm1_safe = lambda x: jnp.expm1(jnp.minimum(x, 43)) # `scale` must be > 0. scale = jnp.maximum(eps, scale) # Large values of |x| can cause non-finite gradients. x = fake_clip(x, -maxval, maxval) # The loss when alpha == 2. This will get reused repeatedly. loss_two = 0.5 * (x / scale)**2 # Clamp |alpha| to be >= machine epsilon so that it's safe to divide by. a = jnp.where(alpha >= 0, jnp.ones_like(alpha), -jnp.ones_like(alpha)) * jnp.maximum(eps, jnp.abs(alpha)) # Clamp |2-alpha| to be >= machine epsilon so that it's safe to divide by. b = jnp.maximum(eps, jnp.abs(a - 2)) # The loss when not in one of the special casess. loss_ow = (b / a) * ((loss_two / (0.5 * b) + 1)**(0.5 * a) - 1) # Select which of the cases of the loss to return as a function of alpha. return jnp.where( alpha == -jnp.inf, -expm1_safe(-loss_two), jnp.where( alpha == 0, jnp.log1p(loss_two), jnp.where(alpha == 2, loss_two, jnp.where(alpha == jnp.inf, expm1_safe(loss_two), loss_ow)))) <fim_middle>
null
METHOD
complete_current_header_empty_completion
<filename>camp_zipnerf/internal/utils.py<fim_prefix># coding=utf-8 # Copyright 2023 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility functions.""" import concurrent import enum import os import queue import threading import time from typing import Any, Callable, Iterable, Optional, TypeVar, Union from absl import logging import flax import jax from jax import random import jax.numpy as jnp import numpy as np _Array = Union[np.ndarray, jnp.ndarray] @flax.struct.dataclass class Rays: """All tensors must have the same num_dims and first n-1 dims must match. This dataclass contains spatially meaningful quantities associated with the ray that can be calculated by the function casting the ray, as well as all metadata necessary for the ray to be rendered by the Model class. """ origins: Optional[_Array] = None directions: Optional[_Array] = None viewdirs: Optional[_Array] = None radii: Optional[_Array] = None imageplane: Optional[_Array] = None pixels: Optional[_Array] = None lossmult: Optional[_Array] = None near: Optional[_Array] = None far: Optional[_Array] = None cam_idx: Optional[_Array] = None exposure_idx: Optional[_Array] = None exposure_values: Optional[_Array] = None device_idx: Optional[_Array] = None def generate_random_rays( rng, n, origin_lo, origin_hi, radius_lo, radius_hi, near_lo, near_hi, far_lo, far_hi, include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): """Generate a random Rays datastructure.""" key, rng = random.split(rng) origins = random.uniform( key, shape=[n, 3], minval=origin_lo, maxval=origin_hi ) key, rng = random.split(rng) directions = random.normal(key, shape=[n, 3]) directions /= jnp.sqrt( jnp.maximum( jnp.finfo(jnp.float32).tiny, jnp.sum(directions**2, axis=-1, keepdims=True), ) ) viewdirs = directions key, rng = random.split(rng) radii = random.uniform(key, shape=[n, 1], minval=radius_lo, maxval=radius_hi) key, rng = random.split(rng) near = random.uniform(key, shape=[n, 1], minval=near_lo, maxval=near_hi) key, rng = random.split(rng) far = random.uniform(key, shape=[n, 1], minval=far_lo, maxval=far_hi) imageplane = jnp.zeros([n, 2]) lossmult = jnp.zeros([n, 1]) key, rng = random.split(rng) pixels = random.randint(key, shape=[n, 2], minval=0, maxval=1024) int_scalar = jnp.int32(jnp.zeros([n, 1])) exposure_kwargs = {} if include_exposure_idx: exposure_kwargs['exposure_idx'] = int_scalar if include_exposure_values: exposure_kwargs['exposure_values'] = jnp.zeros([n, 1]) if include_device_idx: exposure_kwargs['device_idx'] = int_scalar random_rays = Rays( origins=origins, directions=directions, viewdirs=viewdirs, radii=radii, imageplane=imageplane, pixels=pixels, lossmult=lossmult, near=near, far=far, cam_idx=int_scalar, **exposure_kwargs, ) return random_rays # Dummy Rays object that can be used to initialize NeRF model. 
def dummy_rays( include_exposure_idx = False, include_exposure_values = False, include_device_idx = False, ): return generate_random_rays( random.PRNGKey(0), n=100, origin_lo=-1.5, origin_hi=1.5, radius_lo=1e-5, radius_hi=1e-3, near_lo=0.0, near_hi=1.0, far_lo=10, far_hi=10000, include_exposure_idx=include_exposure_idx, include_exposure_values=include_exposure_values, include_device_idx=include_device_idx, ) @flax.struct.dataclass class Batch: """Data batch for NeRF training or testing. This dataclass contains rays and also per-pixel data that is necessary for computing the loss term or evaluating metrics but NOT necessary for rendering. """ rays: Rays rgb: Optional[_Array] = None disps: Optional[_Array] = None normals: Optional[_Array] = None alphas: Optional[_Array] = None masks: Optional[_Array] = None class DataSplit(enum.Enum): """Dataset split.""" TRAIN = 'train' TEST = 'test' class BatchingMethod(enum.Enum): """Draw rays randomly from a single image or all images, in each batch.""" ALL_IMAGES = 'all_images' SINGLE_IMAGE = 'single_image' def open_file(pth, mode='r'): return open(pth, mode=mode) def file_exists(pth): return os.path.exists(pth) def listdir(pth): return os.listdir(pth) def isdir(pth): return os.path.isdir(pth) def makedirs(pth): if not file_exists(pth): os.makedirs(pth) def device_is_tpu(): return jax.local_devices()[0].platform == 'tpu' def shard(xs): """Split data into shards for multiple devices along the first dimension.""" return jax.tree_util.tree_map( lambda x: x.reshape((jax.local_device_count(), -1) + x.shape[1:]), xs ) def unshard(x, padding=0): """Collect the sharded tensor to the shape before sharding.""" y = x.reshape([x.shape[0] * x.shape[1]] + list(x.shape[2:])) if padding > 0: y = y[:-padding] return y def load_npy(pth): """Load an numpy array cast to float32.""" with open_file(pth, 'rb') as f: x = np.load(f).astype(np.float32) return x def assert_valid_stepfun(t, y): """Assert that step function (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1] + 1: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a step function.' ) def assert_valid_linspline(t, y): """Assert that piecewise linear spline (t, y) has a valid shape.""" if t.shape[-1] != y.shape[-1]: raise ValueError( f'Invalid shapes ({t.shape}, {y.shape}) for a linear spline.' ) _FnT = TypeVar('_FnT', bound=Callable[Ellipsis, Iterable[Any]]) def iterate_in_separate_thread( queue_size = 3, ): """Decorator factory that iterates a function in a separate thread. Args: queue_size: Keep at most queue_size elements in memory. Returns: Decorator that will iterate a function in a separate thread. """ def decorator( fn, ): def result_fn<fim_suffix>(*args, **kwargs): results_queue = queue.Queue(queue_size) populating_data = True populating_data_lock = threading.Lock() def thread_fn(): # Mark has_data as a variable that's outside of thread_fn # Otherwise, `populating_data = True` creates a local variable nonlocal populating_data try: for item in fn(*args, **kwargs): results_queue.put(item) finally: # Set populating_data to False regardless of exceptions to stop # iterations with populating_data_lock: populating_data = False # Use executor + futures instead of Thread to propagate exceptions with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor: thread_fn_future = executor.submit(thread_fn) while True: with populating_data_lock: if not populating_data and results_queue.empty(): break get_start = time.time() try: # Set timeout to allow for exceptions to be propagated. 
next_value = results_queue.get(timeout=1.0) except queue.Empty: continue logging.info('Got data in %0.3fs', time.time() - get_start) yield next_value # Thread exception will be raised here thread_fn_future.result() return result_fn return decorator <fim_middle>
null
METHOD
complete_current_header_empty_completion