diff --git a/.gitattributes b/.gitattributes index b7c35705b9fcfdb94022672a28dd5b2d9f4b8762..24ff1e975eeea1810c939ec560fd60ccebe319aa 100644 --- a/.gitattributes +++ b/.gitattributes @@ -41,3 +41,11 @@ examples/test16.mov filter=lfs diff=lfs merge=lfs -text examples/test17.mov filter=lfs diff=lfs merge=lfs -text examples/test18.mov filter=lfs diff=lfs merge=lfs -text examples/test19.mov filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj filter=lfs diff=lfs merge=lfs -text +third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_gpu.obj filter=lfs diff=lfs merge=lfs -text diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..e9a73a55cf7ecb136437c7a4aacf8cc6bf517250 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,21 @@ +chumpy @ git+https://github.com/mattloper/chumpy +numpy==1.22.3 +yacs +joblib +scikit-image +opencv-python +imageio[ffmpeg] +matplotlib +tensorboard +smplx +progress +einops +mmcv==1.3.9 +timm==0.4.9 +munkres +xtcocotools>=1.8 +loguru +setuptools==59.5.0 +tqdm +ultralytics +gdown==4.6.0 \ No newline at end of file diff --git a/test.py b/test.py new file mode 100644 index 0000000000000000000000000000000000000000..d83a39fa183dc9ee60a25f37b05e7807b066152c --- /dev/null +++ b/test.py @@ -0,0 +1,230 @@ +import os +import argparse +import os.path as osp +from glob import glob +from collections import defaultdict + +import cv2 +import torch +import joblib +import numpy as np +from loguru import logger +from progress.bar import Bar + +from configs.config import get_cfg_defaults +from lib.data.datasets import CustomDataset +from lib.utils.imutils import avg_preds +from lib.utils.transforms import matrix_to_axis_angle +from lib.models import build_network, build_body_model +from lib.models.preproc.detector import DetectionModel +from lib.models.preproc.extractor import FeatureExtractor +from lib.models.smplify import TemporalSMPLify + +try: + from lib.models.preproc.slam import SLAMModel + + _run_global = True +except: + logger.info('DPVO is not properly installed. 
Only estimate in local coordinates !') + _run_global = False + + +def run(cfg, + video, + output_pth, + network, + calib=None, + run_global=True, + save_pkl=False, + visualize=False, + run_smplify=False): + cap = cv2.VideoCapture(video) + assert cap.isOpened(), f'Failed to load video file {video}' + fps = cap.get(cv2.CAP_PROP_FPS) + length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) + width, height = cap.get(cv2.CAP_PROP_FRAME_WIDTH), cap.get(cv2.CAP_PROP_FRAME_HEIGHT) + + # Whether or not estimating motion in global coordinates + run_global = run_global and _run_global + + # Preprocess + with torch.no_grad(): + if not (osp.exists(osp.join(output_pth, 'tracking_results.pth')) and + osp.exists(osp.join(output_pth, 'slam_results.pth'))): + + detector = DetectionModel(cfg.DEVICE.lower()) + extractor = FeatureExtractor(cfg.DEVICE.lower(), cfg.FLIP_EVAL) + + if run_global: + slam = SLAMModel(video, output_pth, width, height, calib) + else: + slam = None + + bar = Bar('Preprocess: 2D detection and SLAM', fill='#', max=length) + while (cap.isOpened()): + flag, img = cap.read() + if not flag: break + + # 2D detection and tracking + detector.track(img, fps, length) + + # SLAM + if slam is not None: + slam.track() + + bar.next() + + tracking_results = detector.process(fps) + + if slam is not None: + slam_results = slam.process() + else: + slam_results = np.zeros((length, 7)) + slam_results[:, 3] = 1.0 # Unit quaternion + + # Extract image features + # TODO: Merge this into the previous while loop with an online bbox smoothing. + tracking_results = extractor.run(video, tracking_results) + logger.info('Complete Data preprocessing!') + + # Save the processed data + joblib.dump(tracking_results, osp.join(output_pth, 'tracking_results.pth')) + joblib.dump(slam_results, osp.join(output_pth, 'slam_results.pth')) + logger.info(f'Save processed data at {output_pth}') + + # If the processed data already exists, load the processed data + else: + tracking_results = joblib.load(osp.join(output_pth, 'tracking_results.pth')) + slam_results = joblib.load(osp.join(output_pth, 'slam_results.pth')) + logger.info(f'Already processed data exists at {output_pth} ! 
Load the data .') + + # Build dataset + dataset = CustomDataset(cfg, tracking_results, slam_results, width, height, fps) + + # run WHAM + results = defaultdict(dict) + + n_subjs = len(dataset) + for subj in range(n_subjs): + + with torch.no_grad(): + if cfg.FLIP_EVAL: + # Forward pass with flipped input + flipped_batch = dataset.load_data(subj, True) + _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = flipped_batch + flipped_pred = network(x, inits, features, mask=mask, init_root=init_root, cam_angvel=cam_angvel, + return_y_up=True, **kwargs) + + # Forward pass with normal input + batch = dataset.load_data(subj) + _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = batch + pred = network(x, inits, features, mask=mask, init_root=init_root, cam_angvel=cam_angvel, + return_y_up=True, **kwargs) + + # Merge two predictions + flipped_pose, flipped_shape = flipped_pred['pose'].squeeze(0), flipped_pred['betas'].squeeze(0) + pose, shape = pred['pose'].squeeze(0), pred['betas'].squeeze(0) + flipped_pose, pose = flipped_pose.reshape(-1, 24, 6), pose.reshape(-1, 24, 6) + avg_pose, avg_shape = avg_preds(pose, shape, flipped_pose, flipped_shape) + avg_pose = avg_pose.reshape(-1, 144) + avg_contact = (flipped_pred['contact'][..., [2, 3, 0, 1]] + pred['contact']) / 2 + + # Refine trajectory with merged prediction + network.pred_pose = avg_pose.view_as(network.pred_pose) + network.pred_shape = avg_shape.view_as(network.pred_shape) + network.pred_contact = avg_contact.view_as(network.pred_contact) + output = network.forward_smpl(**kwargs) + pred = network.refine_trajectory(output, cam_angvel, return_y_up=True) + + else: + # data + batch = dataset.load_data(subj) + _id, x, inits, features, mask, init_root, cam_angvel, frame_id, kwargs = batch + + # inference + pred = network(x, inits, features, mask=mask, init_root=init_root, cam_angvel=cam_angvel, + return_y_up=True, **kwargs) + + # if False: + if run_smplify: + smplify = TemporalSMPLify(smpl, img_w=width, img_h=height, device=cfg.DEVICE) + input_keypoints = dataset.tracking_results[_id]['keypoints'] + pred = smplify.fit(pred, input_keypoints, **kwargs) + + with torch.no_grad(): + network.pred_pose = pred['pose'] + network.pred_shape = pred['betas'] + network.pred_cam = pred['cam'] + output = network.forward_smpl(**kwargs) + pred = network.refine_trajectory(output, cam_angvel, return_y_up=True) + + # ========= Store results ========= # + pred_body_pose = matrix_to_axis_angle(pred['poses_body']).cpu().numpy().reshape(-1, 69) + pred_root = matrix_to_axis_angle(pred['poses_root_cam']).cpu().numpy().reshape(-1, 3) + pred_root_world = matrix_to_axis_angle(pred['poses_root_world']).cpu().numpy().reshape(-1, 3) + pred_pose = np.concatenate((pred_root, pred_body_pose), axis=-1) + pred_pose_world = np.concatenate((pred_root_world, pred_body_pose), axis=-1) + pred_trans = (pred['trans_cam'] - network.output.offset).cpu().numpy() + + results[_id]['pose'] = pred_pose + results[_id]['trans'] = pred_trans + results[_id]['pose_world'] = pred_pose_world + results[_id]['trans_world'] = pred['trans_world'].cpu().squeeze(0).numpy() + results[_id]['betas'] = pred['betas'].cpu().squeeze(0).numpy() + results[_id]['verts'] = (pred['verts_cam'] + pred['trans_cam'].unsqueeze(1)).cpu().numpy() + results[_id]['frame_ids'] = frame_id + + if save_pkl: + joblib.dump(results, osp.join(output_pth, "wham_output.pkl")) + + # Visualize + if visualize: + from lib.vis.run_vis import run_vis_on_demo + with torch.no_grad(): + run_vis_on_demo(cfg, 
video, results, output_pth, network.smpl, vis_global=run_global) + + +if __name__ == '__main__': + + VIDEO_PATH = "examples/test19.mov" + OUTPUT_PATH = "output/demo" + CALIB_PATH = None + ESTIMATE_LOCAL_ONLY = False + VISUALIZE = True + SAVE_PKL = True + RUN_SMPLIFY = False + GENDER = 'male' + + + cfg = get_cfg_defaults() + cfg.merge_from_file('configs/yamls/demo.yaml') + + logger.info(f'GPU name -> {torch.cuda.get_device_name()}') + logger.info(f'GPU feat -> {torch.cuda.get_device_properties("cuda")}') + + # ========= Load WHAM ========= # + smpl_batch_size = cfg.TRAIN.BATCH_SIZE * cfg.DATASET.SEQLEN + smpl = build_body_model(device=cfg.DEVICE, gender=GENDER, batch_size=smpl_batch_size) + network = build_network(cfg, smpl) + network.eval() + + # Output folder + sequence = '.'.join(VIDEO_PATH.split('/')[-1].split('.')[:-1]) + output_pth = osp.join(OUTPUT_PATH, sequence) + os.makedirs(output_pth, exist_ok=True) + + faces_np = network.smpl.get_faces() + np.save(osp.join(output_pth, f'faces_{GENDER}.npy'), faces_np) + + run(cfg, + VIDEO_PATH, + output_pth, + network, + CALIB_PATH, + run_global=not ESTIMATE_LOCAL_ONLY, + save_pkl=SAVE_PKL, + visualize=VISUALIZE, + run_smplify=RUN_SMPLIFY) + + print() + logger.info('Done !') diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/cuda_ba.cp39-win_amd64.pyd b/third-party/DPVO/build/lib.win-amd64-3.9/cuda_ba.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..04f76738a9a502dcba08f87bcad74151e6c32922 Binary files /dev/null and b/third-party/DPVO/build/lib.win-amd64-3.9/cuda_ba.cp39-win_amd64.pyd differ diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/cuda_corr.cp39-win_amd64.pyd b/third-party/DPVO/build/lib.win-amd64-3.9/cuda_corr.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..848d0470fbce169ebddcf1d41eeaa290911f5c08 Binary files /dev/null and b/third-party/DPVO/build/lib.win-amd64-3.9/cuda_corr.cp39-win_amd64.pyd differ diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/__init__.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/__init__.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e94d30c390bcff235acfb7feee2ea3c4554e011b --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/__init__.py @@ -0,0 +1 @@ +from .correlation import corr, patchify \ No newline at end of file diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/correlation.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/correlation.py new file mode 100644 index 0000000000000000000000000000000000000000..8cf59af17c7e4c14327eb000afcb480b8881f271 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/altcorr/correlation.py @@ -0,0 +1,74 @@ +import torch +import cuda_corr + +class CorrLayer(torch.autograd.Function): + @staticmethod + def forward(ctx, fmap1, fmap2, coords, ii, jj, radius, dropout): + """ forward correlation """ + ctx.save_for_backward(fmap1, fmap2, coords, ii, jj) + ctx.radius = radius + ctx.dropout = dropout + corr, = cuda_corr.forward(fmap1, fmap2, coords, ii, jj, radius) + + return corr + + @staticmethod + def backward(ctx, grad): + """ backward correlation """ + fmap1, fmap2, coords, ii, jj = ctx.saved_tensors + + if ctx.dropout < 1: + perm = 
torch.rand(len(ii), device="cuda") < ctx.dropout + coords = coords[:,perm] + grad = grad[:,perm] + ii = ii[perm] + jj = jj[perm] + + fmap1_grad, fmap2_grad = \ + cuda_corr.backward(fmap1, fmap2, coords, ii, jj, grad, ctx.radius) + + return fmap1_grad, fmap2_grad, None, None, None, None, None + + +class PatchLayer(torch.autograd.Function): + @staticmethod + def forward(ctx, net, coords, radius): + """ forward patchify """ + ctx.radius = radius + ctx.save_for_backward(net, coords) + + patches, = cuda_corr.patchify_forward(net, coords, radius) + return patches + + @staticmethod + def backward(ctx, grad): + """ backward patchify """ + net, coords = ctx.saved_tensors + grad, = cuda_corr.patchify_backward(net, coords, grad, ctx.radius) + + return grad, None, None + +def patchify(net, coords, radius, mode='bilinear'): + """ extract patches """ + + patches = PatchLayer.apply(net, coords, radius) + + if mode == 'bilinear': + offset = (coords - coords.floor()).to(net.device) + dx, dy = offset[:,:,None,None,None].unbind(dim=-1) + + d = 2 * radius + 1 + x00 = (1-dy) * (1-dx) * patches[...,:d,:d] + x01 = (1-dy) * ( dx) * patches[...,:d,1:] + x10 = ( dy) * (1-dx) * patches[...,1:,:d] + x11 = ( dy) * ( dx) * patches[...,1:,1:] + + return x00 + x01 + x10 + x11 + + return patches + + +def corr(fmap1, fmap2, coords, ii, jj, radius=1, dropout=1): + return CorrLayer.apply(fmap1, fmap2, coords, ii, jj, radius, dropout) + + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/ba.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/ba.py new file mode 100644 index 0000000000000000000000000000000000000000..033f881d3e59e54c35235163c1ba5ad9307962f7 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/ba.py @@ -0,0 +1,182 @@ +import torch +from torch_scatter import scatter_sum + +from . import fastba +from . import lietorch +from .lietorch import SE3 + +from .utils import Timer + +from . 
import projective_ops as pops + +class CholeskySolver(torch.autograd.Function): + @staticmethod + def forward(ctx, H, b): + # don't crash training if cholesky decomp fails + U, info = torch.linalg.cholesky_ex(H) + + if torch.any(info): + ctx.failed = True + return torch.zeros_like(b) + + xs = torch.cholesky_solve(b, U) + ctx.save_for_backward(U, xs) + ctx.failed = False + + return xs + + @staticmethod + def backward(ctx, grad_x): + if ctx.failed: + return None, None + + U, xs = ctx.saved_tensors + dz = torch.cholesky_solve(grad_x, U) + dH = -torch.matmul(xs, dz.transpose(-1,-2)) + + return dH, dz + +# utility functions for scattering ops +def safe_scatter_add_mat(A, ii, jj, n, m): + v = (ii >= 0) & (jj >= 0) & (ii < n) & (jj < m) + return scatter_sum(A[:,v], ii[v]*m + jj[v], dim=1, dim_size=n*m) + +def safe_scatter_add_vec(b, ii, n): + v = (ii >= 0) & (ii < n) + return scatter_sum(b[:,v], ii[v], dim=1, dim_size=n) + +# apply retraction operator to inv-depth maps +def disp_retr(disps, dz, ii): + ii = ii.to(device=dz.device) + return disps + scatter_sum(dz, ii, dim=1, dim_size=disps.shape[1]) + +# apply retraction operator to poses +def pose_retr(poses, dx, ii): + ii = ii.to(device=dx.device) + return poses.retr(scatter_sum(dx, ii, dim=1, dim_size=poses.shape[1])) + +def block_matmul(A, B): + """ block matrix multiply """ + b, n1, m1, p1, q1 = A.shape + b, n2, m2, p2, q2 = B.shape + A = A.permute(0, 1, 3, 2, 4).reshape(b, n1*p1, m1*q1) + B = B.permute(0, 1, 3, 2, 4).reshape(b, n2*p2, m2*q2) + return torch.matmul(A, B).reshape(b, n1, p1, m2, q2).permute(0, 1, 3, 2, 4) + +def block_solve(A, B, ep=1.0, lm=1e-4): + """ block matrix solve """ + b, n1, m1, p1, q1 = A.shape + b, n2, m2, p2, q2 = B.shape + A = A.permute(0, 1, 3, 2, 4).reshape(b, n1*p1, m1*q1) + B = B.permute(0, 1, 3, 2, 4).reshape(b, n2*p2, m2*q2) + + A = A + (ep + lm * A) * torch.eye(n1*p1, device=A.device) + + X = CholeskySolver.apply(A, B) + return X.reshape(b, n1, p1, m2, q2).permute(0, 1, 3, 2, 4) + + +def block_show(A): + import matplotlib.pyplot as plt + b, n1, m1, p1, q1 = A.shape + A = A.permute(0, 1, 3, 2, 4).reshape(b, n1*p1, m1*q1) + plt.imshow(A[0].detach().cpu().numpy()) + plt.show() + +def BA(poses, patches, intrinsics, targets, weights, lmbda, ii, jj, kk, bounds, ep=100.0, PRINT=False, fixedp=1, structure_only=False): + """ bundle adjustment """ + + b = 1 + n = max(ii.max().item(), jj.max().item()) + 1 + + coords, v, (Ji, Jj, Jz) = \ + pops.transform(poses, patches, intrinsics, ii, jj, kk, jacobian=True) + + p = coords.shape[3] + r = targets - coords[...,p//2,p//2,:] + + v *= (r.norm(dim=-1) < 250).float() + + in_bounds = \ + (coords[...,p//2,p//2,0] > bounds[0]) & \ + (coords[...,p//2,p//2,1] > bounds[1]) & \ + (coords[...,p//2,p//2,0] < bounds[2]) & \ + (coords[...,p//2,p//2,1] < bounds[3]) + + v *= in_bounds.float() + + if PRINT: + print((r * v[...,None]).norm(dim=-1).mean().item()) + + r = (v[...,None] * r).unsqueeze(dim=-1) + weights = (v[...,None] * weights).unsqueeze(dim=-1) + + wJiT = (weights * Ji).transpose(2,3) + wJjT = (weights * Jj).transpose(2,3) + wJzT = (weights * Jz).transpose(2,3) + + Bii = torch.matmul(wJiT, Ji) + Bij = torch.matmul(wJiT, Jj) + Bji = torch.matmul(wJjT, Ji) + Bjj = torch.matmul(wJjT, Jj) + + Eik = torch.matmul(wJiT, Jz) + Ejk = torch.matmul(wJjT, Jz) + + vi = torch.matmul(wJiT, r) + vj = torch.matmul(wJjT, r) + + # fix first pose + ii = ii.clone() + jj = jj.clone() + + n = n - fixedp + ii = ii - fixedp + jj = jj - fixedp + + kx, kk = torch.unique(kk, return_inverse=True, 
sorted=True) + m = len(kx) + + B = safe_scatter_add_mat(Bii, ii, ii, n, n).view(b, n, n, 6, 6) + \ + safe_scatter_add_mat(Bij, ii, jj, n, n).view(b, n, n, 6, 6) + \ + safe_scatter_add_mat(Bji, jj, ii, n, n).view(b, n, n, 6, 6) + \ + safe_scatter_add_mat(Bjj, jj, jj, n, n).view(b, n, n, 6, 6) + + E = safe_scatter_add_mat(Eik, ii, kk, n, m).view(b, n, m, 6, 1) + \ + safe_scatter_add_mat(Ejk, jj, kk, n, m).view(b, n, m, 6, 1) + + C = safe_scatter_add_vec(torch.matmul(wJzT, Jz), kk, m) + + v = safe_scatter_add_vec(vi, ii, n).view(b, n, 1, 6, 1) + \ + safe_scatter_add_vec(vj, jj, n).view(b, n, 1, 6, 1) + + w = safe_scatter_add_vec(torch.matmul(wJzT, r), kk, m) + + if isinstance(lmbda, torch.Tensor): + lmbda = lmbda.reshape(*C.shape) + + Q = 1.0 / (C + lmbda) + + ### solve w/ schur complement ### + EQ = E * Q[:,None] + + if structure_only or n == 0: + dZ = (Q * w).view(b, -1, 1, 1) + + else: + S = B - block_matmul(EQ, E.permute(0,2,1,4,3)) + y = v - block_matmul(EQ, w.unsqueeze(dim=2)) + dX = block_solve(S, y, ep=ep, lm=1e-4) + + dZ = Q * (w - block_matmul(E.permute(0,2,1,4,3), dX).squeeze(dim=-1)) + dX = dX.view(b, -1, 6) + dZ = dZ.view(b, -1, 1, 1) + + x, y, disps = patches.unbind(dim=2) + disps = disp_retr(disps, dZ, kx).clamp(min=1e-3, max=10.0) + patches = torch.stack([x, y, disps], dim=2) + + if not structure_only and n > 0: + poses = pose_retr(poses, dX, fixedp + torch.arange(n)) + + return poses, patches diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/blocks.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..ce91f07560345a3be3cf2f9f3b9fc1b740fb3b34 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/blocks.py @@ -0,0 +1,118 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + +import torch_scatter + +class LayerNorm1D(nn.Module): + def __init__(self, dim): + super(LayerNorm1D, self).__init__() + self.norm = nn.LayerNorm(dim, eps=1e-4) + + def forward(self, x): + return self.norm(x.transpose(1,2)).transpose(1,2) + +class GatedResidual(nn.Module): + def __init__(self, dim): + super().__init__() + + self.gate = nn.Sequential( + nn.Linear(dim, dim), + nn.Sigmoid()) + + self.res = nn.Sequential( + nn.Linear(dim, dim), + nn.ReLU(inplace=True), + nn.Linear(dim, dim)) + + def forward(self, x): + return x + self.gate(x) * self.res(x) + +class SoftAgg(nn.Module): + def __init__(self, dim=512, expand=True): + super(SoftAgg, self).__init__() + self.dim = dim + self.expand = expand + self.f = nn.Linear(self.dim, self.dim) + self.g = nn.Linear(self.dim, self.dim) + self.h = nn.Linear(self.dim, self.dim) + + def forward(self, x, ix): + _, jx = torch.unique(ix, return_inverse=True) + w = torch_scatter.scatter_softmax(self.g(x), jx, dim=1) + y = torch_scatter.scatter_sum(self.f(x) * w, jx, dim=1) + + if self.expand: + return self.h(y)[:,jx] + + return self.h(y) + +class SoftAggBasic(nn.Module): + def __init__(self, dim=512, expand=True): + super(SoftAggBasic, self).__init__() + self.dim = dim + self.expand = expand + self.f = nn.Linear(self.dim, self.dim) + self.g = nn.Linear(self.dim, 1) + self.h = nn.Linear(self.dim, self.dim) + + def forward(self, x, ix): + _, jx = torch.unique(ix, return_inverse=True) + w = torch_scatter.scatter_softmax(self.g(x), jx, dim=1) + y = torch_scatter.scatter_sum(self.f(x) * w, jx, dim=1) + + if self.expand: + return self.h(y)[:,jx] + + return self.h(y) + + +### Gradient Clipping and Zeroing Operations ### + +GRAD_CLIP = 0.1 + +class 
GradClip(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x + + @staticmethod + def backward(ctx, grad_x): + grad_x = torch.where(torch.isnan(grad_x), torch.zeros_like(grad_x), grad_x) + return grad_x.clamp(min=-0.01, max=0.01) + +class GradientClip(nn.Module): + def __init__(self): + super(GradientClip, self).__init__() + + def forward(self, x): + return GradClip.apply(x) + +class GradZero(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x + + @staticmethod + def backward(ctx, grad_x): + grad_x = torch.where(torch.isnan(grad_x), torch.zeros_like(grad_x), grad_x) + grad_x = torch.where(torch.abs(grad_x) > GRAD_CLIP, torch.zeros_like(grad_x), grad_x) + return grad_x + +class GradientZero(nn.Module): + def __init__(self): + super(GradientZero, self).__init__() + + def forward(self, x): + return GradZero.apply(x) + + +class GradMag(torch.autograd.Function): + @staticmethod + def forward(ctx, x): + return x + + @staticmethod + def backward(ctx, grad_x): + print(grad_x.abs().mean()) + return grad_x diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/config.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/config.py new file mode 100644 index 0000000000000000000000000000000000000000..c31cacb86eaea8ec75f77439288c4eddc48861e4 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/config.py @@ -0,0 +1,27 @@ +from yacs.config import CfgNode as CN + +_C = CN() + +# max number of keyframes +_C.BUFFER_SIZE = 2048 + +# bias patch selection towards high gradient regions? +_C.GRADIENT_BIAS = True + +# VO config (increase for better accuracy) +_C.PATCHES_PER_FRAME = 80 +_C.REMOVAL_WINDOW = 20 +_C.OPTIMIZATION_WINDOW = 12 +_C.PATCH_LIFETIME = 12 + +# threshold for keyframe removal +_C.KEYFRAME_INDEX = 4 +_C.KEYFRAME_THRESH = 12.5 + +# camera motion model +_C.MOTION_MODEL = 'DAMPED_LINEAR' +_C.MOTION_DAMPING = 0.5 + +_C.MIXED_PRECISION = True + +cfg = _C diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/__init__.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d3f5a12faa99758192ecc4ed3fc22c9249232e86 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/__init__.py @@ -0,0 +1 @@ + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/augmentation.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/augmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..caf8d94384cd69f741e5678b946f9f4f5b6a9184 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/augmentation.py @@ -0,0 +1,66 @@ +import torch +import torchvision.transforms as transforms +import numpy as np +import torch.nn.functional as F + + +class RGBDAugmentor: + """ perform augmentation on RGB-D video """ + + def __init__(self, crop_size): + self.crop_size = crop_size + self.augcolor = transforms.Compose([ + transforms.ToPILImage(), + transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.2/3.14), + transforms.RandomGrayscale(p=0.1), + transforms.RandomInvert(p=0.1), + transforms.ToTensor()]) + + self.max_scale = 0.5 + + def spatial_transform(self, images, depths, poses, intrinsics): + """ cropping and resizing """ + ht, wd = images.shape[2:] + + max_scale = self.max_scale + min_scale = np.log2(np.maximum( + (self.crop_size[0] + 1) / float(ht), + (self.crop_size[1] + 1) / float(wd))) + + scale = 1 + if np.random.rand() < 0.8: + scale = 2 ** 
np.random.uniform(0.0, max_scale) + + intrinsics = scale * intrinsics + + ht1 = int(scale * ht) + wd1 = int(scale * wd) + + depths = depths.unsqueeze(dim=1) + + images = F.interpolate(images, (ht1, wd1), mode='bicubic', align_corners=False) + depths = F.interpolate(depths, (ht1, wd1), recompute_scale_factor=False) + + # always perform center crop (TODO: try non-center crops) + y0 = (images.shape[2] - self.crop_size[0]) // 2 + x0 = (images.shape[3] - self.crop_size[1]) // 2 + + intrinsics = intrinsics - torch.tensor([0.0, 0.0, x0, y0]) + images = images[:, :, y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + depths = depths[:, :, y0:y0+self.crop_size[0], x0:x0+self.crop_size[1]] + + depths = depths.squeeze(dim=1) + return images, poses, depths, intrinsics + + def color_transform(self, images): + """ color jittering """ + num, ch, ht, wd = images.shape + images = images.permute(1, 2, 3, 0).reshape(ch, ht, wd*num) + images = 255 * self.augcolor(images[[2,1,0]] / 255.0) + return images[[2,1,0]].reshape(ch, ht, wd, num).permute(3,0,1,2).contiguous() + + def __call__(self, images, poses, depths, intrinsics): + if np.random.rand() < 0.5: + images = self.color_transform(images) + + return self.spatial_transform(images, depths, poses, intrinsics) diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/base.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..0b80ade51a145807dfb071c51b92b62e8aea2cd1 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/base.py @@ -0,0 +1,176 @@ +import numpy as np +import torch +import torch.utils.data as data +import torch.nn.functional as F + +import csv +import os +import cv2 +import math +import random +import json +import pickle +import os.path as osp + +from .augmentation import RGBDAugmentor +from .rgbd_utils import * + +class RGBDDataset(data.Dataset): + def __init__(self, name, datapath, n_frames=4, crop_size=[480,640], fmin=10.0, fmax=75.0, aug=True, sample=True): + """ Base class for RGBD dataset """ + self.aug = None + self.root = datapath + self.name = name + + self.aug = aug + self.sample = sample + + self.n_frames = n_frames + self.fmin = fmin # exclude very easy examples + self.fmax = fmax # exclude very hard examples + + if self.aug: + self.aug = RGBDAugmentor(crop_size=crop_size) + + # building dataset is expensive, cache so only needs to be performed once + cur_path = osp.dirname(osp.abspath(__file__)) + if not os.path.isdir(osp.join(cur_path, 'cache')): + os.mkdir(osp.join(cur_path, 'cache')) + + self.scene_info = \ + pickle.load(open('datasets/TartanAir.pickle', 'rb'))[0] + + self._build_dataset_index() + + def _build_dataset_index(self): + self.dataset_index = [] + for scene in self.scene_info: + if not self.__class__.is_test_scene(scene): + graph = self.scene_info[scene]['graph'] + for i in graph: + if i < len(graph) - 65: + self.dataset_index.append((scene, i)) + else: + print("Reserving {} for validation".format(scene)) + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) + + @staticmethod + def depth_read(depth_file): + return np.load(depth_file) + + def build_frame_graph(self, poses, depths, intrinsics, f=16, max_flow=256): + """ compute optical flow distance between all pairs of frames """ + def read_disp(fn): + depth = self.__class__.depth_read(fn)[f//2::f, f//2::f] + depth[depth < 0.01] = np.mean(depth) + return 1.0 / depth + + poses = np.array(poses) + intrinsics = 
np.array(intrinsics) / f + + disps = np.stack(list(map(read_disp, depths)), 0) + d = f * compute_distance_matrix_flow(poses, disps, intrinsics) + + graph = {} + for i in range(d.shape[0]): + j, = np.where(d[i] < max_flow) + graph[i] = (j, d[i,j]) + + return graph + + def __getitem__(self, index): + """ return training video """ + + index = index % len(self.dataset_index) + scene_id, ix = self.dataset_index[index] + + frame_graph = self.scene_info[scene_id]['graph'] + images_list = self.scene_info[scene_id]['images'] + depths_list = self.scene_info[scene_id]['depths'] + poses_list = self.scene_info[scene_id]['poses'] + intrinsics_list = self.scene_info[scene_id]['intrinsics'] + + # stride = np.random.choice([1,2,3]) + + d = np.random.uniform(self.fmin, self.fmax) + s = 1 + + inds = [ ix ] + + while len(inds) < self.n_frames: + # get other frames within flow threshold + + if self.sample: + k = (frame_graph[ix][1] > self.fmin) & (frame_graph[ix][1] < self.fmax) + frames = frame_graph[ix][0][k] + + # prefer frames forward in time + if np.count_nonzero(frames[frames > ix]): + ix = np.random.choice(frames[frames > ix]) + + elif ix + 1 < len(images_list): + ix = ix + 1 + + elif np.count_nonzero(frames): + ix = np.random.choice(frames) + + else: + i = frame_graph[ix][0].copy() + g = frame_graph[ix][1].copy() + + g[g > d] = -1 + if s > 0: + g[i <= ix] = -1 + else: + g[i >= ix] = -1 + + if len(g) > 0 and np.max(g) > 0: + ix = i[np.argmax(g)] + else: + if ix + s >= len(images_list) or ix + s < 0: + s *= -1 + + ix = ix + s + + inds += [ ix ] + + + images, depths, poses, intrinsics = [], [], [], [] + for i in inds: + images.append(self.__class__.image_read(images_list[i])) + depths.append(self.__class__.depth_read(depths_list[i])) + poses.append(poses_list[i]) + intrinsics.append(intrinsics_list[i]) + + images = np.stack(images).astype(np.float32) + depths = np.stack(depths).astype(np.float32) + poses = np.stack(poses).astype(np.float32) + intrinsics = np.stack(intrinsics).astype(np.float32) + + images = torch.from_numpy(images).float() + images = images.permute(0, 3, 1, 2) + + disps = torch.from_numpy(1.0 / depths) + poses = torch.from_numpy(poses) + intrinsics = torch.from_numpy(intrinsics) + + if self.aug: + images, poses, disps, intrinsics = \ + self.aug(images, poses, disps, intrinsics) + + # normalize depth + s = .7 * torch.quantile(disps, .98) + disps = disps / s + poses[...,:3] *= s + + return images, poses, disps, intrinsics + + def __len__(self): + return len(self.dataset_index) + + def __imul__(self, x): + self.dataset_index *= x + return self diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/factory.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/factory.py new file mode 100644 index 0000000000000000000000000000000000000000..6e136c09368af8f7d157f31c2da0159ceb248de4 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/factory.py @@ -0,0 +1,26 @@ + +import pickle +import os +import os.path as osp + +# RGBD-Dataset +from .tartan import TartanAir + +def dataset_factory(dataset_list, **kwargs): + """ create a combined dataset """ + + from torch.utils.data import ConcatDataset + + dataset_map = { + 'tartan': (TartanAir, ), + } + + db_list = [] + for key in dataset_list: + # cache datasets for faster future loading + db = dataset_map[key][0](**kwargs) + + print("Dataset {} has {} images".format(key, len(db))) + db_list.append(db) + + return ConcatDataset(db_list) diff --git 
a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/frame_utils.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/frame_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..113027722aae21db56d9a4887b6535b37922864f --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/frame_utils.py @@ -0,0 +1,164 @@ +import numpy as np +from PIL import Image +from os.path import * +import re +import cv2 +cv2.setNumThreads(0) + + +TAG_CHAR = np.array([202021.25], np.float32) + +def readFlowKITTI(filename): + flow = cv2.imread(filename, cv2.IMREAD_ANYDEPTH|cv2.IMREAD_COLOR) + flow = flow[:,:,::-1].astype(np.float32) + flow, valid = flow[:, :, :2], flow[:, :, 2] + flow = (flow - 2**15) / 64.0 + return flow, valid + +def readFlow(fn): + """ Read .flo file in Middlebury format""" + # Code adapted from: + # http://stackoverflow.com/questions/28013200/reading-middlebury-flow-files-with-python-bytes-array-numpy + + # WARNING: this will work on little-endian architectures (eg Intel x86) only! + # print 'fn = %s'%(fn) + with open(fn, 'rb') as f: + magic = np.fromfile(f, np.float32, count=1) + if 202021.25 != magic: + print('Magic number incorrect. Invalid .flo file') + return None + else: + w = np.fromfile(f, np.int32, count=1) + h = np.fromfile(f, np.int32, count=1) + # print 'Reading %d x %d flo file\n' % (w, h) + data = np.fromfile(f, np.float32, count=2*int(w)*int(h)) + # Reshape data into 3D array (columns, rows, bands) + # The reshape here is for visualization, the original code is (w,h,2) + return np.resize(data, (int(h), int(w), 2)) + +def readPFM(file): + file = open(file, 'rb') + + color = None + width = None + height = None + scale = None + endian = None + + header = file.readline().rstrip() + if header == b'PF': + color = True + elif header == b'Pf': + color = False + else: + raise Exception('Not a PFM file.') + + try: + dim_match = re.match(rb'^(\d+)\s(\d+)\s$', file.readline()) + except: + dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline()) + + if dim_match: + width, height = map(int, dim_match.groups()) + else: + raise Exception('Malformed PFM header.') + + scale = float(file.readline().rstrip()) + if scale < 0: # little-endian + endian = '<' + scale = -scale + else: + endian = '>' # big-endian + + data = np.fromfile(file, endian + 'f') + shape = (height, width, 3) if color else (height, width) + + data = np.reshape(data, shape) + data = np.flipud(data) + return data + + +def writeFlow(filename,uv,v=None): + """ Write optical flow to file. + + If v is None, uv is assumed to contain both u and v channels, + stacked in depth. + Original code by Deqing Sun, adapted from Daniel Scharstein. + """ + nBands = 2 + + if v is None: + assert(uv.ndim == 3) + assert(uv.shape[2] == 2) + u = uv[:,:,0] + v = uv[:,:,1] + else: + u = uv + + assert(u.shape == v.shape) + height,width = u.shape + f = open(filename,'wb') + # write the header + f.write(TAG_CHAR) + np.array(width).astype(np.int32).tofile(f) + np.array(height).astype(np.int32).tofile(f) + # arrange into matrix form + tmp = np.zeros((height, width*nBands)) + tmp[:,np.arange(width)*2] = u + tmp[:,np.arange(width)*2 + 1] = v + tmp.astype(np.float32).tofile(f) + f.close() + + +def readDPT(filename): + """ Read depth data from file, return as numpy array. """ + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + TAG_FLOAT = 202021.25 + TAG_CHAR = 'PIEH' + assert check == TAG_FLOAT, ' depth_read:: Wrong tag in flow file (should be: {0}, is: {1}). 
Big-endian machine? '.format(TAG_FLOAT,check) + width = np.fromfile(f,dtype=np.int32,count=1)[0] + height = np.fromfile(f,dtype=np.int32,count=1)[0] + size = width*height + assert width > 0 and height > 0 and size > 1 and size < 100000000, ' depth_read:: Wrong input size (width = {0}, height = {1}).'.format(width,height) + depth = np.fromfile(f,dtype=np.float32,count=-1).reshape((height,width)) + return depth + +def cam_read(filename): + """ Read camera data, return (M,N) tuple. + M is the intrinsic matrix, N is the extrinsic matrix, so that + x = M*N*X, + with x being a point in homogeneous image pixel coordinates, X being a + point in homogeneous world coordinates.""" + f = open(filename,'rb') + check = np.fromfile(f,dtype=np.float32,count=1)[0] + M = np.fromfile(f,dtype='float64',count=9).reshape((3,3)) + N = np.fromfile(f,dtype='float64',count=12).reshape((3,4)) + + E = np.eye(4) + E[0:3,:] = N + + fx, fy, cx, cy = M[0,0], M[1,1], M[0,2], M[1,2] + kvec = np.array([fx, fy, cx, cy]) + + q = Rotation.from_matrix(E[:3,:3]).as_quat() + pvec = np.concatenate([E[:3,3], q], 0) + + return pvec, kvec + + +def read_gen(file_name, pil=False): + ext = splitext(file_name)[-1] + if ext == '.png' or ext == '.jpeg' or ext == '.ppm' or ext == '.jpg': + return Image.open(file_name) + elif ext == '.bin' or ext == '.raw': + return np.load(file_name) + elif ext == '.flo': + return readFlow(file_name).astype(np.float32) + elif ext == '.pfm': + return readPFM(file_name).astype(np.float32) + elif ext == '.dpt': + return readDPT(file_name).astype(np.float32) + elif ext == '.cam': + return cam_read(file_name) + return [] diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/rgbd_utils.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/rgbd_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ddd0c651c786d4932fb59492059f34145f3be01b --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/rgbd_utils.py @@ -0,0 +1,188 @@ +import numpy as np +import os.path as osp + +import torch +from ..lietorch import SE3 + +from scipy.spatial.transform import Rotation + +def parse_list(filepath, skiprows=0): + """ read list data """ + data = np.loadtxt(filepath, delimiter=' ', dtype=np.unicode_, skiprows=skiprows) + return data + +def associate_frames(tstamp_image, tstamp_depth, tstamp_pose, max_dt=1.0): + """ pair images, depths, and poses """ + associations = [] + for i, t in enumerate(tstamp_image): + if tstamp_pose is None: + j = np.argmin(np.abs(tstamp_depth - t)) + if (np.abs(tstamp_depth[j] - t) < max_dt): + associations.append((i, j)) + + else: + j = np.argmin(np.abs(tstamp_depth - t)) + k = np.argmin(np.abs(tstamp_pose - t)) + + if (np.abs(tstamp_depth[j] - t) < max_dt) and \ + (np.abs(tstamp_pose[k] - t) < max_dt): + associations.append((i, j, k)) + + return associations + +def loadtum(datapath, frame_rate=-1): + """ read video data in tum-rgbd format """ + if osp.isfile(osp.join(datapath, 'groundtruth.txt')): + pose_list = osp.join(datapath, 'groundtruth.txt') + + elif osp.isfile(osp.join(datapath, 'pose.txt')): + pose_list = osp.join(datapath, 'pose.txt') + + else: + return None, None, None, None + + image_list = osp.join(datapath, 'rgb.txt') + depth_list = osp.join(datapath, 'depth.txt') + + calib_path = osp.join(datapath, 'calibration.txt') + intrinsic = None + if osp.isfile(calib_path): + intrinsic = np.loadtxt(calib_path, delimiter=' ') + intrinsic = intrinsic.astype(np.float64) + + image_data = parse_list(image_list) + depth_data = 
parse_list(depth_list) + pose_data = parse_list(pose_list, skiprows=1) + pose_vecs = pose_data[:,1:].astype(np.float64) + + tstamp_image = image_data[:,0].astype(np.float64) + tstamp_depth = depth_data[:,0].astype(np.float64) + tstamp_pose = pose_data[:,0].astype(np.float64) + associations = associate_frames(tstamp_image, tstamp_depth, tstamp_pose) + + # print(len(tstamp_image)) + # print(len(associations)) + + indicies = range(len(associations))[::5] + + # indicies = [ 0 ] + # for i in range(1, len(associations)): + # t0 = tstamp_image[associations[indicies[-1]][0]] + # t1 = tstamp_image[associations[i][0]] + # if t1 - t0 > 1.0 / frame_rate: + # indicies += [ i ] + + images, poses, depths, intrinsics, tstamps = [], [], [], [], [] + for ix in indicies: + (i, j, k) = associations[ix] + images += [ osp.join(datapath, image_data[i,1]) ] + depths += [ osp.join(datapath, depth_data[j,1]) ] + poses += [ pose_vecs[k] ] + tstamps += [ tstamp_image[i] ] + + if intrinsic is not None: + intrinsics += [ intrinsic ] + + return images, depths, poses, intrinsics, tstamps + + +def all_pairs_distance_matrix(poses, beta=2.5): + """ compute distance matrix between all pairs of poses """ + poses = np.array(poses, dtype=np.float32) + poses[:,:3] *= beta # scale to balence rot + trans + poses = SE3(torch.from_numpy(poses)) + + r = (poses[:,None].inv() * poses[None,:]).log() + return r.norm(dim=-1).cpu().numpy() + +def pose_matrix_to_quaternion(pose): + """ convert 4x4 pose matrix to (t, q) """ + q = Rotation.from_matrix(pose[:3, :3]).as_quat() + return np.concatenate([pose[:3, 3], q], axis=0) + +def compute_distance_matrix_flow(poses, disps, intrinsics): + """ compute flow magnitude between all pairs of frames """ + if not isinstance(poses, SE3): + poses = torch.from_numpy(poses).float().cuda()[None] + poses = SE3(poses).inv() + + disps = torch.from_numpy(disps).float().cuda()[None] + intrinsics = torch.from_numpy(intrinsics).float().cuda()[None] + + N = poses.shape[1] + + ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N)) + ii = ii.reshape(-1).cuda() + jj = jj.reshape(-1).cuda() + + MAX_FLOW = 100.0 + matrix = np.zeros((N, N), dtype=np.float32) + + s = 2048 + for i in range(0, ii.shape[0], s): + flow1, val1 = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s]) + flow2, val2 = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s]) + + flow = torch.stack([flow1, flow2], dim=2) + val = torch.stack([val1, val2], dim=2) + + mag = flow.norm(dim=-1).clamp(max=MAX_FLOW) + mag = mag.view(mag.shape[1], -1) + val = val.view(val.shape[1], -1) + + mag = (mag * val).mean(-1) / val.mean(-1) + mag[val.mean(-1) < 0.7] = np.inf + + i1 = ii[i:i+s].cpu().numpy() + j1 = jj[i:i+s].cpu().numpy() + matrix[i1, j1] = mag.cpu().numpy() + + return matrix + + +def compute_distance_matrix_flow2(poses, disps, intrinsics, beta=0.4): + """ compute flow magnitude between all pairs of frames """ + # if not isinstance(poses, SE3): + # poses = torch.from_numpy(poses).float().cuda()[None] + # poses = SE3(poses).inv() + + # disps = torch.from_numpy(disps).float().cuda()[None] + # intrinsics = torch.from_numpy(intrinsics).float().cuda()[None] + + N = poses.shape[1] + + ii, jj = torch.meshgrid(torch.arange(N), torch.arange(N)) + ii = ii.reshape(-1) + jj = jj.reshape(-1) + + MAX_FLOW = 128.0 + matrix = np.zeros((N, N), dtype=np.float32) + + s = 2048 + for i in range(0, ii.shape[0], s): + flow1a, val1a = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s], tonly=True) + flow1b, val1b = 
pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s]) + flow2a, val2a = pops.induced_flow(poses, disps, intrinsics, jj[i:i+s], ii[i:i+s], tonly=True) + flow2b, val2b = pops.induced_flow(poses, disps, intrinsics, ii[i:i+s], jj[i:i+s]) + + flow1 = flow1a + beta * flow1b + val1 = val1a * val2b + + flow2 = flow2a + beta * flow2b + val2 = val2a * val2b + + flow = torch.stack([flow1, flow2], dim=2) + val = torch.stack([val1, val2], dim=2) + + mag = flow.norm(dim=-1).clamp(max=MAX_FLOW) + mag = mag.view(mag.shape[1], -1) + val = val.view(val.shape[1], -1) + + mag = (mag * val).mean(-1) / val.mean(-1) + mag[val.mean(-1) < 0.8] = np.inf + + i1 = ii[i:i+s].cpu().numpy() + j1 = jj[i:i+s].cpu().numpy() + matrix[i1, j1] = mag.cpu().numpy() + + return matrix diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/tartan.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/tartan.py new file mode 100644 index 0000000000000000000000000000000000000000..a854725fc96c4a9eff035ae0e7a1e854fb06df5a --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/data_readers/tartan.py @@ -0,0 +1,110 @@ + +import numpy as np +import torch +import glob +import cv2 +import os +import os.path as osp + +from ..lietorch import SE3 +from .base import RGBDDataset + +# cur_path = osp.dirname(osp.abspath(__file__)) +# test_split = osp.join(cur_path, 'tartan_test.txt') +# test_split = open(test_split).read().split() + + +test_split = [ + "abandonedfactory/abandonedfactory/Easy/P011", + "abandonedfactory/abandonedfactory/Hard/P011", + "abandonedfactory_night/abandonedfactory_night/Easy/P013", + "abandonedfactory_night/abandonedfactory_night/Hard/P014", + "amusement/amusement/Easy/P008", + "amusement/amusement/Hard/P007", + "carwelding/carwelding/Easy/P007", + "endofworld/endofworld/Easy/P009", + "gascola/gascola/Easy/P008", + "gascola/gascola/Hard/P009", + "hospital/hospital/Easy/P036", + "hospital/hospital/Hard/P049", + "japanesealley/japanesealley/Easy/P007", + "japanesealley/japanesealley/Hard/P005", + "neighborhood/neighborhood/Easy/P021", + "neighborhood/neighborhood/Hard/P017", + "ocean/ocean/Easy/P013", + "ocean/ocean/Hard/P009", + "office2/office2/Easy/P011", + "office2/office2/Hard/P010", + "office/office/Hard/P007", + "oldtown/oldtown/Easy/P007", + "oldtown/oldtown/Hard/P008", + "seasidetown/seasidetown/Easy/P009", + "seasonsforest/seasonsforest/Easy/P011", + "seasonsforest/seasonsforest/Hard/P006", + "seasonsforest_winter/seasonsforest_winter/Easy/P009", + "seasonsforest_winter/seasonsforest_winter/Hard/P018", + "soulcity/soulcity/Easy/P012", + "soulcity/soulcity/Hard/P009", + "westerndesert/westerndesert/Easy/P013", + "westerndesert/westerndesert/Hard/P007", +] + + +class TartanAir(RGBDDataset): + + # scale depths to balance rot & trans + DEPTH_SCALE = 5.0 + + def __init__(self, mode='training', **kwargs): + self.mode = mode + self.n_frames = 2 + super(TartanAir, self).__init__(name='TartanAir', **kwargs) + + @staticmethod + def is_test_scene(scene): + # print(scene, any(x in scene for x in test_split)) + return any(x in scene for x in test_split) + + def _build_dataset(self): + from tqdm import tqdm + print("Building TartanAir dataset") + + scene_info = {} + scenes = glob.glob(osp.join(self.root, '*/*/*/*')) + for scene in tqdm(sorted(scenes)): + images = sorted(glob.glob(osp.join(scene, 'image_left/*.png'))) + depths = sorted(glob.glob(osp.join(scene, 'depth_left/*.npy'))) + + if len(images) != len(depths): + continue + + poses = np.loadtxt(osp.join(scene, 
'pose_left.txt'), delimiter=' ') + poses = poses[:, [1, 2, 0, 4, 5, 3, 6]] + poses[:,:3] /= TartanAir.DEPTH_SCALE + intrinsics = [TartanAir.calib_read()] * len(images) + + # graph of co-visible frames based on flow + graph = self.build_frame_graph(poses, depths, intrinsics) + + scene = '/'.join(scene.split('/')) + scene_info[scene] = {'images': images, 'depths': depths, + 'poses': poses, 'intrinsics': intrinsics, 'graph': graph} + + return scene_info + + @staticmethod + def calib_read(): + return np.array([320.0, 320.0, 320.0, 240.0]) + + @staticmethod + def image_read(image_file): + return cv2.imread(image_file) + + @staticmethod + def depth_read(depth_file): + depth = np.load(depth_file) / TartanAir.DEPTH_SCALE + depth[depth==np.nan] = 1.0 + depth[depth==np.inf] = 1.0 + return depth + + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/dpvo.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/dpvo.py new file mode 100644 index 0000000000000000000000000000000000000000..e7b562d979a947a55bd7cf3aa122167d2884d59a --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/dpvo.py @@ -0,0 +1,402 @@ +import torch +import numpy as np +import torch.nn.functional as F + +from . import fastba +from . import altcorr +from . import lietorch +from .lietorch import SE3 + +from .net import VONet +from .utils import * +from . import projective_ops as pops + +autocast = torch.cuda.amp.autocast +Id = SE3.Identity(1, device="cuda") + + +class DPVO: + def __init__(self, cfg, network, ht=480, wd=640, viz=False): + self.cfg = cfg + self.load_weights(network) + self.is_initialized = False + self.enable_timing = False + + self.n = 0 # number of frames + self.m = 0 # number of patches + self.M = self.cfg.PATCHES_PER_FRAME + self.N = self.cfg.BUFFER_SIZE + + self.ht = ht # image height + self.wd = wd # image width + + DIM = self.DIM + RES = self.RES + + ### state attributes ### + self.tlist = [] + self.counter = 0 + + # dummy image for visualization + self.image_ = torch.zeros(self.ht, self.wd, 3, dtype=torch.uint8, device="cpu") + + self.tstamps_ = torch.zeros(self.N, dtype=torch.long, device="cuda") + self.poses_ = torch.zeros(self.N, 7, dtype=torch.float, device="cuda") + self.patches_ = torch.zeros(self.N, self.M, 3, self.P, self.P, dtype=torch.float, device="cuda") + self.intrinsics_ = torch.zeros(self.N, 4, dtype=torch.float, device="cuda") + + self.points_ = torch.zeros(self.N * self.M, 3, dtype=torch.float, device="cuda") + self.colors_ = torch.zeros(self.N, self.M, 3, dtype=torch.uint8, device="cuda") + + self.index_ = torch.zeros(self.N, self.M, dtype=torch.long, device="cuda") + self.index_map_ = torch.zeros(self.N, dtype=torch.long, device="cuda") + + ### network attributes ### + self.mem = 32 + + if self.cfg.MIXED_PRECISION: + self.kwargs = kwargs = {"device": "cuda", "dtype": torch.half} + else: + self.kwargs = kwargs = {"device": "cuda", "dtype": torch.float} + + self.imap_ = torch.zeros(self.mem, self.M, DIM, **kwargs) + self.gmap_ = torch.zeros(self.mem, self.M, 128, self.P, self.P, **kwargs) + + ht = ht // RES + wd = wd // RES + + self.fmap1_ = torch.zeros(1, self.mem, 128, ht // 1, wd // 1, **kwargs) + self.fmap2_ = torch.zeros(1, self.mem, 128, ht // 4, wd // 4, **kwargs) + + # feature pyramid + self.pyramid = (self.fmap1_, self.fmap2_) + + self.net = torch.zeros(1, 0, DIM, **kwargs) + self.ii = torch.as_tensor([], dtype=torch.long, device="cuda") + self.jj = torch.as_tensor([], dtype=torch.long, device="cuda") + self.kk = torch.as_tensor([], dtype=torch.long, device="cuda") + + # 
initialize poses to identity matrix + self.poses_[:,6] = 1.0 + + # store relative poses for removed frames + self.delta = {} + + self.viewer = None + if viz: + self.start_viewer() + + def load_weights(self, network): + # load network from checkpoint file + if isinstance(network, str): + from collections import OrderedDict + state_dict = torch.load(network) + new_state_dict = OrderedDict() + for k, v in state_dict.items(): + if "update.lmbda" not in k: + new_state_dict[k.replace('module.', '')] = v + + self.network = VONet() + self.network.load_state_dict(new_state_dict) + + else: + self.network = network + + # steal network attributes + self.DIM = self.network.DIM + self.RES = self.network.RES + self.P = self.network.P + + self.network.cuda() + self.network.eval() + + # if self.cfg.MIXED_PRECISION: + # self.network.half() + + + def start_viewer(self): + from dpviewer import Viewer + + intrinsics_ = torch.zeros(1, 4, dtype=torch.float32, device="cuda") + + self.viewer = Viewer( + self.image_, + self.poses_, + self.points_, + self.colors_, + intrinsics_) + + @property + def poses(self): + return self.poses_.view(1, self.N, 7) + + @property + def patches(self): + return self.patches_.view(1, self.N*self.M, 3, 3, 3) + + @property + def intrinsics(self): + return self.intrinsics_.view(1, self.N, 4) + + @property + def ix(self): + return self.index_.view(-1) + + @property + def imap(self): + return self.imap_.view(1, self.mem * self.M, self.DIM) + + @property + def gmap(self): + return self.gmap_.view(1, self.mem * self.M, 128, 3, 3) + + def get_pose(self, t): + if t in self.traj: + return SE3(self.traj[t]) + + t0, dP = self.delta[t] + return dP * self.get_pose(t0) + + def terminate(self): + """ interpolate missing poses """ + self.traj = {} + for i in range(self.n): + self.traj[self.tstamps_[i].item()] = self.poses_[i] + + poses = [self.get_pose(t) for t in range(self.counter)] + poses = lietorch.stack(poses, dim=0) + poses = poses.inv().data.cpu().numpy() + tstamps = np.array(self.tlist, dtype=np.float) + + if self.viewer is not None: + self.viewer.join() + + return poses, tstamps + + def corr(self, coords, indicies=None): + """ local correlation volume """ + ii, jj = indicies if indicies is not None else (self.kk, self.jj) + ii1 = ii % (self.M * self.mem) + jj1 = jj % (self.mem) + corr1 = altcorr.corr(self.gmap, self.pyramid[0], coords / 1, ii1, jj1, 3) + corr2 = altcorr.corr(self.gmap, self.pyramid[1], coords / 4, ii1, jj1, 3) + return torch.stack([corr1, corr2], -1).view(1, len(ii), -1) + + def reproject(self, indicies=None): + """ reproject patch k from i -> j """ + (ii, jj, kk) = indicies if indicies is not None else (self.ii, self.jj, self.kk) + coords = pops.transform(SE3(self.poses), self.patches, self.intrinsics, ii, jj, kk) + return coords.permute(0, 1, 4, 2, 3).contiguous() + + def append_factors(self, ii, jj): + self.jj = torch.cat([self.jj, jj]) + self.kk = torch.cat([self.kk, ii]) + self.ii = torch.cat([self.ii, self.ix[ii]]) + + net = torch.zeros(1, len(ii), self.DIM, **self.kwargs) + self.net = torch.cat([self.net, net], dim=1) + + def remove_factors(self, m): + self.ii = self.ii[~m] + self.jj = self.jj[~m] + self.kk = self.kk[~m] + self.net = self.net[:,~m] + + def motion_probe(self): + """ kinda hacky way to ensure enough motion for initialization """ + kk = torch.arange(self.m-self.M, self.m, device="cuda") + jj = self.n * torch.ones_like(kk) + ii = self.ix[kk] + + net = torch.zeros(1, len(ii), self.DIM, **self.kwargs) + coords = self.reproject(indicies=(ii, jj, kk)) + + 
with autocast(enabled=self.cfg.MIXED_PRECISION): + corr = self.corr(coords, indicies=(kk, jj)) + ctx = self.imap[:,kk % (self.M * self.mem)] + net, (delta, weight, _) = \ + self.network.update(net, ctx, corr, None, ii, jj, kk) + + return torch.quantile(delta.norm(dim=-1).float(), 0.5) + + def motionmag(self, i, j): + k = (self.ii == i) & (self.jj == j) + ii = self.ii[k] + jj = self.jj[k] + kk = self.kk[k] + + flow = pops.flow_mag(SE3(self.poses), self.patches, self.intrinsics, ii, jj, kk, beta=0.5) + return flow.mean().item() + + def keyframe(self): + + i = self.n - self.cfg.KEYFRAME_INDEX - 1 + j = self.n - self.cfg.KEYFRAME_INDEX + 1 + m = self.motionmag(i, j) + self.motionmag(j, i) + + if m / 2 < self.cfg.KEYFRAME_THRESH: + k = self.n - self.cfg.KEYFRAME_INDEX + t0 = self.tstamps_[k-1].item() + t1 = self.tstamps_[k].item() + + dP = SE3(self.poses_[k]) * SE3(self.poses_[k-1]).inv() + self.delta[t1] = (t0, dP) + + to_remove = (self.ii == k) | (self.jj == k) + self.remove_factors(to_remove) + + self.kk[self.ii > k] -= self.M + self.ii[self.ii > k] -= 1 + self.jj[self.jj > k] -= 1 + + for i in range(k, self.n-1): + self.tstamps_[i] = self.tstamps_[i+1] + self.colors_[i] = self.colors_[i+1] + self.poses_[i] = self.poses_[i+1] + self.patches_[i] = self.patches_[i+1] + self.intrinsics_[i] = self.intrinsics_[i+1] + + self.imap_[i%self.mem] = self.imap_[(i+1) % self.mem] + self.gmap_[i%self.mem] = self.gmap_[(i+1) % self.mem] + self.fmap1_[0,i%self.mem] = self.fmap1_[0,(i+1)%self.mem] + self.fmap2_[0,i%self.mem] = self.fmap2_[0,(i+1)%self.mem] + + self.n -= 1 + self.m-= self.M + + to_remove = self.ix[self.kk] < self.n - self.cfg.REMOVAL_WINDOW + self.remove_factors(to_remove) + + def update(self): + with Timer("other", enabled=self.enable_timing): + coords = self.reproject() + + with autocast(enabled=True): + corr = self.corr(coords) + ctx = self.imap[:,self.kk % (self.M * self.mem)] + self.net, (delta, weight, _) = \ + self.network.update(self.net, ctx, corr, None, self.ii, self.jj, self.kk) + + lmbda = torch.as_tensor([1e-4], device="cuda") + weight = weight.float() + target = coords[...,self.P//2,self.P//2] + delta.float() + + with Timer("BA", enabled=self.enable_timing): + t0 = self.n - self.cfg.OPTIMIZATION_WINDOW if self.is_initialized else 1 + t0 = max(t0, 1) + + try: + fastba.BA(self.poses, self.patches, self.intrinsics, + target, weight, lmbda, self.ii, self.jj, self.kk, t0, self.n, 2) + except: + print("Warning BA failed...") + + points = pops.point_cloud(SE3(self.poses), self.patches[:, :self.m], self.intrinsics, self.ix[:self.m]) + points = (points[...,1,1,:3] / points[...,1,1,3:]).reshape(-1, 3) + self.points_[:len(points)] = points[:] + + def __edges_all(self): + return flatmeshgrid( + torch.arange(0, self.m, device="cuda"), + torch.arange(0, self.n, device="cuda"), indexing='ij') + + def __edges_forw(self): + r=self.cfg.PATCH_LIFETIME + t0 = self.M * max((self.n - r), 0) + t1 = self.M * max((self.n - 1), 0) + return flatmeshgrid( + torch.arange(t0, t1, device="cuda"), + torch.arange(self.n-1, self.n, device="cuda"), indexing='ij') + + def __edges_back(self): + r=self.cfg.PATCH_LIFETIME + t0 = self.M * max((self.n - 1), 0) + t1 = self.M * max((self.n - 0), 0) + return flatmeshgrid(torch.arange(t0, t1, device="cuda"), + torch.arange(max(self.n-r, 0), self.n, device="cuda"), indexing='ij') + + def __call__(self, tstamp, image, intrinsics): + """ track new frame """ + + if (self.n+1) >= self.N: + raise Exception(f'The buffer size is too small. 
You can increase it using "--buffer {self.N*2}"') + + if self.viewer is not None: + self.viewer.update_image(image) + + image = 2 * (image[None,None] / 255.0) - 0.5 + + with autocast(enabled=self.cfg.MIXED_PRECISION): + fmap, gmap, imap, patches, _, clr = \ + self.network.patchify(image, + patches_per_image=self.cfg.PATCHES_PER_FRAME, + gradient_bias=self.cfg.GRADIENT_BIAS, + return_color=True) + + ### update state attributes ### + self.tlist.append(tstamp) + self.tstamps_[self.n] = self.counter + self.intrinsics_[self.n] = intrinsics / self.RES + + # color info for visualization + clr = (clr[0,:,[2,1,0]] + 0.5) * (255.0 / 2) + self.colors_[self.n] = clr.to(torch.uint8) + + self.index_[self.n + 1] = self.n + 1 + self.index_map_[self.n + 1] = self.m + self.M + + if self.n > 1: + if self.cfg.MOTION_MODEL == 'DAMPED_LINEAR': + P1 = SE3(self.poses_[self.n-1]) + P2 = SE3(self.poses_[self.n-2]) + + xi = self.cfg.MOTION_DAMPING * (P1 * P2.inv()).log() + tvec_qvec = (SE3.exp(xi) * P1).data + self.poses_[self.n] = tvec_qvec + else: + tvec_qvec = self.poses[self.n-1] + self.poses_[self.n] = tvec_qvec + + # TODO better depth initialization + patches[:,:,2] = torch.rand_like(patches[:,:,2,0,0,None,None]) + if self.is_initialized: + s = torch.median(self.patches_[self.n-3:self.n,:,2]) + patches[:,:,2] = s + + self.patches_[self.n] = patches + + ### update network attributes ### + self.imap_[self.n % self.mem] = imap.squeeze() + self.gmap_[self.n % self.mem] = gmap.squeeze() + self.fmap1_[:, self.n % self.mem] = F.avg_pool2d(fmap[0], 1, 1) + self.fmap2_[:, self.n % self.mem] = F.avg_pool2d(fmap[0], 4, 4) + + self.counter += 1 + if self.n > 0 and not self.is_initialized: + if self.motion_probe() < 2.0: + self.delta[self.counter - 1] = (self.counter - 2, Id[0]) + return + + self.n += 1 + self.m += self.M + + # relative pose + self.append_factors(*self.__edges_forw()) + self.append_factors(*self.__edges_back()) + + if self.n == 8 and not self.is_initialized: + self.is_initialized = True + + for itr in range(12): + self.update() + + elif self.is_initialized: + self.update() + self.keyframe() + + + + + + + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/extractor.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/extractor.py new file mode 100644 index 0000000000000000000000000000000000000000..0c0bc49f9bfda971a310bc455beca0cef58a804f --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/extractor.py @@ -0,0 +1,264 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class ResidualBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(ResidualBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, padding=1, stride=stride) + self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes) + self.norm2 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm3 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes) + self.norm2 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm3 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = 
nn.Sequential() + self.norm2 = nn.Sequential() + if not stride == 1: + self.norm3 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm3) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + + +class BottleneckBlock(nn.Module): + def __init__(self, in_planes, planes, norm_fn='group', stride=1): + super(BottleneckBlock, self).__init__() + + self.conv1 = nn.Conv2d(in_planes, planes//4, kernel_size=1, padding=0) + self.conv2 = nn.Conv2d(planes//4, planes//4, kernel_size=3, padding=1, stride=stride) + self.conv3 = nn.Conv2d(planes//4, planes, kernel_size=1, padding=0) + self.relu = nn.ReLU(inplace=True) + + num_groups = planes // 8 + + if norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm2 = nn.GroupNorm(num_groups=num_groups, num_channels=planes//4) + self.norm3 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + if not stride == 1: + self.norm4 = nn.GroupNorm(num_groups=num_groups, num_channels=planes) + + elif norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(planes//4) + self.norm2 = nn.BatchNorm2d(planes//4) + self.norm3 = nn.BatchNorm2d(planes) + if not stride == 1: + self.norm4 = nn.BatchNorm2d(planes) + + elif norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(planes//4) + self.norm2 = nn.InstanceNorm2d(planes//4) + self.norm3 = nn.InstanceNorm2d(planes) + if not stride == 1: + self.norm4 = nn.InstanceNorm2d(planes) + + elif norm_fn == 'none': + self.norm1 = nn.Sequential() + self.norm2 = nn.Sequential() + self.norm3 = nn.Sequential() + if not stride == 1: + self.norm4 = nn.Sequential() + + if stride == 1: + self.downsample = None + + else: + self.downsample = nn.Sequential( + nn.Conv2d(in_planes, planes, kernel_size=1, stride=stride), self.norm4) + + def forward(self, x): + y = x + y = self.relu(self.norm1(self.conv1(y))) + y = self.relu(self.norm2(self.conv2(y))) + y = self.relu(self.norm3(self.conv3(y))) + + if self.downsample is not None: + x = self.downsample(x) + + return self.relu(x+y) + +DIM=32 + +class BasicEncoder(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, multidim=False): + super(BasicEncoder, self).__init__() + self.norm_fn = norm_fn + self.multidim = multidim + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=DIM) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(DIM) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(DIM) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, DIM, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = DIM + self.layer1 = self._make_layer(DIM, stride=1) + self.layer2 = self._make_layer(2*DIM, stride=2) + self.layer3 = self._make_layer(4*DIM, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(4*DIM, output_dim, kernel_size=1) + + if self.multidim: + self.layer4 = self._make_layer(256, stride=2) + self.layer5 = self._make_layer(512, stride=2) + + self.in_planes = 256 + self.layer6 = self._make_layer(256, stride=1) + + self.in_planes = 128 + self.layer7 = self._make_layer(128, stride=1) + + self.up1 = nn.Conv2d(512, 256, 1) + self.up2 = nn.Conv2d(256, 128, 1) + self.conv3 = nn.Conv2d(128, output_dim, 
kernel_size=1) + + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + else: + self.dropout = None + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + b, n, c1, h1, w1 = x.shape + x = x.view(b*n, c1, h1, w1) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + + x = self.conv2(x) + + _, c2, h2, w2 = x.shape + return x.view(b, n, c2, h2, w2) + + +class BasicEncoder4(nn.Module): + def __init__(self, output_dim=128, norm_fn='batch', dropout=0.0, multidim=False): + super(BasicEncoder4, self).__init__() + self.norm_fn = norm_fn + self.multidim = multidim + + if self.norm_fn == 'group': + self.norm1 = nn.GroupNorm(num_groups=8, num_channels=DIM) + + elif self.norm_fn == 'batch': + self.norm1 = nn.BatchNorm2d(DIM) + + elif self.norm_fn == 'instance': + self.norm1 = nn.InstanceNorm2d(DIM) + + elif self.norm_fn == 'none': + self.norm1 = nn.Sequential() + + self.conv1 = nn.Conv2d(3, DIM, kernel_size=7, stride=2, padding=3) + self.relu1 = nn.ReLU(inplace=True) + + self.in_planes = DIM + self.layer1 = self._make_layer(DIM, stride=1) + self.layer2 = self._make_layer(2*DIM, stride=2) + + # output convolution + self.conv2 = nn.Conv2d(2*DIM, output_dim, kernel_size=1) + + if dropout > 0: + self.dropout = nn.Dropout2d(p=dropout) + else: + self.dropout = None + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.InstanceNorm2d, nn.GroupNorm)): + if m.weight is not None: + nn.init.constant_(m.weight, 1) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + + def _make_layer(self, dim, stride=1): + layer1 = ResidualBlock(self.in_planes, dim, self.norm_fn, stride=stride) + layer2 = ResidualBlock(dim, dim, self.norm_fn, stride=1) + layers = (layer1, layer2) + + self.in_planes = dim + return nn.Sequential(*layers) + + def forward(self, x): + b, n, c1, h1, w1 = x.shape + x = x.view(b*n, c1, h1, w1) + + x = self.conv1(x) + x = self.norm1(x) + x = self.relu1(x) + + x = self.layer1(x) + x = self.layer2(x) + + x = self.conv2(x) + + _, c2, h2, w2 = x.shape + return x.view(b, n, c2, h2, w2) diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/__init__.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..16ec66ade2ef4bc7b99673e0144a9e9a4bff2998 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/__init__.py @@ -0,0 +1 @@ +from .ba import BA, neighbors, reproject \ No newline at end of file diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/ba.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/ba.py new file mode 100644 index 0000000000000000000000000000000000000000..b97c1772be621fb90e026dfffab24dce22df629c --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/fastba/ba.py @@ -0,0 +1,8 @@ +import torch +import cuda_ba 
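+# Descriptive note (added comment, not upstream): `cuda_ba` is the compiled CUDA extension
+# that holds the bundle-adjustment kernels. The wrappers below keep the Python-facing API
+# thin: `neighbors` and `reproject` are re-exported unchanged, and `BA(...)` simply forwards
+# the raw pose tensor (`poses.data`), patches, intrinsics, the predicted targets/weights,
+# `lmbda` (presumably a damping/regularisation term), the factor edges (ii, jj, kk), the
+# optimization window [t0, t1) and the iteration count to `cuda_ba.forward`.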
+ +neighbors = cuda_ba.neighbors +reproject = cuda_ba.reproject + +def BA(poses, patches, intrinsics, target, weight, lmbda, ii, jj, kk, t0, t1, iterations=2): + return cuda_ba.forward(poses.data, patches, intrinsics, target, weight, lmbda, ii, jj, kk, t0, t1, iterations) \ No newline at end of file diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/__init__.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1b339f949cd91dace1ad74a8b9f7f9f692931771 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/__init__.py @@ -0,0 +1,2 @@ +__all__ = ['groups'] +from .groups import LieGroupParameter, SO3, RxSO3, SE3, Sim3, cat, stack diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/broadcasting.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/broadcasting.py new file mode 100644 index 0000000000000000000000000000000000000000..5522db67a6cad2a390f296964c13c2c1597812d6 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/broadcasting.py @@ -0,0 +1,31 @@ +import torch +import numpy as np + +def check_broadcastable(x, y): + assert len(x.shape) == len(y.shape) + for (n, m) in zip(x.shape[:-1], y.shape[:-1]): + assert n==m or n==1 or m==1 + +def broadcast_inputs(x, y): + """ Automatic broadcasting of missing dimensions """ + if y is None: + xs, xd = x.shape[:-1], x.shape[-1] + return (x.view(-1, xd).contiguous(), ), x.shape[:-1] + + check_broadcastable(x, y) + + xs, xd = x.shape[:-1], x.shape[-1] + ys, yd = y.shape[:-1], y.shape[-1] + out_shape = [max(n,m) for (n,m) in zip(xs,ys)] + + if x.shape[:-1] == y.shape[-1]: + x1 = x.view(-1, xd) + y1 = y.view(-1, yd) + + else: + x_expand = [m if n==1 else 1 for (n,m) in zip(xs, ys)] + y_expand = [n if m==1 else 1 for (n,m) in zip(xs, ys)] + x1 = x.repeat(x_expand + [1]).reshape(-1, xd).contiguous() + y1 = y.repeat(y_expand + [1]).reshape(-1, yd).contiguous() + + return (x1, y1), tuple(out_shape) diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/gradcheck.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/gradcheck.py new file mode 100644 index 0000000000000000000000000000000000000000..55f3e3864020cfae68f4b1558b59969eab9eadac --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/gradcheck.py @@ -0,0 +1,592 @@ +import torch + +TORCH_MAJOR = int(torch.__version__.split('.')[0]) +TORCH_MINOR = int(torch.__version__.split('.')[1]) + +from torch.types import _TensorOrTensors +if TORCH_MAJOR == 1 and TORCH_MINOR < 8: + from torch._six import container_abcs, istuple +else: + import collections.abc as container_abcs + +import torch.testing +from torch.overrides import is_tensor_like +from itertools import product +import warnings +from typing import Callable, Union, Optional, Iterable, List + +def zero_gradients(x): + if isinstance(x, torch.Tensor): + if x.grad is not None: + x.grad.detach_() + x.grad.zero_() + elif isinstance(x, container_abcs.Iterable): + for elem in x: + zero_gradients(elem) + + +def make_jacobian(input, num_out): + if is_tensor_like(input): + if not input.is_floating_point() and not input.is_complex(): + return None + if not input.requires_grad: + return None + return input.new_zeros((input.nelement(), num_out), dtype=input.dtype, layout=torch.strided) + elif isinstance(input, container_abcs.Iterable) and not isinstance(input, str): + jacobians = list(filter( + lambda x: x is not None, (make_jacobian(elem, num_out) for elem in 
input))) + if not jacobians: + return None + return type(input)(jacobians) # type: ignore + else: + return None + + +def iter_tensors(x: Union[torch.Tensor, Iterable[torch.Tensor]], only_requiring_grad: bool = False) -> Iterable[torch.Tensor]: + if is_tensor_like(x): + # mypy doesn't narrow type of `x` to torch.Tensor + if x.requires_grad or not only_requiring_grad: # type: ignore + yield x # type: ignore + elif isinstance(x, container_abcs.Iterable) and not isinstance(x, str): + for elem in x: + for result in iter_tensors(elem, only_requiring_grad): + yield result + +def get_numerical_jacobian(fn, input, target=None, eps=1e-3, grad_out=1.0): + """ + input: input to `fn` + target: the Tensors wrt whom Jacobians are calculated (default=`input`) + grad_out: grad output value used to calculate gradients. + + Note that `target` may not even be part of `input` to `fn`, so please be + **very careful** in this to not clone `target`. + """ + if target is None: + target = input + output_size = fn(input).numel() + jacobian = make_jacobian(target, output_size) + + # It's much easier to iterate over flattened lists of tensors. + # These are reference to the same objects in jacobian, so any changes + # will be reflected in it as well. + x_tensors = iter_tensors(target, True) + j_tensors = iter_tensors(jacobian) + + def update_jacobians(x, idx, d, d_idx, is_mkldnn=False): + + # compute_jacobian only works for pure real + # or pure imaginary delta + def compute_gradient(delta): + # we currently assume that the norm of delta equals eps + assert(delta == eps or delta == (eps * 1j)) + + def fn_out(): + if not is_mkldnn: + # x is a view into input and so this works + return fn(input).clone() + else: + # convert the dense tensor back to have mkldnn layout + return fn([x.to_mkldnn()]) + + orig = x[idx].item() + x[idx] = orig - delta + outa = fn_out() + x[idx] = orig + delta + outb = fn_out() + x[idx] = orig + r = (outb - outa) / (2 * eps) + return r.detach().reshape(-1) + + # for details on the algorithm used here, refer: + # Section 3.5.3 https://arxiv.org/pdf/1701.00392.pdf + # s = fn(z) where z = x for real valued input + # and z = x + yj for complex valued input + ds_dx = compute_gradient(eps) + if x.is_complex(): # C -> C, C -> R + ds_dy = compute_gradient(eps * 1j) + # conjugate wirtinger derivative + conj_w_d = 0.5 * (ds_dx + ds_dy * 1j) + # wirtinger derivative + w_d = 0.5 * (ds_dx - ds_dy * 1j) + d[d_idx] = grad_out.conjugate() * conj_w_d + grad_out * w_d.conj() + elif ds_dx.is_complex(): # R -> C + # w_d = conj_w_d = 0.5 * ds_dx + # dL_dz_conj = 0.5 * [grad_out.conj() * ds_dx + grad_out * ds_dx.conj()] + # = 0.5 * [grad_out.conj() * ds_dx + (grad_out.conj() * ds_dx).conj()] + # = 0.5 * 2 * real(grad_out.conj() * ds_dx) + # = real(grad_out.conj() * ds_dx) + d[d_idx] = torch.real(grad_out.conjugate() * ds_dx) + else: # R -> R + d[d_idx] = ds_dx * grad_out + + # TODO: compare structure + for x_tensor, d_tensor in zip(x_tensors, j_tensors): + if x_tensor.is_sparse: + def get_stride(size): + dim = len(size) + tmp = 1 + stride = [0] * dim + for i in reversed(range(dim)): + stride[i] = tmp + tmp *= size[i] + return stride + + x_nnz = x_tensor._nnz() + x_size = list(x_tensor.size()) + x_indices = x_tensor._indices().t() + x_values = x_tensor._values() + x_stride = get_stride(x_size) + + # Use .data here to get around the version check + x_values = x_values.data + + for i in range(x_nnz): + x_value = x_values[i] + for x_idx in product(*[range(m) for m in x_values.size()[1:]]): + indices = 
x_indices[i].tolist() + list(x_idx) + d_idx = sum(indices[k] * x_stride[k] for k in range(len(x_size))) + update_jacobians(x_value, x_idx, d_tensor, d_idx) + elif x_tensor.layout == torch._mkldnn: # type: ignore + # Use .data here to get around the version check + x_tensor = x_tensor.data + if len(input) != 1: + raise ValueError('gradcheck currently only supports functions with 1 input, but got: ', + len(input)) + for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])): + # this is really inefficient, but without indexing implemented, there's + # not really a better way than converting back and forth + x_tensor_dense = x_tensor.to_dense() + update_jacobians(x_tensor_dense, x_idx, d_tensor, d_idx, is_mkldnn=True) + else: + # Use .data here to get around the version check + x_tensor = x_tensor.data + for d_idx, x_idx in enumerate(product(*[range(m) for m in x_tensor.size()])): + update_jacobians(x_tensor, x_idx, d_tensor, d_idx) + + return jacobian + + +def get_analytical_jacobian(input, output, nondet_tol=0.0, grad_out=1.0): + # it is easier to call to_dense() on the sparse output than + # to modify analytical jacobian + if output.is_sparse: + raise ValueError('Sparse output is not supported at gradcheck yet. ' + 'Please call to_dense() on the output of fn for gradcheck.') + if output.layout == torch._mkldnn: # type: ignore + raise ValueError('MKLDNN output is not supported at gradcheck yet. ' + 'Please call to_dense() on the output of fn for gradcheck.') + diff_input_list = list(iter_tensors(input, True)) + jacobian = make_jacobian(input, output.numel()) + jacobian_reentrant = make_jacobian(input, output.numel()) + grad_output = torch.zeros_like(output, memory_format=torch.legacy_contiguous_format) + flat_grad_output = grad_output.view(-1) + reentrant = True + correct_grad_sizes = True + correct_grad_types = True + + for i in range(flat_grad_output.numel()): + flat_grad_output.zero_() + flat_grad_output[i] = grad_out + for jacobian_c in (jacobian, jacobian_reentrant): + grads_input = torch.autograd.grad(output, diff_input_list, grad_output, + retain_graph=True, allow_unused=True) + for jacobian_x, d_x, x in zip(jacobian_c, grads_input, diff_input_list): + if d_x is not None and d_x.size() != x.size(): + correct_grad_sizes = False + elif d_x is not None and d_x.dtype != x.dtype: + correct_grad_types = False + elif jacobian_x.numel() != 0: + if d_x is None: + jacobian_x[:, i].zero_() + else: + d_x_dense = d_x.to_dense() if not d_x.layout == torch.strided else d_x + assert jacobian_x[:, i].numel() == d_x_dense.numel() + jacobian_x[:, i] = d_x_dense.contiguous().view(-1) + + for jacobian_x, jacobian_reentrant_x in zip(jacobian, jacobian_reentrant): + if jacobian_x.numel() != 0 and (jacobian_x - jacobian_reentrant_x).abs().max() > nondet_tol: + reentrant = False + + return jacobian, reentrant, correct_grad_sizes, correct_grad_types + + +def _as_tuple(x): + if TORCH_MAJOR == 1 and TORCH_MINOR < 8: + b_tuple = istuple(x) + else: + b_tuple = isinstance(x, tuple) + + if b_tuple: + return x + elif isinstance(x, list): + return tuple(x) + else: + return x, + + + +def _differentiable_outputs(x): + return tuple(o for o in _as_tuple(x) if o.requires_grad) + + +# Note [VarArg of Tensors] +# ~~~~~~~~~~~~~~~~~~~~~~~~ +# 'func' accepts a vararg of tensors, which isn't expressable in the type system at the moment. +# If https://mypy.readthedocs.io/en/latest/additional_features.html?highlight=callable#extended-callable-types is accepted, +# the '...' 
first argument of Callable can be replaced with VarArg(Tensor). +# For now, we permit any input. +# the '...' first argument of Callable can be replaced with VarArg(Tensor). +# For now, we permit any input. + +def gradcheck( + func: Callable[..., Union[_TensorOrTensors]], # See Note [VarArg of Tensors] + inputs: _TensorOrTensors, + eps: float = 1e-6, + atol: float = 1e-5, + rtol: float = 1e-3, + raise_exception: bool = True, + check_sparse_nnz: bool = False, + nondet_tol: float = 0.0, + check_undefined_grad: bool = True, + check_grad_dtypes: bool = False +) -> bool: + r"""Check gradients computed via small finite differences against analytical + gradients w.r.t. tensors in :attr:`inputs` that are of floating point or complex type + and with ``requires_grad=True``. + + The check between numerical and analytical gradients uses :func:`~torch.allclose`. + + For complex functions, no notion of Jacobian exists. Gradcheck verifies if the numerical and + analytical values of Wirtinger and Conjugate Wirtinger derivative are consistent. The gradient + computation is done under the assumption that the overall function has a real valued output. + For functions with complex output, gradcheck compares the numerical and analytical gradients + for two values of :attr:`grad_output`: 1 and 1j. For more details, check out + :ref:`complex_autograd-doc`. + + .. note:: + The default values are designed for :attr:`input` of double precision. + This check will likely fail if :attr:`input` is of less precision, e.g., + ``FloatTensor``. + + .. warning:: + If any checked tensor in :attr:`input` has overlapping memory, i.e., + different indices pointing to the same memory address (e.g., from + :func:`torch.expand`), this check will likely fail because the numerical + gradients computed by point perturbation at such indices will change + values at all other indices that share the same memory address. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor or a tuple of Tensors + inputs (tuple of Tensor or Tensor): inputs to the function + eps (float, optional): perturbation for finite differences + atol (float, optional): absolute tolerance + rtol (float, optional): relative tolerance + raise_exception (bool, optional): indicating whether to raise an exception if + the check fails. The exception gives more information about the + exact nature of the failure. This is helpful when debugging gradchecks. + check_sparse_nnz (bool, optional): if True, gradcheck allows for SparseTensor input, + and for any SparseTensor at input, gradcheck will perform check at nnz positions only. + nondet_tol (float, optional): tolerance for non-determinism. When running + identical inputs through the differentiation, the results must either match + exactly (default, 0.0) or be within this tolerance. + check_undefined_grad (bool, options): if True, check if undefined output grads + are supported and treated as zeros, for ``Tensor`` outputs. 
+ + Returns: + True if all differences satisfy allclose condition + """ + def fail_test(msg): + if raise_exception: + raise RuntimeError(msg) + return False + + tupled_inputs = _as_tuple(inputs) + if not check_sparse_nnz and any(t.is_sparse for t in tupled_inputs if isinstance(t, torch.Tensor)): + return fail_test('gradcheck expects all tensor inputs are dense when check_sparse_nnz is set to False.') + + # Make sure that gradients are saved for at least one input + any_input_requiring_grad = False + for idx, inp in enumerate(tupled_inputs): + if is_tensor_like(inp) and inp.requires_grad: + if not (inp.dtype == torch.float64 or inp.dtype == torch.complex128): + warnings.warn( + f'Input #{idx} requires gradient and ' + 'is not a double precision floating point or complex. ' + 'This check will likely fail if all the inputs are ' + 'not of double precision floating point or complex. ') + content = inp._values() if inp.is_sparse else inp + # TODO: To cover more problematic cases, replace stride = 0 check with + # "any overlap in memory" once we have a proper function to check it. + if content.layout is not torch._mkldnn: # type: ignore + if not all(st > 0 or sz <= 1 for st, sz in zip(content.stride(), content.size())): + raise RuntimeError( + 'The {}th input has a dimension with stride 0. gradcheck only ' + 'supports inputs that are non-overlapping to be able to ' + 'compute the numerical gradients correctly. You should call ' + '.contiguous on the input before passing it to gradcheck.') + any_input_requiring_grad = True + inp.retain_grad() + if not any_input_requiring_grad: + raise ValueError( + 'gradcheck expects at least one input tensor to require gradient, ' + 'but none of the them have requires_grad=True.') + + func_out = func(*tupled_inputs) + output = _differentiable_outputs(func_out) + + if not output: + for i, o in enumerate(func_out): + def fn(input): + return _as_tuple(func(*input))[i] + numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps) + for n in numerical: + if torch.ne(n, 0).sum() > 0: + return fail_test('Numerical gradient for function expected to be zero') + return True + + for i, o in enumerate(output): + if not o.requires_grad: + continue + + def fn(input): + return _as_tuple(func(*input))[i] + + analytical, reentrant, correct_grad_sizes, correct_grad_types = get_analytical_jacobian(tupled_inputs, + o, + nondet_tol=nondet_tol) + numerical = get_numerical_jacobian(fn, tupled_inputs, eps=eps) + + return analytical, numerical + + out_is_complex = o.is_complex() + + if out_is_complex: + # analytical vjp with grad_out = 1.0j + analytical_with_imag_grad_out, reentrant_with_imag_grad_out, \ + correct_grad_sizes_with_imag_grad_out, correct_grad_types_with_imag_grad_out \ + = get_analytical_jacobian(tupled_inputs, o, nondet_tol=nondet_tol, grad_out=1j) + numerical_with_imag_grad_out = get_numerical_jacobian(fn, tupled_inputs, eps=eps, grad_out=1j) + + if not correct_grad_types and check_grad_dtypes: + return fail_test('Gradient has dtype mismatch') + + if out_is_complex and not correct_grad_types_with_imag_grad_out and check_grad_dtypes: + return fail_test('Gradient (calculated using complex valued grad output) has dtype mismatch') + + if not correct_grad_sizes: + return fail_test('Analytical gradient has incorrect size') + + if out_is_complex and not correct_grad_sizes_with_imag_grad_out: + return fail_test('Analytical gradient (calculated using complex valued grad output) has incorrect size') + + def checkIfNumericalAnalyticAreClose(a, n, j, error_str=''): + if not 
torch.allclose(a, n, rtol, atol): + return fail_test(error_str + 'Jacobian mismatch for output %d with respect to input %d,\n' + 'numerical:%s\nanalytical:%s\n' % (i, j, n, a)) + + inp_tensors = iter_tensors(tupled_inputs, True) + + for j, (a, n, inp) in enumerate(zip(analytical, numerical, inp_tensors)): + if a.numel() != 0 or n.numel() != 0: + if o.is_complex(): + # C -> C, R -> C + a_with_imag_grad_out = analytical_with_imag_grad_out[j] + n_with_imag_grad_out = numerical_with_imag_grad_out[j] + checkIfNumericalAnalyticAreClose(a_with_imag_grad_out, n_with_imag_grad_out, j, + "Gradients failed to compare equal for grad output = 1j. ") + if inp.is_complex(): + # C -> R, C -> C + checkIfNumericalAnalyticAreClose(a, n, j, + "Gradients failed to compare equal for grad output = 1. ") + else: + # R -> R, R -> C + checkIfNumericalAnalyticAreClose(a, n, j) + + + def not_reentrant_error(error_str=''): + error_msg = "Backward" + error_str + " is not reentrant, i.e., running backward with same \ + input and grad_output multiple times gives different values, \ + although analytical gradient matches numerical gradient. \ + The tolerance for nondeterminism was {}.".format(nondet_tol) + return fail_test(error_msg) + + if not reentrant: + return not_reentrant_error() + + if out_is_complex and not reentrant_with_imag_grad_out: + return not_reentrant_error(' (calculated using complex valued grad output)') + + # check if the backward multiplies by grad_output + output = _differentiable_outputs(func(*tupled_inputs)) + if any([o.requires_grad for o in output]): + diff_input_list: List[torch.Tensor] = list(iter_tensors(tupled_inputs, True)) + if not diff_input_list: + raise RuntimeError("no Tensors requiring grad found in input") + grads_input = torch.autograd.grad(output, diff_input_list, + [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output], + allow_unused=True) + for gi, di in zip(grads_input, diff_input_list): + if gi is None: + continue + if isinstance(gi, torch.Tensor) and gi.layout != torch.strided: + if gi.layout != di.layout: + return fail_test('grad is incorrect layout (' + str(gi.layout) + ' is not ' + str(di.layout) + ')') + if gi.layout == torch.sparse_coo: + if gi.sparse_dim() != di.sparse_dim(): + return fail_test('grad is sparse tensor, but has incorrect sparse_dim') + if gi.dense_dim() != di.dense_dim(): + return fail_test('grad is sparse tensor, but has incorrect dense_dim') + gi = gi.to_dense() + di = di.to_dense() + if not gi.eq(0).all(): + return fail_test('backward not multiplied by grad_output') + if gi.dtype != di.dtype or gi.device != di.device or gi.is_sparse != di.is_sparse: + return fail_test("grad is incorrect type") + if gi.size() != di.size(): + return fail_test('grad is incorrect size') + + if check_undefined_grad: + def warn_bc_breaking(): + warnings.warn(( + 'Backwards compatibility: New undefined gradient support checking ' + 'feature is enabled by default, but it may break existing callers ' + 'of this function. If this is true for you, you can call this ' + 'function with "check_undefined_grad=False" to disable the feature')) + + def check_undefined_grad_support(output_to_check): + grads_output = [torch.zeros_like(o, memory_format=torch.legacy_contiguous_format) for o in output_to_check] + try: + grads_input = torch.autograd.grad(output_to_check, + diff_input_list, + grads_output, + allow_unused=True) + except RuntimeError: + warn_bc_breaking() + return fail_test(( + 'Expected backward function to handle undefined output grads. 
' + 'Please look at "Notes about undefined output gradients" in ' + '"tools/autograd/derivatives.yaml"')) + + for gi, i in zip(grads_input, diff_input_list): + if (gi is not None) and (not gi.eq(0).all()): + warn_bc_breaking() + return fail_test(( + 'Expected all input grads to be undefined or zero when all output grads are undefined ' + 'or zero. Please look at "Notes about undefined output gradients" in ' + '"tools/autograd/derivatives.yaml"')) + return True + + # All backward functions must work properly if all output grads are undefined + outputs_to_check = [[ + torch._C._functions.UndefinedGrad()(o) for o in _differentiable_outputs(func(*tupled_inputs)) + # This check filters out Tensor-likes that aren't instances of Tensor. + if isinstance(o, torch.Tensor) + ]] + + # If there are multiple output grads, we should be able to undef one at a time without error + if len(outputs_to_check[0]) > 1: + for undef_grad_idx in range(len(output)): + output_to_check = _differentiable_outputs(func(*tupled_inputs)) + outputs_to_check.append([ + torch._C._functions.UndefinedGrad()(o) if idx == undef_grad_idx else o + for idx, o in enumerate(output_to_check)]) + + for output_to_check in outputs_to_check: + if not check_undefined_grad_support(output_to_check): + return False + + return True + + +def gradgradcheck( + func: Callable[..., _TensorOrTensors], # See Note [VarArg of Tensors] + inputs: _TensorOrTensors, + grad_outputs: Optional[_TensorOrTensors] = None, + eps: float = 1e-6, + atol: float = 1e-5, + rtol: float = 1e-3, + gen_non_contig_grad_outputs: bool = False, + raise_exception: bool = True, + nondet_tol: float = 0.0, + check_undefined_grad: bool = True, + check_grad_dtypes: bool = False +) -> bool: + r"""Check gradients of gradients computed via small finite differences + against analytical gradients w.r.t. tensors in :attr:`inputs` and + :attr:`grad_outputs` that are of floating point or complex type and with + ``requires_grad=True``. + + This function checks that backpropagating through the gradients computed + to the given :attr:`grad_outputs` are correct. + + The check between numerical and analytical gradients uses :func:`~torch.allclose`. + + .. note:: + The default values are designed for :attr:`input` and + :attr:`grad_outputs` of double precision. This check will likely fail if + they are of less precision, e.g., ``FloatTensor``. + + .. warning:: + If any checked tensor in :attr:`input` and :attr:`grad_outputs` has + overlapping memory, i.e., different indices pointing to the same memory + address (e.g., from :func:`torch.expand`), this check will likely fail + because the numerical gradients computed by point perturbation at such + indices will change values at all other indices that share the same + memory address. + + Args: + func (function): a Python function that takes Tensor inputs and returns + a Tensor or a tuple of Tensors + inputs (tuple of Tensor or Tensor): inputs to the function + grad_outputs (tuple of Tensor or Tensor, optional): The gradients with + respect to the function's outputs. + eps (float, optional): perturbation for finite differences + atol (float, optional): absolute tolerance + rtol (float, optional): relative tolerance + gen_non_contig_grad_outputs (bool, optional): if :attr:`grad_outputs` is + ``None`` and :attr:`gen_non_contig_grad_outputs` is ``True``, the + randomly generated gradient outputs are made to be noncontiguous + raise_exception (bool, optional): indicating whether to raise an exception if + the check fails. 
The exception gives more information about the + exact nature of the failure. This is helpful when debugging gradchecks. + nondet_tol (float, optional): tolerance for non-determinism. When running + identical inputs through the differentiation, the results must either match + exactly (default, 0.0) or be within this tolerance. Note that a small amount + of nondeterminism in the gradient will lead to larger inaccuracies in + the second derivative. + check_undefined_grad (bool, options): if True, check if undefined output grads + are supported and treated as zeros + + Returns: + True if all differences satisfy allclose condition + """ + tupled_inputs = _as_tuple(inputs) + + if grad_outputs is None: + # If grad_outputs is not specified, create random Tensors of the same + # shape, type, and device as the outputs + def randn_like(x): + y = torch.testing.randn_like( + x if (x.is_floating_point() or x.is_complex()) else x.double(), memory_format=torch.legacy_contiguous_format) + if gen_non_contig_grad_outputs: + y = torch.testing.make_non_contiguous(y) + return y.requires_grad_() + outputs = _as_tuple(func(*tupled_inputs)) + tupled_grad_outputs = tuple(randn_like(x) for x in outputs) + else: + tupled_grad_outputs = _as_tuple(grad_outputs) + + num_outputs = len(tupled_grad_outputs) + + def new_func(*args): + input_args = args[:-num_outputs] + grad_outputs = args[-num_outputs:] + outputs = _differentiable_outputs(func(*input_args)) + input_args = tuple(x for x in input_args if isinstance(x, torch.Tensor) and x.requires_grad) + grad_inputs = torch.autograd.grad(outputs, input_args, grad_outputs, create_graph=True) + return grad_inputs + + return gradcheck(new_func, tupled_inputs + tupled_grad_outputs, eps, atol, rtol, raise_exception, + nondet_tol=nondet_tol, check_undefined_grad=check_undefined_grad, + check_grad_dtypes=check_grad_dtypes) diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/group_ops.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/group_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..bad19a7d9cb7b2218e0ab68ef538742d477dec73 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/group_ops.py @@ -0,0 +1,102 @@ +import lietorch_backends +import torch +import torch.nn.functional as F + + + +class GroupOp(torch.autograd.Function): + """ group operation base class """ + + @classmethod + def forward(cls, ctx, group_id, *inputs): + ctx.group_id = group_id + ctx.save_for_backward(*inputs) + out = cls.forward_op(ctx.group_id, *inputs) + return out + + @classmethod + def backward(cls, ctx, grad): + error_str = "Backward operation not implemented for {}".format(cls) + assert cls.backward_op is not None, error_str + + inputs = ctx.saved_tensors + grad = grad.contiguous() + grad_inputs = cls.backward_op(ctx.group_id, grad, *inputs) + return (None, ) + tuple(grad_inputs) + + +class Exp(GroupOp): + """ exponential map """ + forward_op, backward_op = lietorch_backends.expm, lietorch_backends.expm_backward + +class Log(GroupOp): + """ logarithm map """ + forward_op, backward_op = lietorch_backends.logm, lietorch_backends.logm_backward + +class Inv(GroupOp): + """ group inverse """ + forward_op, backward_op = lietorch_backends.inv, lietorch_backends.inv_backward + +class Mul(GroupOp): + """ group multiplication """ + forward_op, backward_op = lietorch_backends.mul, lietorch_backends.mul_backward + +class Adj(GroupOp): + """ adjoint operator """ + forward_op, backward_op = lietorch_backends.adj, lietorch_backends.adj_backward 
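+# Descriptive note (added comment, not upstream): each GroupOp subclass below only binds a
+# (forward_op, backward_op) pair from the compiled `lietorch_backends` extension; the shared
+# GroupOp.forward/backward above do the actual dispatch on the group's integer id
+# (SO3, RxSO3, SE3, Sim3). Jinv and ToMatrix bind backward_op = None, so GroupOp.backward
+# will refuse to backpropagate through them.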
+ +class AdjT(GroupOp): + """ adjoint operator """ + forward_op, backward_op = lietorch_backends.adjT, lietorch_backends.adjT_backward + +class Act3(GroupOp): + """ action on point """ + forward_op, backward_op = lietorch_backends.act, lietorch_backends.act_backward + +class Act4(GroupOp): + """ action on point """ + forward_op, backward_op = lietorch_backends.act4, lietorch_backends.act4_backward + +class Jinv(GroupOp): + """ adjoint operator """ + forward_op, backward_op = lietorch_backends.Jinv, None + +class ToMatrix(GroupOp): + """ convert to matrix representation """ + forward_op, backward_op = lietorch_backends.as_matrix, None + + + + +### conversion operations to/from Euclidean embeddings ### + +class FromVec(torch.autograd.Function): + """ convert vector into group object """ + + @classmethod + def forward(cls, ctx, group_id, *inputs): + ctx.group_id = group_id + ctx.save_for_backward(*inputs) + return inputs[0] + + @classmethod + def backward(cls, ctx, grad): + inputs = ctx.saved_tensors + J = lietorch_backends.projector(ctx.group_id, *inputs) + return None, torch.matmul(grad.unsqueeze(-2), torch.linalg.pinv(J)).squeeze(-2) + +class ToVec(torch.autograd.Function): + """ convert group object to vector """ + + @classmethod + def forward(cls, ctx, group_id, *inputs): + ctx.group_id = group_id + ctx.save_for_backward(*inputs) + return inputs[0] + + @classmethod + def backward(cls, ctx, grad): + inputs = ctx.saved_tensors + J = lietorch_backends.projector(ctx.group_id, *inputs) + return None, torch.matmul(grad.unsqueeze(-2), J).squeeze(-2) + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/groups.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/groups.py new file mode 100644 index 0000000000000000000000000000000000000000..61b9290f68bb30c378174520a48f0b8be80fc3c2 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/groups.py @@ -0,0 +1,322 @@ +import torch +import numpy as np + +# group operations implemented in cuda +from .group_ops import Exp, Log, Inv, Mul, Adj, AdjT, Jinv, Act3, Act4, ToMatrix, ToVec, FromVec +from .broadcasting import broadcast_inputs + + +class LieGroupParameter(torch.Tensor): + """ Wrapper class for LieGroup """ + + from torch._C import _disabled_torch_function_impl + __torch_function__ = _disabled_torch_function_impl + + def __new__(cls, group, requires_grad=True): + data = torch.zeros(group.tangent_shape, + device=group.data.device, + dtype=group.data.dtype, + requires_grad=True) + + return torch.Tensor._make_subclass(cls, data, requires_grad) + + def __init__(self, group): + self.group = group + + def retr(self): + return self.group.retr(self) + + def log(self): + return self.retr().log() + + def inv(self): + return self.retr().inv() + + def adj(self, a): + return self.retr().adj(a) + + def __mul__(self, other): + if isinstance(other, LieGroupParameter): + return self.retr() * other.retr() + else: + return self.retr() * other + + def add_(self, update, alpha): + self.group = self.group.exp(alpha*update) * self.group + + def __getitem__(self, index): + return self.retr().__getitem__(index) + + +class LieGroup: + """ Base class for Lie Group """ + + def __init__(self, data): + self.data = data + + def __repr__(self): + return "{}: size={}, device={}, dtype={}".format( + self.group_name, self.shape, self.device, self.dtype) + + @property + def shape(self): + return self.data.shape[:-1] + + @property + def device(self): + return self.data.device + + @property + def dtype(self): + return self.data.dtype + + def 
vec(self): + return self.apply_op(ToVec, self.data) + + @property + def tangent_shape(self): + return self.data.shape[:-1] + (self.manifold_dim,) + + @classmethod + def Identity(cls, *batch_shape, **kwargs): + """ Construct identity element with batch shape """ + + if isinstance(batch_shape[0], tuple): + batch_shape = batch_shape[0] + + elif isinstance(batch_shape[0], list): + batch_shape = tuple(batch_shape[0]) + + numel = np.prod(batch_shape) + data = cls.id_elem.reshape(1,-1) + + if 'device' in kwargs: + data = data.to(kwargs['device']) + + if 'dtype' in kwargs: + data = data.type(kwargs['dtype']) + + data = data.repeat(numel, 1) + return cls(data).view(batch_shape) + + @classmethod + def IdentityLike(cls, G): + return cls.Identity(G.shape, device=G.data.device, dtype=G.data.dtype) + + @classmethod + def InitFromVec(cls, data): + return cls(cls.apply_op(FromVec, data)) + + @classmethod + def Random(cls, *batch_shape, sigma=1.0, **kwargs): + """ Construct random element with batch_shape by random sampling in tangent space""" + + if isinstance(batch_shape[0], tuple): + batch_shape = batch_shape[0] + + elif isinstance(batch_shape[0], list): + batch_shape = tuple(batch_shape[0]) + + tangent_shape = batch_shape + (cls.manifold_dim,) + xi = torch.randn(tangent_shape, **kwargs) + return cls.exp(sigma * xi) + + @classmethod + def apply_op(cls, op, x, y=None): + """ Apply group operator """ + inputs, out_shape = broadcast_inputs(x, y) + + data = op.apply(cls.group_id, *inputs) + return data.view(out_shape + (-1,)) + + @classmethod + def exp(cls, x): + """ exponential map: x -> X """ + return cls(cls.apply_op(Exp, x)) + + def quaternion(self): + """ extract quaternion """ + return self.apply_op(Quat, self.data) + + def log(self): + """ logarithm map """ + return self.apply_op(Log, self.data) + + def inv(self): + """ group inverse """ + return self.__class__(self.apply_op(Inv, self.data)) + + def mul(self, other): + """ group multiplication """ + return self.__class__(self.apply_op(Mul, self.data, other.data)) + + def retr(self, a): + """ retraction: Exp(a) * X """ + dX = self.__class__.apply_op(Exp, a) + return self.__class__(self.apply_op(Mul, dX, self.data)) + + def adj(self, a): + """ adjoint operator: b = A(X) * a """ + return self.apply_op(Adj, self.data, a) + + def adjT(self, a): + """ transposed adjoint operator: b = a * A(X) """ + return self.apply_op(AdjT, self.data, a) + + def Jinv(self, a): + return self.apply_op(Jinv, self.data, a) + + def act(self, p): + """ action on a point cloud """ + + # action on point + if p.shape[-1] == 3: + return self.apply_op(Act3, self.data, p) + + # action on homogeneous point + elif p.shape[-1] == 4: + return self.apply_op(Act4, self.data, p) + + def matrix(self): + """ convert element to 4x4 matrix """ + I = torch.eye(4, dtype=self.dtype, device=self.device) + I = I.view([1] * (len(self.data.shape) - 1) + [4, 4]) + return self.__class__(self.data[...,None,:]).act(I).transpose(-1,-2) + + def translation(self): + """ extract translation component """ + p = torch.as_tensor([0.0, 0.0, 0.0, 1.0], dtype=self.dtype, device=self.device) + p = p.view([1] * (len(self.data.shape) - 1) + [4,]) + return self.apply_op(Act4, self.data, p) + + def detach(self): + return self.__class__(self.data.detach()) + + def view(self, dims): + data_reshaped = self.data.view(dims + (self.embedded_dim,)) + return self.__class__(data_reshaped) + + def __mul__(self, other): + # group multiplication + + if isinstance(other, LieGroup): + return self.mul(other) + + # action on point + 
elif isinstance(other, torch.Tensor): + return self.act(other) + + def __getitem__(self, index): + return self.__class__(self.data[index]) + + def __setitem__(self, index, item): + self.data[index] = item.data + + def to(self, *args, **kwargs): + return self.__class__(self.data.to(*args, **kwargs)) + + def cpu(self): + return self.__class__(self.data.cpu()) + + def cuda(self): + return self.__class__(self.data.cuda()) + + def float(self, device): + return self.__class__(self.data.float()) + + def double(self, device): + return self.__class__(self.data.double()) + + def unbind(self, dim=0): + return [self.__class__(x) for x in self.data.unbind(dim=dim)] + + +class SO3(LieGroup): + group_name = 'SO3' + group_id = 1 + manifold_dim = 3 + embedded_dim = 4 + + # unit quaternion + id_elem = torch.as_tensor([0.0, 0.0, 0.0, 1.0]) + + def __init__(self, data): + if isinstance(data, SE3): + data = data.data[..., 3:7] + + super(SO3, self).__init__(data) + + +class RxSO3(LieGroup): + group_name = 'RxSO3' + group_id = 2 + manifold_dim = 4 + embedded_dim = 5 + + # unit quaternion + id_elem = torch.as_tensor([0.0, 0.0, 0.0, 1.0, 1.0]) + + def __init__(self, data): + if isinstance(data, Sim3): + data = data.data[..., 3:8] + + super(RxSO3, self).__init__(data) + + +class SE3(LieGroup): + group_name = 'SE3' + group_id = 3 + manifold_dim = 6 + embedded_dim = 7 + + # translation, unit quaternion + id_elem = torch.as_tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0]) + + def __init__(self, data): + if isinstance(data, SO3): + translation = torch.zeros_like(data.data[...,:3]) + data = torch.cat([translation, data.data], -1) + + super(SE3, self).__init__(data) + + def scale(self, s): + t, q = self.data.split([3,4], -1) + t = t * s.unsqueeze(-1) + return SE3(torch.cat([t, q], dim=-1)) + + +class Sim3(LieGroup): + group_name = 'Sim3' + group_id = 4 + manifold_dim = 7 + embedded_dim = 8 + + # translation, unit quaternion, scale + id_elem = torch.as_tensor([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 1.0]) + + def __init__(self, data): + + if isinstance(data, SO3): + scale = torch.ones_like(SO3.data[...,:1]) + translation = torch.zeros_like(SO3.data[...,:3]) + data = torch.cat([translation, SO3.data, scale], -1) + + elif isinstance(data, SE3): + scale = torch.ones_like(data.data[...,:1]) + data = torch.cat([data.data, scale], -1) + + elif isinstance(data, Sim3): + data = data.data + + super(Sim3, self).__init__(data) + + +def cat(group_list, dim): + """ Concatenate groups along dimension """ + data = torch.cat([X.data for X in group_list], dim=dim) + return group_list[0].__class__(data) + +def stack(group_list, dim): + """ Concatenate groups along dimension """ + data = torch.stack([X.data for X in group_list], dim=dim) + return group_list[0].__class__(data) diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/run_tests.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/run_tests.py new file mode 100644 index 0000000000000000000000000000000000000000..2e1ba008a6c2c93ecd48f270a98355bc5353c4af --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/lietorch/run_tests.py @@ -0,0 +1,302 @@ +import torch +import lietorch + +from lietorch import SO3, RxSO3, SE3, Sim3 +from gradcheck import gradcheck, get_analytical_jacobian + + +### forward tests ### + +def make_homogeneous(p): + return torch.cat([p, torch.ones_like(p[...,:1])], dim=-1) + +def matv(A, b): + return torch.matmul(A, b[...,None])[..., 0] + +def test_exp_log(Group, device='cuda'): + """ check Log(Exp(x)) == x """ + a = 
.2*torch.randn(2,3,4,5,6,7,Group.manifold_dim, device=device).double() + b = Group.exp(a).log() + assert torch.allclose(a,b,atol=1e-8), "should be identity" + print("\t-", Group, "Passed exp-log test") + +def test_inv(Group, device='cuda'): + """ check X * X^{-1} == 0 """ + X = Group.exp(.1*torch.randn(2,3,4,5,Group.manifold_dim, device=device).double()) + a = (X * X.inv()).log() + assert torch.allclose(a, torch.zeros_like(a), atol=1e-8), "should be 0" + print("\t-", Group, "Passed inv test") + +def test_adj(Group, device='cuda'): + """ check X * Exp(a) == Exp(Adj(X,a)) * X 0 """ + X = Group.exp(torch.randn(2,3,4,5, Group.manifold_dim, device=device).double()) + a = torch.randn(2,3,4,5, Group.manifold_dim, device=device).double() + + b = X.adj(a) + Y1 = X * Group.exp(a) + Y2 = Group.exp(b) * X + + c = (Y1 * Y2.inv()).log() + assert torch.allclose(c, torch.zeros_like(c), atol=1e-8), "should be 0" + print("\t-", Group, "Passed adj test") + + +def test_act(Group, device='cuda'): + X = Group.exp(torch.randn(1, Group.manifold_dim, device=device).double()) + p = torch.randn(1,3,device=device).double() + + p1 = X.act(p) + p2 = matv(X.matrix(), make_homogeneous(p)) + + assert torch.allclose(p1, p2[...,:3], atol=1e-8), "should be 0" + print("\t-", Group, "Passed act test") + + +### backward tests ### +def test_exp_log_grad(Group, device='cuda', tol=1e-8): + + D = Group.manifold_dim + + def fn(a): + return Group.exp(a).log() + + a = torch.zeros(1, Group.manifold_dim, requires_grad=True, device=device).double() + analytical, reentrant, correct_grad_sizes, correct_grad_types = \ + get_analytical_jacobian((a,), fn(a)) + + assert torch.allclose(analytical[0], torch.eye(D, device=device).double(), atol=tol) + + a = .2 * torch.randn(1, Group.manifold_dim, requires_grad=True, device=device).double() + analytical, reentrant, correct_grad_sizes, correct_grad_types = \ + get_analytical_jacobian((a,), fn(a)) + + assert torch.allclose(analytical[0], torch.eye(D, device=device).double(), atol=tol) + + print("\t-", Group, "Passed eye-grad test") + + +def test_inv_log_grad(Group, device='cuda', tol=1e-8): + + D = Group.manifold_dim + X = Group.exp(.2*torch.randn(1,D,device=device).double()) + + def fn(a): + return (Group.exp(a) * X).inv().log() + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + analytical, numerical = gradcheck(fn, [a], eps=1e-4) + + # assert torch.allclose(analytical[0], numerical[0], atol=tol) + if not torch.allclose(analytical[0], numerical[0], atol=tol): + print(analytical[0]) + print(numerical[0]) + + print("\t-", Group, "Passed inv-grad test") + + +def test_adj_grad(Group, device='cuda'): + D = Group.manifold_dim + X = Group.exp(.5*torch.randn(1,Group.manifold_dim, device=device).double()) + + def fn(a, b): + return (Group.exp(a) * X).adj(b) + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + b = torch.randn(1, D, requires_grad=True, device=device).double() + + analytical, numerical = gradcheck(fn, [a, b], eps=1e-4) + assert torch.allclose(analytical[0], numerical[0], atol=1e-8) + assert torch.allclose(analytical[1], numerical[1], atol=1e-8) + + print("\t-", Group, "Passed adj-grad test") + + +def test_adjT_grad(Group, device='cuda'): + D = Group.manifold_dim + X = Group.exp(.5*torch.randn(1,Group.manifold_dim, device=device).double()) + + def fn(a, b): + return (Group.exp(a) * X).adjT(b) + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + b = torch.randn(1, D, requires_grad=True, device=device).double() + + analytical, numerical = 
gradcheck(fn, [a, b], eps=1e-4) + + assert torch.allclose(analytical[0], numerical[0], atol=1e-8) + assert torch.allclose(analytical[1], numerical[1], atol=1e-8) + + print("\t-", Group, "Passed adjT-grad test") + + +def test_act_grad(Group, device='cuda'): + D = Group.manifold_dim + X = Group.exp(5*torch.randn(1,D, device=device).double()) + + def fn(a, b): + return (X*Group.exp(a)).act(b) + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + b = torch.randn(1, 3, requires_grad=True, device=device).double() + + analytical, numerical = gradcheck(fn, [a, b], eps=1e-4) + + assert torch.allclose(analytical[0], numerical[0], atol=1e-8) + assert torch.allclose(analytical[1], numerical[1], atol=1e-8) + + print("\t-", Group, "Passed act-grad test") + + +def test_matrix_grad(Group, device='cuda'): + D = Group.manifold_dim + X = Group.exp(torch.randn(1, D, device=device).double()) + + def fn(a): + return (Group.exp(a) * X).matrix() + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + analytical, numerical = gradcheck(fn, [a], eps=1e-4) + assert torch.allclose(analytical[0], numerical[0], atol=1e-6) + + print("\t-", Group, "Passed matrix-grad test") + + +def extract_translation_grad(Group, device='cuda'): + """ prototype function """ + + D = Group.manifold_dim + X = Group.exp(5*torch.randn(1,D, device=device).double()) + + def fn(a): + return (Group.exp(a)*X).translation() + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + + analytical, numerical = gradcheck(fn, [a], eps=1e-4) + + assert torch.allclose(analytical[0], numerical[0], atol=1e-8) + print("\t-", Group, "Passed translation grad test") + + +def test_vec_grad(Group, device='cuda', tol=1e-6): + + D = Group.manifold_dim + X = Group.exp(5*torch.randn(1,D, device=device).double()) + + def fn(a): + return (Group.exp(a)*X).vec() + + a = torch.zeros(1, D, requires_grad=True, device=device).double() + + analytical, numerical = gradcheck(fn, [a], eps=1e-4) + + assert torch.allclose(analytical[0], numerical[0], atol=tol) + print("\t-", Group, "Passed tovec grad test") + + +def test_fromvec_grad(Group, device='cuda', tol=1e-6): + + def fn(a): + if Group == SO3: + a = a / a.norm(dim=-1, keepdim=True) + + elif Group == RxSO3: + q, s = a.split([4, 1], dim=-1) + q = q / q.norm(dim=-1, keepdim=True) + a = torch.cat([q, s.exp()], dim=-1) + + elif Group == SE3: + t, q = a.split([3, 4], dim=-1) + q = q / q.norm(dim=-1, keepdim=True) + a = torch.cat([t, q], dim=-1) + + elif Group == Sim3: + t, q, s = a.split([3, 4, 1], dim=-1) + q = q / q.norm(dim=-1, keepdim=True) + a = torch.cat([t, q, s.exp()], dim=-1) + + return Group.InitFromVec(a).vec() + + D = Group.embedded_dim + a = torch.randn(1, 2, D, requires_grad=True, device=device).double() + + analytical, numerical = gradcheck(fn, [a], eps=1e-4) + + assert torch.allclose(analytical[0], numerical[0], atol=tol) + print("\t-", Group, "Passed fromvec grad test") + + + +def scale(device='cuda'): + + def fn(a, s): + X = SE3.exp(a) + X.scale(s) + return X.log() + + s = torch.rand(1, requires_grad=True, device=device).double() + a = torch.randn(1, 6, requires_grad=True, device=device).double() + + analytical, numerical = gradcheck(fn, [a, s], eps=1e-3) + print(analytical[1]) + print(numerical[1]) + + + assert torch.allclose(analytical[0], numerical[0], atol=1e-8) + assert torch.allclose(analytical[1], numerical[1], atol=1e-8) + + print("\t-", "Passed se3-to-sim3 test") + + +if __name__ == '__main__': + + + print("Testing lietorch forward pass (CPU) ...") + for 
Group in [SO3, RxSO3, SE3, Sim3]: + test_exp_log(Group, device='cpu') + test_inv(Group, device='cpu') + test_adj(Group, device='cpu') + test_act(Group, device='cpu') + + print("Testing lietorch backward pass (CPU)...") + for Group in [SO3, RxSO3, SE3, Sim3]: + if Group == Sim3: + tol = 1e-3 + else: + tol = 1e-8 + + test_exp_log_grad(Group, device='cpu', tol=tol) + test_inv_log_grad(Group, device='cpu', tol=tol) + test_adj_grad(Group, device='cpu') + test_adjT_grad(Group, device='cpu') + test_act_grad(Group, device='cpu') + test_matrix_grad(Group, device='cpu') + extract_translation_grad(Group, device='cpu') + test_vec_grad(Group, device='cpu') + test_fromvec_grad(Group, device='cpu') + + print("Testing lietorch forward pass (GPU) ...") + for Group in [SO3, RxSO3, SE3, Sim3]: + test_exp_log(Group, device='cuda') + test_inv(Group, device='cuda') + test_adj(Group, device='cuda') + test_act(Group, device='cuda') + + print("Testing lietorch backward pass (GPU)...") + for Group in [SO3, RxSO3, SE3, Sim3]: + if Group == Sim3: + tol = 1e-3 + else: + tol = 1e-8 + + test_exp_log_grad(Group, device='cuda', tol=tol) + test_inv_log_grad(Group, device='cuda', tol=tol) + test_adj_grad(Group, device='cuda') + test_adjT_grad(Group, device='cuda') + test_act_grad(Group, device='cuda') + test_matrix_grad(Group, device='cuda') + extract_translation_grad(Group, device='cuda') + test_vec_grad(Group, device='cuda') + test_fromvec_grad(Group, device='cuda') + + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/logger.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/logger.py new file mode 100644 index 0000000000000000000000000000000000000000..362ea92e0d82267daf4797939bf5b141df0e0a8b --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/logger.py @@ -0,0 +1,58 @@ + +import torch +from torch.utils.tensorboard import SummaryWriter + + +SUM_FREQ = 100 + +class Logger: + def __init__(self, name, scheduler): + self.total_steps = 0 + self.running_loss = {} + self.writer = None + self.name = name + self.scheduler = scheduler + + def _print_training_status(self): + if self.writer is None: + self.writer = SummaryWriter("runs/{}".format(self.name)) + print([k for k in self.running_loss]) + + lr = self.scheduler.get_lr().pop() + metrics_data = [self.running_loss[k]/SUM_FREQ for k in self.running_loss.keys()] + training_str = "[{:6d}, {:10.7f}] ".format(self.total_steps+1, lr) + metrics_str = ("{:10.4f}, "*len(metrics_data)).format(*metrics_data) + + # print the training status + print(training_str + metrics_str) + + for key in self.running_loss: + val = self.running_loss[key] / SUM_FREQ + self.writer.add_scalar(key, val, self.total_steps) + self.running_loss[key] = 0.0 + + def push(self, metrics): + + for key in metrics: + if key not in self.running_loss: + self.running_loss[key] = 0.0 + + self.running_loss[key] += metrics[key] + + if self.total_steps % SUM_FREQ == SUM_FREQ-1: + self._print_training_status() + self.running_loss = {} + + self.total_steps += 1 + + def write_dict(self, results): + if self.writer is None: + self.writer = SummaryWriter("runs/{}".format(self.name)) + print([k for k in self.running_loss]) + + for key in results: + self.writer.add_scalar(key, results[key], self.total_steps) + + def close(self): + self.writer.close() + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/net.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/net.py new file mode 100644 index 0000000000000000000000000000000000000000..def53706e5019e1f5cf49e33e94e53170a27f6d7 --- /dev/null +++ 
b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/net.py @@ -0,0 +1,270 @@ +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from collections import OrderedDict + +import torch_scatter +from torch_scatter import scatter_sum + +from . import fastba +from . import altcorr +from . import lietorch +from .lietorch import SE3 + +from .extractor import BasicEncoder, BasicEncoder4 +from .blocks import GradientClip, GatedResidual, SoftAgg + +from .utils import * +from .ba import BA +from . import projective_ops as pops + +autocast = torch.cuda.amp.autocast +import matplotlib.pyplot as plt + +DIM = 384 + +class Update(nn.Module): + def __init__(self, p): + super(Update, self).__init__() + + self.c1 = nn.Sequential( + nn.Linear(DIM, DIM), + nn.ReLU(inplace=True), + nn.Linear(DIM, DIM)) + + self.c2 = nn.Sequential( + nn.Linear(DIM, DIM), + nn.ReLU(inplace=True), + nn.Linear(DIM, DIM)) + + self.norm = nn.LayerNorm(DIM, eps=1e-3) + + self.agg_kk = SoftAgg(DIM) + self.agg_ij = SoftAgg(DIM) + + self.gru = nn.Sequential( + nn.LayerNorm(DIM, eps=1e-3), + GatedResidual(DIM), + nn.LayerNorm(DIM, eps=1e-3), + GatedResidual(DIM), + ) + + self.corr = nn.Sequential( + nn.Linear(2*49*p*p, DIM), + nn.ReLU(inplace=True), + nn.Linear(DIM, DIM), + nn.LayerNorm(DIM, eps=1e-3), + nn.ReLU(inplace=True), + nn.Linear(DIM, DIM), + ) + + self.d = nn.Sequential( + nn.ReLU(inplace=False), + nn.Linear(DIM, 2), + GradientClip()) + + self.w = nn.Sequential( + nn.ReLU(inplace=False), + nn.Linear(DIM, 2), + GradientClip(), + nn.Sigmoid()) + + + def forward(self, net, inp, corr, flow, ii, jj, kk): + """ update operator """ + + net = net + inp + self.corr(corr) + net = self.norm(net) + + ix, jx = fastba.neighbors(kk, jj) + mask_ix = (ix >= 0).float().reshape(1, -1, 1) + mask_jx = (jx >= 0).float().reshape(1, -1, 1) + + net = net + self.c1(mask_ix * net[:,ix]) + net = net + self.c2(mask_jx * net[:,jx]) + + net = net + self.agg_kk(net, kk) + net = net + self.agg_ij(net, ii*12345 + jj) + + net = self.gru(net) + + return net, (self.d(net), self.w(net), None) + + +class Patchifier(nn.Module): + def __init__(self, patch_size=3): + super(Patchifier, self).__init__() + self.patch_size = patch_size + self.fnet = BasicEncoder4(output_dim=128, norm_fn='instance') + self.inet = BasicEncoder4(output_dim=DIM, norm_fn='none') + + def __image_gradient(self, images): + gray = ((images + 0.5) * (255.0 / 2)).sum(dim=2) + dx = gray[...,:-1,1:] - gray[...,:-1,:-1] + dy = gray[...,1:,:-1] - gray[...,:-1,:-1] + g = torch.sqrt(dx**2 + dy**2) + g = F.avg_pool2d(g, 4, 4) + return g + + def forward(self, images, patches_per_image=80, disps=None, gradient_bias=False, return_color=False): + """ extract patches from input images """ + fmap = self.fnet(images) / 4.0 + imap = self.inet(images) / 4.0 + + b, n, c, h, w = fmap.shape + P = self.patch_size + + # bias patch selection towards regions with high gradient + if gradient_bias: + g = self.__image_gradient(images) + x = torch.randint(1, w-1, size=[n, 3*patches_per_image], device="cuda") + y = torch.randint(1, h-1, size=[n, 3*patches_per_image], device="cuda") + + coords = torch.stack([x, y], dim=-1).float() + g = altcorr.patchify(g[0,:,None], coords, 0).view(n, 3 * patches_per_image) + + ix = torch.argsort(g, dim=1) + x = torch.gather(x, 1, ix[:, -patches_per_image:]) + y = torch.gather(y, 1, ix[:, -patches_per_image:]) + + else: + x = torch.randint(1, w-1, size=[n, patches_per_image], device="cuda") + y = torch.randint(1, h-1, size=[n, patches_per_image], device="cuda") + 
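+        # Descriptive note (added comment, not upstream): at this point x, y hold the per-frame
+        # patch centres -- either uniform random samples or, with gradient_bias, the
+        # patches_per_image candidates (out of 3x as many) with the largest image-gradient
+        # response. altcorr.patchify below then crops context (imap), matching (gmap) and
+        # coordinate + disparity (grid) features around these centres.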
+ coords = torch.stack([x, y], dim=-1).float() + imap = altcorr.patchify(imap[0], coords, 0).view(b, -1, DIM, 1, 1) + gmap = altcorr.patchify(fmap[0], coords, P//2).view(b, -1, 128, P, P) + + if return_color: + clr = altcorr.patchify(images[0], 4*(coords + 0.5), 0).view(b, -1, 3) + + if disps is None: + disps = torch.ones(b, n, h, w, device="cuda") + + grid, _ = coords_grid_with_index(disps, device=fmap.device) + patches = altcorr.patchify(grid[0], coords, P//2).view(b, -1, 3, P, P) + + index = torch.arange(n, device="cuda").view(n, 1) + index = index.repeat(1, patches_per_image).reshape(-1) + + if return_color: + return fmap, gmap, imap, patches, index, clr + + return fmap, gmap, imap, patches, index + + +class CorrBlock: + def __init__(self, fmap, gmap, radius=3, dropout=0.2, levels=[1,4]): + self.dropout = dropout + self.radius = radius + self.levels = levels + + self.gmap = gmap + self.pyramid = pyramidify(fmap, lvls=levels) + + def __call__(self, ii, jj, coords): + corrs = [] + for i in range(len(self.levels)): + corrs += [ altcorr.corr(self.gmap, self.pyramid[i], coords / self.levels[i], ii, jj, self.radius, self.dropout) ] + return torch.stack(corrs, -1).view(1, len(ii), -1) + + +class VONet(nn.Module): + def __init__(self, use_viewer=False): + super(VONet, self).__init__() + self.P = 3 + self.patchify = Patchifier(self.P) + self.update = Update(self.P) + + self.DIM = DIM + self.RES = 4 + + + @autocast(enabled=False) + def forward(self, images, poses, disps, intrinsics, M=1024, STEPS=12, P=1, structure_only=False, rescale=False): + """ Estimates SE3 or Sim3 between pair of frames """ + + images = 2 * (images / 255.0) - 0.5 + intrinsics = intrinsics / 4.0 + disps = disps[:, :, 1::4, 1::4].float() + + fmap, gmap, imap, patches, ix = self.patchify(images, disps=disps) + + corr_fn = CorrBlock(fmap, gmap) + + b, N, c, h, w = fmap.shape + p = self.P + + patches_gt = patches.clone() + Ps = poses + + d = patches[..., 2, p//2, p//2] + patches = set_depth(patches, torch.rand_like(d)) + + kk, jj = flatmeshgrid(torch.where(ix < 8)[0], torch.arange(0,8, device="cuda")) + ii = ix[kk] + + imap = imap.view(b, -1, DIM) + net = torch.zeros(b, len(kk), DIM, device="cuda", dtype=torch.float) + + Gs = SE3.IdentityLike(poses) + + if structure_only: + Gs.data[:] = poses.data[:] + + traj = [] + bounds = [-64, -64, w + 64, h + 64] + + while len(traj) < STEPS: + Gs = Gs.detach() + patches = patches.detach() + + n = ii.max() + 1 + if len(traj) >= 8 and n < images.shape[1]: + if not structure_only: Gs.data[:,n] = Gs.data[:,n-1] + kk1, jj1 = flatmeshgrid(torch.where(ix < n)[0], torch.arange(n, n+1, device="cuda")) + kk2, jj2 = flatmeshgrid(torch.where(ix == n)[0], torch.arange(0, n+1, device="cuda")) + + ii = torch.cat([ix[kk1], ix[kk2], ii]) + jj = torch.cat([jj1, jj2, jj]) + kk = torch.cat([kk1, kk2, kk]) + + net1 = torch.zeros(b, len(kk1) + len(kk2), DIM, device="cuda") + net = torch.cat([net1, net], dim=1) + + if np.random.rand() < 0.1: + k = (ii != (n - 4)) & (jj != (n - 4)) + ii = ii[k] + jj = jj[k] + kk = kk[k] + net = net[:,k] + + patches[:,ix==n,2] = torch.median(patches[:,(ix == n-1) | (ix == n-2),2]) + n = ii.max() + 1 + + coords = pops.transform(Gs, patches, intrinsics, ii, jj, kk) + coords1 = coords.permute(0, 1, 4, 2, 3).contiguous() + + corr = corr_fn(kk, jj, coords1) + net, (delta, weight, _) = self.update(net, imap[:,kk], corr, None, ii, jj, kk) + + lmbda = 1e-4 + target = coords[...,p//2,p//2,:] + delta + + ep = 10 + for itr in range(2): + Gs, patches = BA(Gs, patches, intrinsics, target, 
weight, lmbda, ii, jj, kk, + bounds, ep=ep, fixedp=1, structure_only=structure_only) + + kl = torch.as_tensor(0) + dij = (ii - jj).abs() + k = (dij > 0) & (dij <= 2) + + coords = pops.transform(Gs, patches, intrinsics, ii[k], jj[k], kk[k]) + coords_gt, valid, _ = pops.transform(Ps, patches_gt, intrinsics, ii[k], jj[k], kk[k], jacobian=True) + + traj.append((valid, coords, coords_gt, Gs[:,:n], Ps[:,:n], kl)) + + return traj + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/plot_utils.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/plot_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..015385e24dd38c9fc26758665a787d66cbb3ef52 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/plot_utils.py @@ -0,0 +1,52 @@ +from copy import deepcopy + +import matplotlib.pyplot as plt +import numpy as np +from evo.core import sync +from evo.core.trajectory import PoseTrajectory3D +from evo.tools import plot +from pathlib import Path + + +def make_traj(args) -> PoseTrajectory3D: + if isinstance(args, tuple): + traj, tstamps = args + return PoseTrajectory3D(positions_xyz=traj[:,:3], orientations_quat_wxyz=traj[:,3:], timestamps=tstamps) + assert isinstance(args, PoseTrajectory3D), type(args) + return deepcopy(args) + +def best_plotmode(traj): + _, i1, i2 = np.argsort(np.var(traj.positions_xyz, axis=0)) + plot_axes = "xyz"[i2] + "xyz"[i1] + return getattr(plot.PlotMode, plot_axes) + +def plot_trajectory(pred_traj, gt_traj=None, title="", filename="", align=True, correct_scale=True): + pred_traj = make_traj(pred_traj) + + if gt_traj is not None: + gt_traj = make_traj(gt_traj) + gt_traj, pred_traj = sync.associate_trajectories(gt_traj, pred_traj) + + if align: + pred_traj.align(gt_traj, correct_scale=correct_scale) + + plot_collection = plot.PlotCollection("PlotCol") + fig = plt.figure(figsize=(8, 8)) + plot_mode = best_plotmode(gt_traj if (gt_traj is not None) else pred_traj) + ax = plot.prepare_axis(fig, plot_mode) + ax.set_title(title) + if gt_traj is not None: + plot.traj(ax, plot_mode, gt_traj, '--', 'gray', "Ground Truth") + plot.traj(ax, plot_mode, pred_traj, '-', 'blue', "Predicted") + plot_collection.add_figure("traj (error)", fig) + plot_collection.export(filename, confirm_overwrite=False) + plt.close(fig=fig) + print(f"Saved {filename}") + +def save_trajectory_tum_format(traj, filename): + traj = make_traj(traj) + tostr = lambda a: ' '.join(map(str, a)) + with Path(filename).open('w') as f: + for i in range(traj.num_poses): + f.write(f"{traj.timestamps[i]} {tostr(traj.positions_xyz[i])} {tostr(traj.orientations_quat_wxyz[i][[1,2,3,0]])}\n") + print(f"Saved {filename}") diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/projective_ops.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/projective_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..f9d80dc40435c338dee239ad27cc1fa281202197 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/projective_ops.py @@ -0,0 +1,121 @@ +import torch +import torch.nn.functional as F + +from .lietorch import SE3, Sim3 + +MIN_DEPTH = 0.2 + +def extract_intrinsics(intrinsics): + return intrinsics[...,None,None,:].unbind(dim=-1) + +def coords_grid(ht, wd, **kwargs): + y, x = torch.meshgrid( + torch.arange(ht).to(**kwargs).float(), + torch.arange(wd).to(**kwargs).float()) + + return torch.stack([x, y], dim=-1) + + +def iproj(patches, intrinsics): + """ inverse projection """ + x, y, d = patches.unbind(dim=2) + fx, fy, cx, cy = intrinsics[...,None,None].unbind(dim=2) + + i 
= torch.ones_like(d) + xn = (x - cx) / fx + yn = (y - cy) / fy + + X = torch.stack([xn, yn, i, d], dim=-1) + return X + + +def proj(X, intrinsics, depth=False): + """ projection """ + + X, Y, Z, W = X.unbind(dim=-1) + fx, fy, cx, cy = intrinsics[...,None,None].unbind(dim=2) + + # d = 0.01 * torch.ones_like(Z) + # d[Z > 0.01] = 1.0 / Z[Z > 0.01] + # d = torch.ones_like(Z) + # d[Z.abs() > 0.1] = 1.0 / Z[Z.abs() > 0.1] + + d = 1.0 / Z.clamp(min=0.1) + x = fx * (d * X) + cx + y = fy * (d * Y) + cy + + if depth: + return torch.stack([x, y, d], dim=-1) + + return torch.stack([x, y], dim=-1) + + +def transform(poses, patches, intrinsics, ii, jj, kk, depth=False, valid=False, jacobian=False, tonly=False): + """ projective transform """ + + # backproject + X0 = iproj(patches[:,kk], intrinsics[:,ii]) + + # transform + Gij = poses[:, jj] * poses[:, ii].inv() + + if tonly: + Gij[...,3:] = torch.as_tensor([0,0,0,1], device=Gij.device) + + X1 = Gij[:,:,None,None] * X0 + + # project + x1 = proj(X1, intrinsics[:,jj], depth) + + + if jacobian: + p = X1.shape[2] + X, Y, Z, H = X1[...,p//2,p//2,:].unbind(dim=-1) + o = torch.zeros_like(H) + i = torch.zeros_like(H) + + fx, fy, cx, cy = intrinsics[:,jj].unbind(dim=-1) + + d = torch.zeros_like(Z) + d[Z.abs() > 0.2] = 1.0 / Z[Z.abs() > 0.2] + + Ja = torch.stack([ + H, o, o, o, Z, -Y, + o, H, o, -Z, o, X, + o, o, H, Y, -X, o, + o, o, o, o, o, o, + ], dim=-1).view(1, len(ii), 4, 6) + + Jp = torch.stack([ + fx*d, o, -fx*X*d*d, o, + o, fy*d, -fy*Y*d*d, o, + ], dim=-1).view(1, len(ii), 2, 4) + + Jj = torch.matmul(Jp, Ja) + Ji = -Gij[:,:,None].adjT(Jj) + + Jz = torch.matmul(Jp, Gij.matrix()[...,:,3:]) + + return x1, (Z > 0.2).float(), (Ji, Jj, Jz) + + if valid: + return x1, (X1[...,2] > 0.2).float() + + return x1 + +def point_cloud(poses, patches, intrinsics, ix): + """ generate point cloud from patches """ + return poses[:,ix,None,None].inv() * iproj(patches, intrinsics[:,ix]) + + +def flow_mag(poses, patches, intrinsics, ii, jj, kk, beta=0.3): + """ projective transform """ + + coords0 = transform(poses, patches, intrinsics, ii, ii, kk) + coords1 = transform(poses, patches, intrinsics, ii, jj, kk, tonly=False) + coords2 = transform(poses, patches, intrinsics, ii, jj, kk, tonly=True) + + flow1 = (coords1 - coords0).norm(dim=-1) + flow2 = (coords2 - coords0).norm(dim=-1) + + return beta * flow1 + (1-beta) * flow2 diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/stream.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/stream.py new file mode 100644 index 0000000000000000000000000000000000000000..a7c4e35b518757d69ed6b9f2f8b73d237c2e0a14 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/stream.py @@ -0,0 +1,87 @@ +import os +import cv2 +import numpy as np +from multiprocessing import Process, Queue +from pathlib import Path +from itertools import chain + +def image_stream(queue, imagedir, calib, stride, skip=0): + """ image generator """ + + calib = np.loadtxt(calib, delimiter=" ") + fx, fy, cx, cy = calib[:4] + + K = np.eye(3) + K[0,0] = fx + K[0,2] = cx + K[1,1] = fy + K[1,2] = cy + + img_exts = ["*.png", "*.jpeg", "*.jpg"] + image_list = sorted(chain.from_iterable(Path(imagedir).glob(e) for e in img_exts))[skip::stride] + + for t, imfile in enumerate(image_list): + image = cv2.imread(str(imfile)) + if len(calib) > 4: + image = cv2.undistort(image, K, calib[4:]) + + if 0: + image = cv2.resize(image, None, fx=0.5, fy=0.5) + intrinsics = np.array([fx / 2, fy / 2, cx / 2, cy / 2]) + + else: + intrinsics = np.array([fx, fy, cx, cy]) + + h, w, _ = 
image.shape + image = image[:h-h%16, :w-w%16] + + queue.put((t, image, intrinsics)) + + queue.put((-1, image, intrinsics)) + + +def video_stream(queue, imagedir, calib, stride, skip=0): + """ video generator """ + + calib = np.loadtxt(calib, delimiter=" ") + fx, fy, cx, cy = calib[:4] + + K = np.eye(3) + K[0,0] = fx + K[0,2] = cx + K[1,1] = fy + K[1,2] = cy + + cap = cv2.VideoCapture(imagedir) + + t = 0 + + for _ in range(skip): + ret, image = cap.read() + + while True: + # Capture frame-by-frame + for _ in range(stride): + ret, image = cap.read() + # if frame is read correctly ret is True + if not ret: + break + + if not ret: + break + + if len(calib) > 4: + image = cv2.undistort(image, K, calib[4:]) + + image = cv2.resize(image, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_AREA) + h, w, _ = image.shape + image = image[:h-h%16, :w-w%16] + + intrinsics = np.array([fx*.5, fy*.5, cx*.5, cy*.5]) + queue.put((t, image, intrinsics)) + + t += 1 + + queue.put((-1, image, intrinsics)) + cap.release() + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/utils.py b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..597a2d4bd5d358480fd8b2f66a9fb5f1fca11a70 --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/dpvo/utils.py @@ -0,0 +1,87 @@ +import torch +import torch.nn.functional as F + + +all_times = [] + +class Timer: + def __init__(self, name, enabled=True): + self.name = name + self.enabled = enabled + + if self.enabled: + self.start = torch.cuda.Event(enable_timing=True) + self.end = torch.cuda.Event(enable_timing=True) + + def __enter__(self): + if self.enabled: + self.start.record() + + def __exit__(self, type, value, traceback): + global all_times + if self.enabled: + self.end.record() + torch.cuda.synchronize() + + elapsed = self.start.elapsed_time(self.end) + all_times.append(elapsed) + print(self.name, elapsed) + + +def coords_grid(b, n, h, w, **kwargs): + """ coordinate grid """ + x = torch.arange(0, w, dtype=torch.float, **kwargs) + y = torch.arange(0, h, dtype=torch.float, **kwargs) + coords = torch.stack(torch.meshgrid(y, x, indexing="ij")) + return coords[[1,0]].view(1, 1, 2, h, w).repeat(b, n, 1, 1, 1) + +def coords_grid_with_index(d, **kwargs): + """ coordinate grid with frame index""" + b, n, h, w = d.shape + i = torch.ones_like(d) + x = torch.arange(0, w, dtype=torch.float, **kwargs) + y = torch.arange(0, h, dtype=torch.float, **kwargs) + + y, x = torch.stack(torch.meshgrid(y, x, indexing="ij")) + y = y.view(1, 1, h, w).repeat(b, n, 1, 1) + x = x.view(1, 1, h, w).repeat(b, n, 1, 1) + + coords = torch.stack([x, y, d], dim=2) + index = torch.arange(0, n, dtype=torch.float, **kwargs) + index = index.view(1, n, 1, 1, 1).repeat(b, 1, 1, h, w) + + return coords, index + +def patchify(x, patch_size=3): + """ extract patches from video """ + b, n, c, h, w = x.shape + x = x.view(b*n, c, h, w) + y = F.unfold(x, patch_size) + y = y.transpose(1,2) + return y.reshape(b, -1, c, patch_size, patch_size) + + +def pyramidify(fmap, lvls=[1]): + """ turn fmap into a pyramid """ + b, n, c, h, w = fmap.shape + + pyramid = [] + for lvl in lvls: + gmap = F.avg_pool2d(fmap.view(b*n, c, h, w), lvl, stride=lvl) + pyramid += [ gmap.view(b, n, c, h//lvl, w//lvl) ] + + return pyramid + +def all_pairs_exclusive(n, **kwargs): + ii, jj = torch.meshgrid(torch.arange(n, **kwargs), torch.arange(n, **kwargs)) + k = ii != jj + return ii[k].reshape(-1), jj[k].reshape(-1) + +def set_depth(patches, depth): + patches[...,2,:,:] = 
depth[...,None,None] + return patches + +def flatmeshgrid(*args, **kwargs): + grid = torch.meshgrid(*args, **kwargs) + return (x.reshape(-1) for x in grid) + diff --git a/third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd b/third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd new file mode 100644 index 0000000000000000000000000000000000000000..4a3dc36371e09cf4e416daa8d5baa863f4addf0c --- /dev/null +++ b/third-party/DPVO/build/lib.win-amd64-3.9/lietorch_backends.cp39-win_amd64.pyd @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2c6a2dd2d29f5ee56eeb0912681982366f36a0bc7420a1ea05b05d64d761a2f +size 2983936 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_deps b/third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_deps new file mode 100644 index 0000000000000000000000000000000000000000..ea5bcf68878053f4be25460a18715c505fe19712 Binary files /dev/null and b/third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_deps differ diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_log b/third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_log new file mode 100644 index 0000000000000000000000000000000000000000..e44e32f043f889d11cfce756823df1010fcc5fe3 --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/.ninja_log @@ -0,0 +1,6 @@ +# ninja log v5 +0 10746 7510839870546033 C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj 8a6ea87c38b5029e +5 34954 7510840112325833 C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj f7d1343ce924f3b5 +7 10971 7510840239521303 C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj 1448633d654dbba0 +1 33893 7510840468461448 C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj d1400b5b4a6daa70 +0 10676 7510840592828284 C:/Users/thpap/PycharmProjects/motionbert-meta-sapiens/WHAM/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj ba69a789264375ff diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/build.ninja b/third-party/DPVO/build/temp.win-amd64-3.9/Release/build.ninja new file mode 100644 index 0000000000000000000000000000000000000000..369556be981500cf1745fb9f09e7ced53ca8ec4c --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/build.ninja @@ -0,0 +1,29 @@ +ninja_required_version = 1.3 +cxx = cl +nvcc = C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\bin\nvcc + +cflags = /nologo /Ox /W3 /GL /DNDEBUG /MD /MD /wd4819 /wd4251 /wd4244 /wd4267 /wd4275 /wd4018 /wd4190 /EHsc -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo/lietorch/include -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\thirdparty/eigen-3.4.0 -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\torch\csrc\api\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\TH -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\include" -IC:\Users\thpap\miniconda3\envs\wham\include -IC:\Users\thpap\miniconda3\envs\wham\include 
"-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt" +post_cflags = -O3 -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=lietorch_backends -D_GLIBCXX_USE_CXX11_ABI=0 /std:c++14 +cuda_cflags = --use-local-env -Xcompiler /MD -Xcompiler /wd4819 -Xcompiler /wd4251 -Xcompiler /wd4244 -Xcompiler /wd4267 -Xcompiler /wd4275 -Xcompiler /wd4018 -Xcompiler /wd4190 -Xcompiler /EHsc -Xcudafe --diag_suppress=base_class_has_different_dll_interface -Xcudafe --diag_suppress=field_without_dll_interface -Xcudafe --diag_suppress=dll_interface_conflict_none_assumed -Xcudafe --diag_suppress=dll_interface_conflict_dllexport_assumed -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo/lietorch/include -IC:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\thirdparty/eigen-3.4.0 -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\torch\csrc\api\include -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\TH -IC:\Users\thpap\miniconda3\envs\wham\lib\site-packages\torch\include\THC "-IC:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v11.3\include" -IC:\Users\thpap\miniconda3\envs\wham\include -IC:\Users\thpap\miniconda3\envs\wham\include "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\ATLMFC\include" "-IC:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.29.30133\include" "-IC:\Program Files (x86)\Windows Kits\NETFXSDK\4.8\include\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\ucrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\shared" "-IC:\Program Files (x86)\Windows 
Kits\10\include\10.0.22621.0\um" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\winrt" "-IC:\Program Files (x86)\Windows Kits\10\include\10.0.22621.0\cppwinrt" +cuda_post_cflags = -D__CUDA_NO_HALF_OPERATORS__ -D__CUDA_NO_HALF_CONVERSIONS__ -D__CUDA_NO_BFLOAT16_CONVERSIONS__ -D__CUDA_NO_HALF2_OPERATORS__ --expt-relaxed-constexpr -O3 --allow-unsupported-compiler -DTORCH_API_INCLUDE_EXTENSION_H -DTORCH_EXTENSION_NAME=lietorch_backends -D_GLIBCXX_USE_CXX11_ABI=0 -gencode=arch=compute_86,code=compute_86 -gencode=arch=compute_86,code=sm_86 +ldflags = + +rule compile + command = cl /showIncludes $cflags -c $in /Fo$out $post_cflags + deps = msvc + +rule cuda_compile + depfile = $out.d + deps = gcc + command = $nvcc --generate-dependencies-with-compile --dependency-output $out.d $cuda_cflags -c $in -o $out $cuda_post_cflags + + + +build C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\build\temp.win-amd64-3.9\Release\dpvo/lietorch/src/lietorch.obj: compile C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo\lietorch\src\lietorch.cpp +build C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\build\temp.win-amd64-3.9\Release\dpvo/lietorch/src/lietorch_cpu.obj: compile C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo\lietorch\src\lietorch_cpu.cpp +build C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\build\temp.win-amd64-3.9\Release\dpvo/lietorch/src/lietorch_gpu.obj: cuda_compile C$:\Users\thpap\PycharmProjects\motionbert-meta-sapiens\WHAM\third-party\DPVO\dpvo\lietorch\src\lietorch_gpu.cu + + + + + diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj new file mode 100644 index 0000000000000000000000000000000000000000..1faa583159bcb156a9c5d217232537f7abbf8bdf --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7f1ef51859e20505bb755ea5e5ce48fba33b480d99b464405edb944ee5c40191 +size 37419806 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj new file mode 100644 index 0000000000000000000000000000000000000000..3397cc7fe9316437d2be8564861b2e73caa52766 --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/correlation_kernel.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c41ced021c0c59fdfba4692ce3769cbc85e0a7b72ad5a1c332c86ff5ca5015b3 +size 1461202 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.exp b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.exp new file mode 100644 index 0000000000000000000000000000000000000000..b3821d5c792675d953a5020bafa970eaef14f253 Binary files /dev/null and b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.exp differ diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.lib b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.lib new file mode 100644 index 0000000000000000000000000000000000000000..c48800d7c2d5dcc10f26162be26cc130b4823cf7 Binary files /dev/null and 
b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/altcorr/cuda_corr.cp39-win_amd64.lib differ diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj new file mode 100644 index 0000000000000000000000000000000000000000..78b4dec715decaafade8d1ab0296a3dfff749614 --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0c345d67f220f9216fd2ce229b4d4a25c91ba82568459eb137ed18ae96d77aad +size 37657608 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj new file mode 100644 index 0000000000000000000000000000000000000000..eac916017b2aea14ecf1e99e9607b00a0bdf48ce --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/ba_cuda.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:766b7b9f519617f73e61fefca21afde2a56852bfc92bec64cb88fcf60307657e +size 1195236 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.exp b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.exp new file mode 100644 index 0000000000000000000000000000000000000000..1ea0ddcf6210e67cff0c65808b7ce8d2502e8fa2 Binary files /dev/null and b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.exp differ diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.lib b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.lib new file mode 100644 index 0000000000000000000000000000000000000000..8ea61453e226597a00038d266d1d9cc79440da91 Binary files /dev/null and b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/fastba/cuda_ba.cp39-win_amd64.lib differ diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj new file mode 100644 index 0000000000000000000000000000000000000000..89df53cf9e654b1d6e29d5bd0b4602a5d1751ea0 --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc39b10aa93073560e20f0b855a64775989f4368fc8dc067d086a10546a18c63 +size 44606503 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.exp b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.exp new file mode 100644 index 0000000000000000000000000000000000000000..e9a88280aa498966c23d51e096341dfd6c340ce2 Binary files /dev/null and b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.exp differ diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.lib b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.lib new file mode 100644 index 0000000000000000000000000000000000000000..49ceec58cb18d18292d7b35aa1607e6c95695734 Binary files /dev/null and b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_backends.cp39-win_amd64.lib differ diff --git 
a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj new file mode 100644 index 0000000000000000000000000000000000000000..fdd9bf20773a826d18e0172201b1c3b137251ba4 --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_cpu.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1957cc4bf3d2edd2496ff09abebd3da682200fc79241e85be4ba902323d90d47 +size 124596555 diff --git a/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_gpu.obj b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_gpu.obj new file mode 100644 index 0000000000000000000000000000000000000000..96fa9504a5ef3e62302cff1591569ca7474c797a --- /dev/null +++ b/third-party/DPVO/build/temp.win-amd64-3.9/Release/dpvo/lietorch/src/lietorch_gpu.obj @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9ca5f4a62d14769a29e712dc88a70da544e30e8ccd6332839d8bdb1b01137fc +size 4108787
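The plotting helpers added in dpvo/plot_utils.py accept either an evo PoseTrajectory3D or a (poses, timestamps) tuple, where poses is an (N, 7) array whose first three columns are xyz positions and whose last four columns are a wxyz quaternion. A minimal sketch of how they might be exercised with a synthetic straight-line trajectory follows; the array contents, file names, and frame rate are made up for illustration, and evo plus matplotlib are assumed to be installed.

import numpy as np

from dpvo.plot_utils import plot_trajectory, save_trajectory_tum_format

N = 100
tstamps = np.arange(N, dtype=np.float64) / 30.0   # hypothetical 30 fps timestamps
poses = np.zeros((N, 7))
poses[:, 0] = np.linspace(0.0, 5.0, N)            # translate along x
poses[:, 3] = 1.0                                 # identity quaternion in wxyz order

# make_traj() treats columns 0:3 as xyz and columns 3:7 as a wxyz quaternion
plot_trajectory((poses, tstamps), title="Synthetic trajectory", filename="traj.pdf")
save_trajectory_tum_format((poses, tstamps), "traj_tum.txt")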
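dpvo/projective_ops.py stores each patch as [x, y, d] with inverse depth d: iproj() lifts a pixel to the homogeneous point [(x - cx)/fx, (y - cy)/fy, 1, d], and proj() applies the pinhole model x' = fx*X/Z + cx, y' = fy*Y/Z + cy with Z clamped at 0.1. A small numpy sketch of that round trip, re-stating the formulas so it runs without the CUDA-built package; the intrinsics and pixel coordinates below are arbitrary.

import numpy as np

fx, fy, cx, cy = 500.0, 500.0, 320.0, 240.0   # hypothetical intrinsics

def iproj(x, y, d):
    # pixel (x, y) with inverse depth d -> homogeneous point [Xn, Yn, 1, d]
    return np.array([(x - cx) / fx, (y - cy) / fy, 1.0, d])

def proj(X):
    # homogeneous point -> pixel, mirroring d = 1 / Z.clamp(min=0.1) in the diff
    Xc, Yc, Zc, _ = X
    dd = 1.0 / max(Zc, 0.1)
    return fx * Xc * dd + cx, fy * Yc * dd + cy

# With the identity pose, proj(iproj(.)) returns the original pixel,
# regardless of the inverse depth carried in the fourth component.
print(proj(iproj(100.0, 80.0, d=0.5)))   # -> (100.0, 80.0)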
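dpvo/stream.py provides queue-based frame producers: image_stream() and video_stream() push (t, image, intrinsics) tuples and mark the end of the stream with t == -1. A minimal consumer sketch, under the assumptions that the built dpvo package is importable, that a video.mp4 exists, and that calib.txt contains at least "fx fy cx cy"; those file names are placeholders.

from multiprocessing import Process, Queue

from dpvo.stream import video_stream  # module added in the diff above

if __name__ == "__main__":
    queue = Queue(maxsize=8)
    reader = Process(target=video_stream,
                     args=(queue, "video.mp4", "calib.txt", 2))  # stride of 2 frames
    reader.start()

    while True:
        t, image, intrinsics = queue.get()
        if t < 0:  # the producer pushes t == -1 as its end-of-stream sentinel
            break
        # image: cropped, half-resolution BGR frame; intrinsics: [fx, fy, cx, cy] scaled by 0.5
        print(t, image.shape, intrinsics)

    reader.join()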
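Finally, a quick shape check for the unfold-based patchify() helper added in dpvo/utils.py; the function is re-stated here so the snippet runs standalone, and the input sizes are arbitrary.

import torch
import torch.nn.functional as F

def patchify(x, patch_size=3):
    # same formulas as the version added in dpvo/utils.py
    b, n, c, h, w = x.shape
    y = F.unfold(x.view(b * n, c, h, w), patch_size)
    y = y.transpose(1, 2)
    return y.reshape(b, -1, c, patch_size, patch_size)

x = torch.randn(1, 2, 3, 8, 8)   # b=1 video, n=2 frames, c=3 channels, 8x8 resolution
p = patchify(x, patch_size=3)
print(p.shape)                   # torch.Size([1, 72, 3, 3, 3]): 2 frames x (8-3+1)^2 = 72 patches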